This commit is contained in:
user 2025-07-02 19:25:20 +03:00
parent 21964fa986
commit 797f379648
68 changed files with 23871 additions and 1271 deletions

103
.env.compatible Normal file
View File

@ -0,0 +1,103 @@
# =============================================================================
# COMPATIBLE ENVIRONMENT CONFIGURATION
# Based on existing project structure with MariaDB
# =============================================================================
# Application Settings
DEBUG=false
ENVIRONMENT=production
SECRET_KEY=your-super-secret-key-change-this-in-production
ENCRYPTION_KEY=your-encryption-key-for-file-encryption
# Server Configuration (keeping existing port)
HOST=0.0.0.0
PORT=15100
WORKERS=4
AUTO_RELOAD=false
# MariaDB Configuration (keeping existing database)
MYSQL_ROOT_PASSWORD=password
MYSQL_DATABASE=myuploader
MYSQL_USER=myuploader
MYSQL_PASSWORD=password
MYSQL_HOST=maria_db
MYSQL_PORT=3306
# Database URL for SQLAlchemy (MariaDB compatible)
DATABASE_URL=mysql+aiomysql://myuploader:password@maria_db:3306/myuploader
DATABASE_POOL_SIZE=20
DATABASE_MAX_OVERFLOW=30
DATABASE_POOL_TIMEOUT=30
DATABASE_POOL_RECYCLE=3600
# Redis Configuration (new addition)
REDIS_URL=redis://redis:6379/0
REDIS_POOL_SIZE=10
REDIS_MAX_CONNECTIONS=20
REDIS_SOCKET_TIMEOUT=5
REDIS_SOCKET_CONNECT_TIMEOUT=5
# Security Settings
ACCESS_TOKEN_EXPIRE_MINUTES=60
REFRESH_TOKEN_EXPIRE_DAYS=30
PASSWORD_MIN_LENGTH=8
RATE_LIMIT_ENABLED=true
CORS_ORIGINS=["http://localhost:3000","https://yourdomain.com"]
# Storage Configuration (keeping existing paths)
STORAGE_PATH=/app/data
MAX_FILE_SIZE=10737418240
MAX_CHUNK_SIZE=10485760
CHUNK_SIZE=1048576
ENCRYPT_FILES=true
CLEANUP_TEMP_FILES=true
# User Limits
MAX_UPLOADS_PER_DAY=100
MAX_STORAGE_PER_USER=107374182400
MAX_FILES_PER_USER=10000
DAILY_TRANSACTION_LIMIT=10
MAX_TRANSACTION_AMOUNT=5
# TON Blockchain Configuration
TON_API_ENDPOINT=https://toncenter.com/api/v2
TON_API_KEY=your-ton-api-key
TON_TESTNET=false
TON_WALLET_VERSION=v4
# Logging Configuration (keeping existing paths)
LOG_LEVEL=INFO
LOG_FORMAT=json
LOG_FILE=/app/logs/app.log
LOG_ROTATION=daily
LOG_RETENTION_DAYS=30
# Email Configuration (Optional)
SMTP_HOST=smtp.gmail.com
SMTP_PORT=587
SMTP_USERNAME=your-email@gmail.com
SMTP_PASSWORD=your-app-password
SMTP_TLS=true
FROM_EMAIL=noreply@yourdomain.com
# Monitoring Configuration (minimal)
METRICS_ENABLED=true
METRICS_PORT=9090
HEALTH_CHECK_ENABLED=true
# External Services (Optional)
WEBHOOK_URL=https://yourdomain.com/webhooks
BACKUP_ENABLED=true
BACKUP_SCHEDULE=0 2 * * *
BACKUP_RETENTION_DAYS=30
# Development Settings (Only for development)
# DEV_RELOAD=true
# DEV_DEBUG_TOOLBAR=true
# DEV_PROFILER=true
# Production Settings (Only for production)
# SENTRY_DSN=https://your-sentry-dsn
# SSL_ENABLED=true
# SSL_CERT_PATH=/path/to/cert.pem
# SSL_KEY_PATH=/path/to/key.pem

95
.env.example Normal file
View File

@ -0,0 +1,95 @@
# =============================================================================
# ENVIRONMENT CONFIGURATION EXAMPLE
# Copy this file to .env and configure your values
# =============================================================================
# Application Settings
DEBUG=false
ENVIRONMENT=production
SECRET_KEY=your-super-secret-key-change-this-in-production
ENCRYPTION_KEY=your-encryption-key-for-file-encryption
# Server Configuration
HOST=0.0.0.0
PORT=15100
WORKERS=4
AUTO_RELOAD=false
# Database Configuration (PostgreSQL)
DATABASE_URL=postgresql+asyncpg://postgres:password@localhost:5432/myuploader
DATABASE_POOL_SIZE=20
DATABASE_MAX_OVERFLOW=30
DATABASE_POOL_TIMEOUT=30
DATABASE_POOL_RECYCLE=3600
# Redis Configuration
REDIS_URL=redis://localhost:6379/0
REDIS_POOL_SIZE=10
REDIS_MAX_CONNECTIONS=20
REDIS_SOCKET_TIMEOUT=5
REDIS_SOCKET_CONNECT_TIMEOUT=5
# Security Settings
ACCESS_TOKEN_EXPIRE_MINUTES=60
REFRESH_TOKEN_EXPIRE_DAYS=30
PASSWORD_MIN_LENGTH=8
RATE_LIMIT_ENABLED=true
CORS_ORIGINS=["http://localhost:3000","https://yourdomain.com"]
# Storage Configuration
STORAGE_PATH=./data/storage
MAX_FILE_SIZE=10737418240
MAX_CHUNK_SIZE=10485760
CHUNK_SIZE=1048576
ENCRYPT_FILES=true
CLEANUP_TEMP_FILES=true
# User Limits
MAX_UPLOADS_PER_DAY=100
MAX_STORAGE_PER_USER=107374182400
MAX_FILES_PER_USER=10000
DAILY_TRANSACTION_LIMIT=10
MAX_TRANSACTION_AMOUNT=5
# TON Blockchain Configuration
TON_API_ENDPOINT=https://toncenter.com/api/v2
TON_API_KEY=your-ton-api-key
TON_TESTNET=false
TON_WALLET_VERSION=v4
# Logging Configuration
LOG_LEVEL=INFO
LOG_FORMAT=json
LOG_FILE=./logs/app.log
LOG_ROTATION=daily
LOG_RETENTION_DAYS=30
# Email Configuration (Optional)
SMTP_HOST=smtp.gmail.com
SMTP_PORT=587
SMTP_USERNAME=your-email@gmail.com
SMTP_PASSWORD=your-app-password
SMTP_TLS=true
FROM_EMAIL=noreply@yourdomain.com
# Monitoring Configuration
METRICS_ENABLED=true
METRICS_PORT=9090
HEALTH_CHECK_ENABLED=true
# External Services (Optional)
WEBHOOK_URL=https://yourdomain.com/webhooks
BACKUP_ENABLED=true
BACKUP_SCHEDULE=0 2 * * *
BACKUP_RETENTION_DAYS=30
# Development Settings (Only for development)
# DEV_RELOAD=true
# DEV_DEBUG_TOOLBAR=true
# DEV_PROFILER=true
# Production Settings (Only for production)
# SENTRY_DSN=https://your-sentry-dsn
# SSL_ENABLED=true
# SSL_CERT_PATH=/path/to/cert.pem
# SSL_KEY_PATH=/path/to/key.pem

729
DOCS_RU.md Normal file
View File

@ -0,0 +1,729 @@
# 🚀 MY Network v2.0 - Distributed Content Network
## 📖 Complete documentation
---
## 🎯 What is MY Network?
**MY Network** is a modern distributed P2P system for automatic content management and synchronization between servers. It runs as an overlay protocol on top of the existing infrastructure and does not disrupt currently running applications.
### ✨ Key features
- 🌐 **P2P network** - automatic node discovery and connection
- 🔄 **Auto-synchronization** - content replication between nodes
- 📊 **Web monitoring** - dashboards with ASCII art
- 🔒 **Security** - SSL/TLS encryption, firewall, rate limiting
- 🐳 **Containerization** - full Docker support
- 📡 **REST API** - programmatic control of every feature
- ⚖️ **Load balancing** - load distribution across nodes
---
## 🏗️ System architecture
```mermaid
graph TB
subgraph "MY Network Cluster"
Node1[Node 1<br/>Bootstrap]
Node2[Node 2<br/>Worker]
Node3[Node 3<br/>Worker]
Node4[Node 4<br/>Worker]
end
subgraph "Node 1 Components"
API1[REST API<br/>:15100]
Monitor1[Web Monitor<br/>/monitor]
Sync1[Sync Manager]
Peer1[Peer Manager]
Storage1[Storage<br/>/opt/storage]
end
subgraph "External Services"
Nginx[Nginx<br/>SSL Proxy]
DB[(MariaDB<br/>Database)]
Redis[(Redis<br/>Cache)]
end
subgraph "Security Layer"
UFW[UFW Firewall]
Fail2Ban[Fail2ban]
SSL[Let's Encrypt<br/>SSL]
end
Internet --> UFW
UFW --> SSL
SSL --> Nginx
Nginx --> API1
Node1 <--> Node2
Node1 <--> Node3
Node1 <--> Node4
Node2 <--> Node3
Node3 <--> Node4
API1 <--> DB
API1 <--> Redis
API1 <--> Storage1
Sync1 <--> Peer1
Monitor1 <--> API1
```
---
## 🔄 Content synchronization flow
```mermaid
sequenceDiagram
participant C as Client
participant N1 as Node 1 (Source)
participant N2 as Node 2 (Target)
participant N3 as Node 3 (Target)
Note over N1,N3: Automatic synchronization every 5 minutes
C->>N1: Upload file
N1->>N1: Save to /opt/storage
N1->>N1: Update the database
loop Every 5 minutes
N1->>N2: Status check
N2->>N1: File list + hashes
N1->>N1: Compare with local files
alt File missing on N2
N1->>N2: Transfer file (chunked)
N2->>N2: Save + integrity check
N2->>N1: Acknowledge receipt
end
N1->>N3: Same for Node 3
end
Note over N1,N3: All nodes hold identical data
```
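The comparison step in the loop above boils down to hashing local files and taking a set difference against the hashes a peer reports. A minimal Python sketch of that idea; `plan_sync` and the index shapes are illustrative assumptions, not the actual MY Network internals:

```python
import hashlib
from pathlib import Path


def file_sha256(path: Path) -> str:
    """Hash a file in chunks so large files are not loaded into memory at once."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1024 * 1024), b""):
            digest.update(chunk)
    return digest.hexdigest()


def plan_sync(local_files: list[Path], peer_hashes: set[str]) -> list[Path]:
    """Return the local files whose hashes the peer has not reported yet."""
    return [path for path in local_files if file_sha256(path) not in peer_hashes]
```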
---
## 🌐 Network interaction diagram
```mermaid
graph LR
subgraph "Internet"
User[👤 User]
Bot[🤖 Telegram Bot]
end
subgraph "Security Layer"
FW[🔥 UFW Firewall<br/>Ports: 22,80,443]
FB[🚫 Fail2ban<br/>Brute-force protection]
end
subgraph "Web Layer"
NG[🌐 Nginx<br/>SSL Termination<br/>Rate Limiting]
end
subgraph "Application Layer"
API[📡 MY Network API<br/>Port 15100]
MON[📊 Web Monitor<br/>/api/my/monitor]
SYNC[🔄 Sync Manager<br/>Background Process]
end
subgraph "Data Layer"
DB[(🗄️ MariaDB<br/>Port 3306)]
RD[(⚡ Redis<br/>Port 6379)]
FS[📁 File Storage<br/>/opt/storage]
end
User --> FW
Bot --> FW
FW --> FB
FB --> NG
NG --> API
API --> MON
API --> SYNC
API --> DB
API --> RD
API --> FS
style FW fill:#ff9999
style FB fill:#ff9999
style NG fill:#99ccff
style API fill:#99ff99
style DB fill:#ffcc99
```
---
## ⚙️ Installation in two commands
### 🥇 Command 1: Install the service
```bash
# Download and install the base service
curl -O https://your-domain.com/install_service.sh
sudo bash install_service.sh
```
**What this command does:**
- ✅ Updates the system
- ✅ Installs Docker, Python, and dependencies
- ✅ Creates the directory structure
- ✅ Configures the systemd service
- ✅ Starts the base service on port 15100
### 🥈 Command 2: Hardening and SSL
```bash
# Configure hardening, nginx, SSL, firewall
sudo bash secure_service.sh
```
**What this command does:**
- 🔒 Installs an SSL certificate (Let's Encrypt)
- 🌐 Configures Nginx with rate limiting
- 🔥 Configures the UFW firewall
- 🚫 Installs Fail2ban
- 📊 Sets up system monitoring
- 🔄 Sets up automatic certificate renewal
---
## 🛠️ System components
### 📡 REST API Endpoints
```mermaid
graph TD
API[MY Network API<br/>/api/my/]
API --> NODE[/node/*<br/>Node management]
API --> PEER[/peer/*<br/>Peer management]
API --> SYNC[/sync/*<br/>Synchronization]
API --> NET[/network/*<br/>Network statistics]
API --> MON[/monitor/*<br/>Web monitoring]
NODE --> NINFO[GET /info<br/>Node information]
NODE --> NPEERS[GET /peers<br/>Peer list]
NODE --> NHEALTH[GET /health<br/>Health check]
PEER --> PADD[POST /add<br/>Add peer]
PEER --> PREM[DELETE /remove<br/>Remove peer]
PEER --> PSTAT[GET /stats<br/>Peer statistics]
SYNC --> SSTART[POST /start<br/>Start synchronization]
SYNC --> SSTOP[POST /stop<br/>Stop synchronization]
SYNC --> SSTAT[GET /status<br/>Synchronization status]
MON --> MDASH[GET /<br/>Web dashboard]
MON --> MASCII[GET /ascii<br/>ASCII status]
MON --> MAPI[GET /api<br/>JSON metrics]
```
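The same endpoints can also be driven programmatically rather than through curl. A short illustrative Python client using only the standard library; the base URL is a placeholder and the response shapes are assumptions:

```python
import json
import urllib.request

BASE_URL = "https://your-domain.com/api/my"  # placeholder domain


def get_json(path: str) -> dict:
    """GET an endpoint and decode the JSON body."""
    with urllib.request.urlopen(f"{BASE_URL}{path}", timeout=10) as resp:
        return json.load(resp)


def post_json(path: str, payload: dict | None = None) -> dict:
    """POST a JSON payload to an endpoint."""
    req = urllib.request.Request(
        f"{BASE_URL}{path}",
        data=json.dumps(payload or {}).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req, timeout=10) as resp:
        return json.load(resp)


if __name__ == "__main__":
    print(get_json("/node/info"))    # node information
    print(post_json("/sync/start"))  # kick off a synchronization run
```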
### 📊 Web monitoring
```mermaid
graph TB
subgraph "Web Monitor Dashboard"
HEADER[🎯 MY Network Monitor<br/>ASCII Art Header]
METRICS[📈 System Metrics<br/>CPU, Memory, Disk]
NETWORK[🌐 Network Status<br/>Peers, Connections]
SYNC[🔄 Sync Status<br/>Last Sync, Progress]
STORAGE[💾 Storage Info<br/>Used/Free Space]
REALTIME[⚡ Real-time Updates<br/>Auto-refresh every 5 s]
CHARTS[📊 ASCII Charts<br/>Bandwidth, Load]
end
HEADER --> METRICS
HEADER --> NETWORK
HEADER --> SYNC
HEADER --> STORAGE
METRICS --> REALTIME
NETWORK --> REALTIME
SYNC --> REALTIME
STORAGE --> REALTIME
REALTIME --> CHARTS
style HEADER fill:#ff6b6b
style REALTIME fill:#4ecdc4
style CHARTS fill:#45b7d1
```
---
## 🔧 Configuration
### 📄 Main config (.env)
```bash
# MY Network Configuration
NODE_ID=node-1234567890 # Unique node ID
NODE_PORT=15100 # Application port
DOMAIN=my-network.example.com # Server domain
# Database Configuration
DB_HOST=localhost
DB_PORT=3306
DB_NAME=my_network
DB_USER=my_network_user
DB_PASSWORD=secure_password_here
# Security
SECRET_KEY=very_long_secret_key_here
JWT_SECRET=jwt_secret_key_here
# Network Settings
BOOTSTRAP_NODES=["node1.example.com", "node2.example.com"]
SYNC_INTERVAL=300 # Synchronization interval (seconds)
MAX_PEERS=10 # Maximum number of peers
```
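These are ordinary environment variables; how they are consumed is up to the application. A minimal sketch of reading them from `os.environ`, assuming `BOOTSTRAP_NODES` is stored as a JSON list (the real MY Network settings loader is not shown in this commit excerpt):

```python
import json
import os
from dataclasses import dataclass, field


@dataclass
class NetworkSettings:
    """A small subset of the .env values above, parsed into native types."""
    node_id: str = os.getenv("NODE_ID", "node-unknown")
    node_port: int = int(os.getenv("NODE_PORT", "15100"))
    sync_interval: int = int(os.getenv("SYNC_INTERVAL", "300"))
    max_peers: int = int(os.getenv("MAX_PEERS", "10"))
    # BOOTSTRAP_NODES is assumed to be a JSON-encoded list of hostnames
    bootstrap_nodes: list[str] = field(
        default_factory=lambda: json.loads(os.getenv("BOOTSTRAP_NODES", "[]"))
    )


settings = NetworkSettings()
print(settings.node_id, settings.sync_interval, settings.bootstrap_nodes)
```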
### 🗂️ Directory layout
```
/opt/
├── my-network/ # Main project directory
│ └── my-uploader-bot/ # Application code
│ ├── app/ # Python application
│ ├── static/ # Static files
│ ├── templates/ # HTML templates
│ └── logs/ # Application logs
├── storage/ # File storage
├── logs/ # System logs
└── my-network-config.txt # Installation configuration
```
---
## 🚀 Usage
### 🔍 Status checks
```bash
# Check the service
systemctl status my-network
# Check nginx
systemctl status nginx
# Check the firewall
ufw status
# Check SSL
certbot certificates
# Check the API
curl https://your-domain.com/api/my/health
```
### 📊 Monitoring
```bash
# Web monitoring UI
https://your-domain.com/api/my/monitor/
# ASCII status in the terminal
curl https://your-domain.com/api/my/monitor/ascii
# JSON metrics
curl https://your-domain.com/api/my/monitor/api
```
### 🔄 Managing synchronization
```bash
# Start synchronization
curl -X POST https://your-domain.com/api/my/sync/start
# Stop synchronization
curl -X POST https://your-domain.com/api/my/sync/stop
# Synchronization status
curl https://your-domain.com/api/my/sync/status
```
---
## 🔒 Security
### 🛡️ Protection layers
```mermaid
graph TD
Internet[🌐 Internet] --> FW[🔥 UFW Firewall]
FW --> |Port 80,443,SSH| SSL[🔐 SSL/TLS<br/>Let's Encrypt]
SSL --> Nginx[🌐 Nginx Proxy]
Nginx --> |Rate Limiting| FB[🚫 Fail2ban]
FB --> API[📡 Application]
subgraph "Security Features"
FW1[✅ Firewall Rules]
SSL1[✅ SSL Certificates]
RL[✅ Rate Limiting]
FB1[✅ Brute Force Protection]
HD[✅ Security Headers]
IP[✅ IP Whitelisting]
end
style FW fill:#ff9999
style SSL fill:#99ff99
style FB fill:#ffcc99
```
### 🔐 Security settings
**Firewall (UFW):**
- ✅ All ports closed except SSH, HTTP, and HTTPS
- ✅ Access allowed only to the required services
- ✅ Suspicious traffic blocked
**SSL/TLS:**
- ✅ Automatic Let's Encrypt certificates
- ✅ Forced HTTP → HTTPS redirect
- ✅ Modern cipher suites
**Rate Limiting:**
- ✅ API requests limited (10 req/s)
- ✅ Monitoring requests limited (2 req/s)
- ✅ Protection against DDoS attacks
**Fail2ban:**
- ✅ Blocks brute-force attacks on SSH
- ✅ Protects against API abuse
- ✅ Automatic unban after the ban period
---
## 📈 Monitoring and logs
### 📊 System metrics
```mermaid
graph LR
subgraph "Monitoring Stack"
APP[📱 Application Metrics]
SYS[💻 System Metrics]
NET[🌐 Network Metrics]
SEC[🔒 Security Metrics]
end
subgraph "Log Files"
APPLOG[📄 /opt/logs/app.log]
SYSLOG[📄 /var/log/syslog]
NGINX[📄 /var/log/nginx/]
FAIL2BAN[📄 /var/log/fail2ban.log]
end
APP --> APPLOG
SYS --> SYSLOG
NET --> NGINX
SEC --> FAIL2BAN
subgraph "Monitoring Tools"
CRON[⏰ Cron Monitor<br/>Every 5 minutes]
WEB[🌐 Web Dashboard<br/>Real-time]
ALERT[🚨 Alert System<br/>Critical Events]
end
APPLOG --> WEB
SYSLOG --> WEB
NGINX --> WEB
FAIL2BAN --> WEB
WEB --> ALERT
CRON --> ALERT
```
### 📜 Useful monitoring commands
```bash
# View service logs
journalctl -u my-network -f
# View nginx logs
tail -f /var/log/nginx/access.log
tail -f /var/log/nginx/error.log
# System monitoring
tail -f /opt/logs/monitor.log
# Fail2ban statistics
fail2ban-client status
# Check disk space
df -h
# Monitor network connections
netstat -tlnp | grep :15100
```
---
## 🔧 Troubleshooting
### ⚠️ Common problems and solutions
#### 1. The service does not start
```bash
# Check the status
systemctl status my-network
# View the logs
journalctl -u my-network -n 50
# Restart the service
systemctl restart my-network
# Check the ports
netstat -tlnp | grep :15100
```
#### 2. The SSL certificate does not work
```bash
# Check the certificates
certbot certificates
# Renew the certificate
certbot renew --dry-run
# Reload nginx
systemctl reload nginx
# Check the nginx configuration
nginx -t
```
#### 3. The firewall blocks connections
```bash
# Check the firewall rules
ufw status numbered
# Add a rule
ufw allow from IP_ADDRESS to any port 15100
# Check the logs
grep UFW /var/log/syslog
```
#### 4. Synchronization does not work
```bash
# Check the synchronization status
curl https://your-domain.com/api/my/sync/status
# Restart synchronization
curl -X POST https://your-domain.com/api/my/sync/stop
curl -X POST https://your-domain.com/api/my/sync/start
# Check the peers
curl https://your-domain.com/api/my/node/peers
```
---
## 🎨 API request examples
### 📡 Basic operations
```bash
# Get node information
curl -X GET https://your-domain.com/api/my/node/info \
-H "Content-Type: application/json"
# Add a new peer
curl -X POST https://your-domain.com/api/my/peer/add \
-H "Content-Type: application/json" \
-d '{
"host": "peer.example.com",
"port": 15100,
"ssl": true
}'
# Get network statistics
curl -X GET https://your-domain.com/api/my/network/stats \
-H "Content-Type: application/json"
# Start a full synchronization
curl -X POST https://your-domain.com/api/my/sync/start \
-H "Content-Type: application/json" \
-d '{
"force": true,
"verify": true
}'
```
### 📊 Monitoring via the API
```bash
# JSON metrics for integration with external systems
curl -X GET https://your-domain.com/api/my/monitor/api \
-H "Content-Type: application/json" | jq
# Example response:
{
"node": {
"id": "node-1234567890",
"status": "active",
"uptime": 86400,
"version": "2.0.0"
},
"network": {
"peers_count": 5,
"active_connections": 3,
"total_bandwidth": "1.2GB",
"sync_status": "synchronized"
},
"system": {
"cpu_usage": 25.5,
"memory_usage": 512,
"disk_usage": 75.2,
"load_average": [0.5, 0.3, 0.2]
}
}
```
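The JSON metrics endpoint lends itself to simple external health checks. A hedged Python example that polls it once and flags high CPU usage, assuming the response shape shown above (the URL and the 80% threshold are placeholders):

```python
import json
import urllib.request

MONITOR_URL = "https://your-domain.com/api/my/monitor/api"  # placeholder domain
CPU_ALERT_THRESHOLD = 80.0  # percent, arbitrary example value


def check_node() -> None:
    """Fetch the monitor API once and warn when CPU usage crosses the threshold."""
    with urllib.request.urlopen(MONITOR_URL, timeout=10) as resp:
        metrics = json.load(resp)

    cpu = metrics["system"]["cpu_usage"]
    print(f"node={metrics['node']['id']} status={metrics['node']['status']} cpu={cpu}%")
    if cpu > CPU_ALERT_THRESHOLD:
        print("WARNING: CPU usage above threshold")


if __name__ == "__main__":
    check_node()
```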
---
## 🔄 Updating the system
### 📦 Updating MY Network
```bash
# Stop the service
systemctl stop my-network
# Update the code
cd /opt/my-network/my-uploader-bot
git pull origin main
# Update the dependencies
source venv/bin/activate
pip install -r requirements_new.txt --upgrade
# Apply database migrations (if any)
python -m alembic upgrade head
# Start the service
systemctl start my-network
# Check the status
systemctl status my-network
```
### 🔒 Updating the security stack
```bash
# Update system packages
apt update && apt upgrade -y
# Renew SSL certificates
certbot renew
# Reload the fail2ban rules
systemctl restart fail2ban
# Verify the security status
ufw status
fail2ban-client status
certbot certificates
```
---
## 📋 Deployment checklist
### ✅ Pre-deployment checklist
- [ ] **Server prepared** (Ubuntu 20.04+)
- [ ] **Domain configured** (A record points to the server)
- [ ] **SSH access configured** (keys, hardened port)
- [ ] **Email for SSL provided** (a working email address)
### ✅ Installation checklist
- [ ] **Step 1: Base installation**
- [ ] `sudo bash install_service.sh` completed successfully
- [ ] Service running: `systemctl status my-network`
- [ ] API responds: `curl http://localhost:15100/api/my/health`
- [ ] **Step 2: Security**
- [ ] `sudo bash secure_service.sh` completed successfully
- [ ] SSL works: `curl https://your-domain.com/api/my/health`
- [ ] Firewall active: `ufw status`
- [ ] Fail2ban active: `systemctl status fail2ban`
### ✅ Post-deployment checklist
- [ ] **Functionality**
- [ ] Web monitoring reachable: `https://your-domain.com/api/my/monitor/`
- [ ] API endpoints respond correctly
- [ ] Synchronization works (if peers are configured)
- [ ] Logs are written without errors
- [ ] **Security**
- [ ] SSL A+ rating (verify on ssllabs.com)
- [ ] Only the required ports are open
- [ ] Rate limiting works
- [ ] Backups and monitoring configured
---
## 📞 Support
### 🆘 Getting help
1. **Check the logs:**
```bash
journalctl -u my-network -n 100
tail -f /opt/logs/monitor.log
```
2. **Check the service status:**
```bash
systemctl status my-network nginx fail2ban
```
3. **Check network connectivity:**
```bash
curl -v https://your-domain.com/api/my/health
```
4. **Collect diagnostic information:**
```bash
/opt/monitor.sh
ufw status numbered
df -h
free -h
```
### 📧 Contacts
- **Documentation:** `/opt/my-network/my-uploader-bot/DOCS_RU.md`
- **Configuration:** `/opt/my-network-config.txt`
- **Logs:** `/opt/logs/` and `/var/log/`
---
## 🎉 Conclusion
**MY Network v2.0** is a complete, production-ready system for building distributed content networks. It provides:
- ⚡ **Simple installation** in two commands
- 🔒 **Strong security** out of the box
- 📊 **Full real-time monitoring**
- 🔄 **Automatic synchronization** between nodes
- 🌐 **Scalability** to any number of nodes
The system is ready to use and can be deployed on any modern Linux server within minutes.
---
*MY Network v2.0 documentation | Version 1.0 | 2025*

View File

@ -1,27 +1,130 @@
# Multi-stage Dockerfile for optimized production builds
FROM python:3.11-slim as base

# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1 \
    POETRY_VERSION=1.6.1

# Install system dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    ffmpeg \
    libmagic1 \
    libpq-dev \
    pkg-config \
    && rm -rf /var/lib/apt/lists/*

# Install Poetry
RUN pip install poetry==$POETRY_VERSION

# Configure Poetry
ENV POETRY_NO_INTERACTION=1 \
    POETRY_VENV_IN_PROJECT=1 \
    POETRY_CACHE_DIR=/tmp/poetry_cache

WORKDIR /app

# Copy dependency files
COPY pyproject.toml poetry.lock ./

# Development stage
FROM base as development

# Install dependencies including dev dependencies
RUN poetry install --with dev && rm -rf $POETRY_CACHE_DIR

# Copy source code
COPY . .

# Set development environment
ENV PYTHONPATH=/app
ENV DEBUG=true

# Expose ports
EXPOSE 15100 9090

# Default command for development
CMD ["poetry", "run", "python", "-m", "app"]

# Production dependencies stage
FROM base as deps

# Install only production dependencies
RUN poetry install --only=main && rm -rf $POETRY_CACHE_DIR

# Production stage
FROM python:3.11-slim as production

# Install runtime dependencies only
RUN apt-get update && apt-get install -y \
    ca-certificates \
    curl \
    ffmpeg \
    libmagic1 \
    libpq5 \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

# Create non-root user
RUN groupadd -r appuser && useradd -r -g appuser appuser

# Set working directory
WORKDIR /app

# Copy virtual environment from deps stage
COPY --from=deps /app/.venv /app/.venv

# Add virtual environment to PATH
ENV PATH="/app/.venv/bin:$PATH"

# Copy application code
COPY --chown=appuser:appuser . .

# Create necessary directories
RUN mkdir -p /app/data /app/logs && \
    chown -R appuser:appuser /app/data /app/logs

# Set production environment
ENV PYTHONPATH=/app
ENV DEBUG=false

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:15100/health || exit 1

# Switch to non-root user
USER appuser

# Expose ports
EXPOSE 15100 9090

# Default command
CMD ["python", "-m", "app"]

# Testing stage
FROM development as testing

# Install test dependencies
RUN poetry install --with dev,test

# Run tests
RUN poetry run pytest tests/ --cov=app --cov-report=term-missing

# Security scanning stage
FROM production as security

# Switch back to root for security scanning
USER root

# Install security tools
RUN pip install safety bandit

# Run security checks
RUN safety check
RUN bandit -r app/ -f json -o security-report.json || true

# Switch back to app user
USER appuser

View File

@ -1,17 +1,36 @@
"""Alembic environment configuration for async database migrations."""
import asyncio
import os
from logging.config import fileConfig
from typing import Any

from alembic import context
from sqlalchemy import pool
from sqlalchemy.engine import Connection
from sqlalchemy.ext.asyncio import async_engine_from_config

# Import your models here
from app.core.models.base import Base
from app.core.models.user import User, APIKey, UserSession
from app.core.models.content import Content, ContentVersion, FileUpload, UserSubscription
from app.core.models.blockchain import Wallet, Transaction, BlockchainNFT, BlockchainDeFiPosition, BlockchainStaking, BlockchainTokenBalance

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# Set the target metadata for autogenerate support
target_metadata = Base.metadata

# Configure database URL from environment variable
database_url = os.getenv("DATABASE_URL", "postgresql+asyncpg://postgres:password@localhost:5432/myuploader")
config.set_main_option("sqlalchemy.url", database_url)

def run_migrations_offline() -> None:
@ -24,7 +43,6 @@ def run_migrations_offline() -> None:
    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = config.get_main_option("sqlalchemy.url")
    context.configure(
@ -32,32 +50,53 @@ def run_migrations_offline() -> None:
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
        compare_type=True,
        compare_server_default=True,
        include_schemas=True,
    )

    with context.begin_transaction():
        context.run_migrations()

def do_run_migrations(connection: Connection) -> None:
    """Execute migrations with the given connection."""
    context.configure(
        connection=connection,
        target_metadata=target_metadata,
        compare_type=True,
        compare_server_default=True,
        include_schemas=True,
        render_as_batch=True,  # For better SQLite compatibility if needed
    )

    with context.begin_transaction():
        context.run_migrations()

async def run_async_migrations() -> None:
    """Run migrations in async mode."""
    configuration = config.get_section(config.config_ini_section, {})

    # Override the database URL if it's set in environment
    if database_url:
        configuration["sqlalchemy.url"] = database_url

    connectable = async_engine_from_config(
        configuration,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,
    )

    async with connectable.connect() as connection:
        await connection.run_sync(do_run_migrations)

    await connectable.dispose()

def run_migrations_online() -> None:
    """Run migrations in 'online' mode."""
    asyncio.run(run_async_migrations())

if context.is_offline_mode():
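With this environment file in place, migrations can be driven either through the `alembic` CLI or through Alembic's Python command API. A small sketch of the latter; the `alembic.ini` path is assumed to sit in the project root:

```python
"""Apply or roll back migrations programmatically (sketch)."""
from alembic import command
from alembic.config import Config

# Assumes alembic.ini lives in the project root next to the migrations folder.
alembic_cfg = Config("alembic.ini")

# Upgrade the database to the latest revision.
command.upgrade(alembic_cfg, "head")

# Roll back a single revision if needed:
# command.downgrade(alembic_cfg, "-1")
```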

View File

@ -5,22 +5,22 @@ Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}

def upgrade() -> None:
    """Upgrade database schema."""
    ${upgrades if upgrades else "pass"}

def downgrade() -> None:
    """Downgrade database schema."""
    ${downgrades if downgrades else "pass"}

View File

@ -0,0 +1,382 @@
"""Initial database tables
Revision ID: 001
Revises:
Create Date: 2025-01-02 16:51:00.000000
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '001'
down_revision = None
branch_labels = None
depends_on = None
def upgrade() -> None:
"""Create initial database tables."""
# Create users table
op.create_table(
'users',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('username', sa.String(50), nullable=False, unique=True),
sa.Column('email', sa.String(255), nullable=False, unique=True),
sa.Column('password_hash', sa.String(255), nullable=False),
sa.Column('first_name', sa.String(100)),
sa.Column('last_name', sa.String(100)),
sa.Column('is_active', sa.Boolean(), default=True, nullable=False),
sa.Column('is_verified', sa.Boolean(), default=False, nullable=False),
sa.Column('is_superuser', sa.Boolean(), default=False, nullable=False),
sa.Column('avatar_url', sa.String(500)),
sa.Column('bio', sa.Text()),
sa.Column('last_login_at', sa.DateTime(timezone=True)),
sa.Column('login_count', sa.Integer(), default=0),
sa.Column('settings', postgresql.JSONB()),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for users
op.create_index('ix_users_username', 'users', ['username'])
op.create_index('ix_users_email', 'users', ['email'])
op.create_index('ix_users_created_at', 'users', ['created_at'])
op.create_index('ix_users_is_active', 'users', ['is_active'])
# Create API keys table
op.create_table(
'api_keys',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('name', sa.String(100), nullable=False),
sa.Column('key_hash', sa.String(255), nullable=False, unique=True),
sa.Column('key_prefix', sa.String(20), nullable=False),
sa.Column('permissions', postgresql.JSONB(), default={}),
sa.Column('is_active', sa.Boolean(), default=True, nullable=False),
sa.Column('expires_at', sa.DateTime(timezone=True)),
sa.Column('last_used_at', sa.DateTime(timezone=True)),
sa.Column('usage_count', sa.Integer(), default=0),
sa.Column('rate_limit', sa.Integer(), default=1000),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for API keys
op.create_index('ix_api_keys_user_id', 'api_keys', ['user_id'])
op.create_index('ix_api_keys_key_hash', 'api_keys', ['key_hash'])
op.create_index('ix_api_keys_is_active', 'api_keys', ['is_active'])
# Create user sessions table
op.create_table(
'user_sessions',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('session_token', sa.String(255), nullable=False, unique=True),
sa.Column('refresh_token', sa.String(255), nullable=False, unique=True),
sa.Column('user_agent', sa.String(500)),
sa.Column('ip_address', sa.String(45)),
sa.Column('is_active', sa.Boolean(), default=True, nullable=False),
sa.Column('expires_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('last_activity_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for user sessions
op.create_index('ix_user_sessions_user_id', 'user_sessions', ['user_id'])
op.create_index('ix_user_sessions_session_token', 'user_sessions', ['session_token'])
op.create_index('ix_user_sessions_is_active', 'user_sessions', ['is_active'])
op.create_index('ix_user_sessions_expires_at', 'user_sessions', ['expires_at'])
# Create content table
op.create_table(
'content',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('title', sa.String(255), nullable=False),
sa.Column('description', sa.Text()),
sa.Column('content_type', sa.String(50), nullable=False),
sa.Column('file_path', sa.String(500)),
sa.Column('file_size', sa.BigInteger()),
sa.Column('file_hash', sa.String(64)),
sa.Column('mime_type', sa.String(100)),
sa.Column('is_public', sa.Boolean(), default=False, nullable=False),
sa.Column('is_featured', sa.Boolean(), default=False, nullable=False),
sa.Column('view_count', sa.Integer(), default=0),
sa.Column('download_count', sa.Integer(), default=0),
sa.Column('like_count', sa.Integer(), default=0),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('tags', postgresql.ARRAY(sa.String(50))),
sa.Column('thumbnail_url', sa.String(500)),
sa.Column('preview_url', sa.String(500)),
sa.Column('status', sa.String(20), default='draft', nullable=False),
sa.Column('published_at', sa.DateTime(timezone=True)),
sa.Column('expires_at', sa.DateTime(timezone=True)),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for content
op.create_index('ix_content_user_id', 'content', ['user_id'])
op.create_index('ix_content_content_type', 'content', ['content_type'])
op.create_index('ix_content_is_public', 'content', ['is_public'])
op.create_index('ix_content_status', 'content', ['status'])
op.create_index('ix_content_created_at', 'content', ['created_at'])
op.create_index('ix_content_published_at', 'content', ['published_at'])
op.create_index('ix_content_file_hash', 'content', ['file_hash'])
op.create_index('ix_content_tags', 'content', ['tags'], postgresql_using='gin')
# Create content versions table
op.create_table(
'content_versions',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('content_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('content.id', ondelete='CASCADE'), nullable=False),
sa.Column('version_number', sa.Integer(), nullable=False),
sa.Column('title', sa.String(255), nullable=False),
sa.Column('description', sa.Text()),
sa.Column('file_path', sa.String(500)),
sa.Column('file_size', sa.BigInteger()),
sa.Column('file_hash', sa.String(64)),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('change_summary', sa.Text()),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for content versions
op.create_index('ix_content_versions_content_id', 'content_versions', ['content_id'])
op.create_index('ix_content_versions_version_number', 'content_versions', ['version_number'])
op.create_index('ix_content_versions_created_at', 'content_versions', ['created_at'])
# Create file uploads table
op.create_table(
'file_uploads',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('filename', sa.String(255), nullable=False),
sa.Column('original_filename', sa.String(255), nullable=False),
sa.Column('file_path', sa.String(500)),
sa.Column('file_size', sa.BigInteger(), nullable=False),
sa.Column('file_hash', sa.String(64)),
sa.Column('mime_type', sa.String(100)),
sa.Column('chunk_size', sa.Integer()),
sa.Column('total_chunks', sa.Integer()),
sa.Column('uploaded_chunks', sa.Integer(), default=0),
sa.Column('upload_session_id', sa.String(100)),
sa.Column('status', sa.String(20), default='pending', nullable=False),
sa.Column('processed', sa.Boolean(), default=False, nullable=False),
sa.Column('processing_started_at', sa.DateTime(timezone=True)),
sa.Column('processing_completed_at', sa.DateTime(timezone=True)),
sa.Column('error_message', sa.Text()),
sa.Column('retry_count', sa.Integer(), default=0),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('expires_at', sa.DateTime(timezone=True)),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for file uploads
op.create_index('ix_file_uploads_user_id', 'file_uploads', ['user_id'])
op.create_index('ix_file_uploads_status', 'file_uploads', ['status'])
op.create_index('ix_file_uploads_processed', 'file_uploads', ['processed'])
op.create_index('ix_file_uploads_upload_session_id', 'file_uploads', ['upload_session_id'])
op.create_index('ix_file_uploads_file_hash', 'file_uploads', ['file_hash'])
op.create_index('ix_file_uploads_expires_at', 'file_uploads', ['expires_at'])
# Create user subscriptions table
op.create_table(
'user_subscriptions',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('plan_name', sa.String(50), nullable=False),
sa.Column('status', sa.String(20), default='active', nullable=False),
sa.Column('storage_limit', sa.BigInteger(), nullable=False),
sa.Column('bandwidth_limit', sa.BigInteger(), nullable=False),
sa.Column('file_count_limit', sa.Integer(), nullable=False),
sa.Column('features', postgresql.JSONB()),
sa.Column('price', sa.Numeric(10, 2)),
sa.Column('currency', sa.String(3), default='USD'),
sa.Column('billing_cycle', sa.String(20), default='monthly'),
sa.Column('starts_at', sa.DateTime(timezone=True), nullable=False),
sa.Column('expires_at', sa.DateTime(timezone=True)),
sa.Column('auto_renew', sa.Boolean(), default=True, nullable=False),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for user subscriptions
op.create_index('ix_user_subscriptions_user_id', 'user_subscriptions', ['user_id'])
op.create_index('ix_user_subscriptions_status', 'user_subscriptions', ['status'])
op.create_index('ix_user_subscriptions_expires_at', 'user_subscriptions', ['expires_at'])
# Create wallets table
op.create_table(
'wallets',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('user_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('users.id', ondelete='CASCADE'), nullable=False),
sa.Column('address', sa.String(100), nullable=False, unique=True),
sa.Column('network', sa.String(20), default='mainnet', nullable=False),
sa.Column('wallet_type', sa.String(20), default='ton', nullable=False),
sa.Column('balance', sa.Numeric(20, 8), default=0),
sa.Column('public_key', sa.String(200)),
sa.Column('encrypted_private_key', sa.Text()),
sa.Column('derivation_path', sa.String(100)),
sa.Column('is_active', sa.Boolean(), default=True, nullable=False),
sa.Column('is_primary', sa.Boolean(), default=False, nullable=False),
sa.Column('last_sync_at', sa.DateTime(timezone=True)),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for wallets
op.create_index('ix_wallets_user_id', 'wallets', ['user_id'])
op.create_index('ix_wallets_address', 'wallets', ['address'])
op.create_index('ix_wallets_network', 'wallets', ['network'])
op.create_index('ix_wallets_is_active', 'wallets', ['is_active'])
# Create transactions table
op.create_table(
'transactions',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('wallet_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('wallets.id', ondelete='CASCADE'), nullable=False),
sa.Column('tx_hash', sa.String(100), unique=True),
sa.Column('from_address', sa.String(100), nullable=False),
sa.Column('to_address', sa.String(100), nullable=False),
sa.Column('amount', sa.Numeric(20, 8), nullable=False),
sa.Column('fee', sa.Numeric(20, 8)),
sa.Column('gas_limit', sa.BigInteger()),
sa.Column('gas_used', sa.BigInteger()),
sa.Column('gas_price', sa.Numeric(20, 8)),
sa.Column('nonce', sa.BigInteger()),
sa.Column('block_number', sa.BigInteger()),
sa.Column('block_hash', sa.String(100)),
sa.Column('transaction_index', sa.Integer()),
sa.Column('status', sa.String(20), default='pending', nullable=False),
sa.Column('transaction_type', sa.String(20), default='transfer', nullable=False),
sa.Column('confirmations', sa.Integer(), default=0),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for transactions
op.create_index('ix_transactions_wallet_id', 'transactions', ['wallet_id'])
op.create_index('ix_transactions_tx_hash', 'transactions', ['tx_hash'])
op.create_index('ix_transactions_from_address', 'transactions', ['from_address'])
op.create_index('ix_transactions_to_address', 'transactions', ['to_address'])
op.create_index('ix_transactions_status', 'transactions', ['status'])
op.create_index('ix_transactions_created_at', 'transactions', ['created_at'])
op.create_index('ix_transactions_block_number', 'transactions', ['block_number'])
# Create blockchain NFTs table
op.create_table(
'blockchain_nfts',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('wallet_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('wallets.id', ondelete='CASCADE'), nullable=False),
sa.Column('token_id', sa.String(100), nullable=False),
sa.Column('collection_address', sa.String(100), nullable=False),
sa.Column('owner_address', sa.String(100), nullable=False),
sa.Column('token_uri', sa.String(500)),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('name', sa.String(255)),
sa.Column('description', sa.Text()),
sa.Column('image_url', sa.String(500)),
sa.Column('attributes', postgresql.JSONB()),
sa.Column('rarity_score', sa.Numeric(10, 4)),
sa.Column('last_price', sa.Numeric(20, 8)),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create unique constraint for NFTs
op.create_unique_constraint('uq_blockchain_nfts_token_collection', 'blockchain_nfts', ['token_id', 'collection_address'])
# Create indexes for blockchain NFTs
op.create_index('ix_blockchain_nfts_wallet_id', 'blockchain_nfts', ['wallet_id'])
op.create_index('ix_blockchain_nfts_collection_address', 'blockchain_nfts', ['collection_address'])
op.create_index('ix_blockchain_nfts_owner_address', 'blockchain_nfts', ['owner_address'])
# Create blockchain token balances table
op.create_table(
'blockchain_token_balances',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('wallet_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('wallets.id', ondelete='CASCADE'), nullable=False),
sa.Column('token_address', sa.String(100), nullable=False),
sa.Column('token_name', sa.String(100)),
sa.Column('token_symbol', sa.String(20)),
sa.Column('balance', sa.Numeric(30, 18), default=0, nullable=False),
sa.Column('decimals', sa.Integer(), default=18),
sa.Column('usd_value', sa.Numeric(20, 8)),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create unique constraint for token balances
op.create_unique_constraint('uq_token_balances_wallet_token', 'blockchain_token_balances', ['wallet_id', 'token_address'])
# Create indexes for token balances
op.create_index('ix_blockchain_token_balances_wallet_id', 'blockchain_token_balances', ['wallet_id'])
op.create_index('ix_blockchain_token_balances_token_address', 'blockchain_token_balances', ['token_address'])
# Create blockchain DeFi positions table
op.create_table(
'blockchain_defi_positions',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('wallet_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('wallets.id', ondelete='CASCADE'), nullable=False),
sa.Column('protocol_name', sa.String(100), nullable=False),
sa.Column('position_type', sa.String(50), nullable=False),
sa.Column('pool_address', sa.String(100)),
sa.Column('token_symbols', postgresql.ARRAY(sa.String(20))),
sa.Column('balance', sa.Numeric(30, 18), default=0),
sa.Column('usd_value', sa.Numeric(20, 8)),
sa.Column('yield_rate', sa.Numeric(10, 4)),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for DeFi positions
op.create_index('ix_blockchain_defi_positions_wallet_id', 'blockchain_defi_positions', ['wallet_id'])
op.create_index('ix_blockchain_defi_positions_protocol_name', 'blockchain_defi_positions', ['protocol_name'])
# Create blockchain staking table
op.create_table(
'blockchain_staking',
sa.Column('id', postgresql.UUID(as_uuid=True), primary_key=True),
sa.Column('wallet_id', postgresql.UUID(as_uuid=True), sa.ForeignKey('wallets.id', ondelete='CASCADE'), nullable=False),
sa.Column('validator_address', sa.String(100), nullable=False),
sa.Column('staked_amount', sa.Numeric(20, 8), nullable=False),
sa.Column('rewards_earned', sa.Numeric(20, 8), default=0),
sa.Column('status', sa.String(20), default='active', nullable=False),
sa.Column('delegation_time', sa.DateTime(timezone=True), nullable=False),
sa.Column('unlock_time', sa.DateTime(timezone=True)),
sa.Column('apy', sa.Numeric(10, 4)),
sa.Column('metadata', postgresql.JSONB()),
sa.Column('created_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
sa.Column('updated_at', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=False),
)
# Create indexes for staking
op.create_index('ix_blockchain_staking_wallet_id', 'blockchain_staking', ['wallet_id'])
op.create_index('ix_blockchain_staking_validator_address', 'blockchain_staking', ['validator_address'])
op.create_index('ix_blockchain_staking_status', 'blockchain_staking', ['status'])
def downgrade() -> None:
"""Drop all database tables."""
# Drop tables in reverse order to avoid foreign key constraints
op.drop_table('blockchain_staking')
op.drop_table('blockchain_defi_positions')
op.drop_table('blockchain_token_balances')
op.drop_table('blockchain_nfts')
op.drop_table('transactions')
op.drop_table('wallets')
op.drop_table('user_subscriptions')
op.drop_table('file_uploads')
op.drop_table('content_versions')
op.drop_table('content')
op.drop_table('user_sessions')
op.drop_table('api_keys')
op.drop_table('users')

View File

@ -1,135 +1,158 @@
"""
Main application entry point for my-uploader-bot.
Handles startup, shutdown, and application lifecycle management.
"""
import asyncio import asyncio
import signal
import sys import sys
import os from pathlib import Path
import time
import traceback
from asyncio import sleep
from datetime import datetime
startup_target = '__main__' from app.api import create_app
from app.core.config import get_settings
from app.core.database import init_database, close_database
from app.core.logging import get_logger, setup_logging
from app.core.background.ton_service import cleanup_ton_service
# Setup logging first
setup_logging()
logger = get_logger(__name__)
settings = get_settings()
class ApplicationManager:
"""Manages application lifecycle and graceful shutdown."""
def __init__(self):
self.app = None
self.shutdown_event = asyncio.Event()
self.tasks = []
async def startup(self):
"""Initialize application and all services."""
try: try:
startup_target = sys.argv[1] await logger.ainfo("Starting my-uploader-bot application", version="2.0.0")
except BaseException:
pass
from app.core._utils.create_maria_tables import create_maria_tables # Initialize database connections
from app.core.storage import engine await logger.ainfo("Initializing database connections")
if startup_target == '__main__': await init_database()
create_maria_tables(engine)
else:
time.sleep(7)
from app.api import app # Create Sanic application
from app.bot import dp as uploader_bot_dp await logger.ainfo("Creating Sanic application")
from app.client_bot import dp as client_bot_dp self.app = await create_app()
from app.core._config import SANIC_PORT, MYSQL_URI, PROJECT_HOST
from app.core.logger import make_log
if int(os.getenv("SANIC_MAINTENANCE", '0')) == 1: # Setup signal handlers for graceful shutdown
make_log("Global", "Application is in maintenance mode") self._setup_signal_handlers()
while True:
time.sleep(1)
from app.core.models import Memory await logger.ainfo(
"Application startup completed",
host=settings.HOST,
port=settings.PORT,
debug=settings.DEBUG,
workers=settings.WORKERS
)
except Exception as e:
async def queue_daemon(app): await logger.aerror("Application startup failed", error=str(e))
await sleep(3) await self.shutdown()
while True:
delayed_list = {k: v for k, v in app.ctx.memory._delayed_queue.items()}
for _execute_ts in delayed_list:
if _execute_ts <= datetime.now().timestamp():
del app.ctx.memory._delayed_queue[_execute_ts]
app.ctx.memory._execute_queue.append(delayed_list[_execute_ts])
await sleep(.7)
async def execute_queue(app):
telegram_bot_username = (await app.ctx.memory._telegram_bot.get_me()).username
client_telegram_bot_username = (await app.ctx.memory._client_telegram_bot.get_me()).username
make_log(None, f"Application normally started. HTTP port: {SANIC_PORT}")
make_log(None, f"Telegram bot: https://t.me/{telegram_bot_username}")
make_log(None, f"Client Telegram bot: https://t.me/{client_telegram_bot_username}")
make_log(None, f"MariaDB host: {MYSQL_URI.split('@')[1].split('/')[0].replace('/', '')}")
make_log(None, f"API host: {PROJECT_HOST}")
while True:
try:
_cmd = app.ctx.memory._execute_queue.pop(0)
except IndexError:
await sleep(.05)
continue
_fn = _cmd.pop(0)
assert _fn
_args = _cmd.pop(0)
assert type(_args) is tuple
try:
_kwargs = _cmd.pop(0)
assert type(_kwargs) is dict
except IndexError:
_kwargs = {}
try:
make_log("Queue.execute", f"{_fn} {_args} {_kwargs}", level='debug')
await _fn(*_args, **_kwargs)
except BaseException as e:
make_log("Queue.execute", f"{_fn} {_args} {_kwargs} => Error: {e}" + '\n' + str(traceback.format_exc()))
if __name__ == '__main__':
main_memory = Memory()
if startup_target == '__main__':
app.ctx.memory = main_memory
for _target in [uploader_bot_dp, client_bot_dp]:
_target._s_memory = app.ctx.memory
app.ctx.memory._app = app
app.add_task(execute_queue(app))
app.add_task(queue_daemon(app))
app.add_task(uploader_bot_dp.start_polling(app.ctx.memory._telegram_bot))
app.add_task(client_bot_dp.start_polling(app.ctx.memory._client_telegram_bot))
app.run(host='0.0.0.0', port=SANIC_PORT)
else:
time.sleep(2)
startup_fn = None
if startup_target == 'indexer':
from app.core.background.indexer_service import main_fn as target_fn
time.sleep(1)
elif startup_target == 'uploader':
from app.core.background.uploader_service import main_fn as target_fn
time.sleep(3)
elif startup_target == 'ton_daemon':
from app.core.background.ton_service import main_fn as target_fn
time.sleep(5)
elif startup_target == 'license_index':
from app.core.background.license_service import main_fn as target_fn
time.sleep(7)
elif startup_target == 'convert_process':
from app.core.background.convert_service import main_fn as target_fn
time.sleep(9)
startup_fn = startup_fn or target_fn
assert startup_fn
async def wrapped_startup_fn(*args):
try:
await startup_fn(*args)
except BaseException as e:
make_log(startup_target[0].upper() + startup_target[1:], f"Error: {e}" + '\n' + str(traceback.format_exc()),
level='error')
sys.exit(1) sys.exit(1)
loop = asyncio.get_event_loop() async def run(self):
"""Run the application server."""
try: try:
loop.run_until_complete(wrapped_startup_fn(main_memory)) if not self.app:
except BaseException as e: await self.startup()
make_log(startup_target[0].upper() + startup_target[1:], f"Error: {e}" + '\n' + str(traceback.format_exc()),
level='error') # Run the Sanic server
sys.exit(0) await logger.ainfo("Starting HTTP server")
server_config = {
"host": settings.HOST,
"port": settings.PORT,
"debug": settings.DEBUG,
"access_log": settings.DEBUG,
"auto_reload": settings.AUTO_RELOAD,
"workers": settings.WORKERS if not settings.DEBUG else 1,
}
# Start server
await self.app.create_server(**server_config, return_asyncio_server=True)
# Wait for shutdown signal
await self.shutdown_event.wait()
except KeyboardInterrupt:
await logger.ainfo("Received keyboard interrupt, shutting down")
except Exception as e:
await logger.aerror("Server error", error=str(e))
finally: finally:
loop.close() await self.shutdown()
async def shutdown(self):
"""Gracefully shutdown the application."""
await logger.ainfo("Initiating graceful shutdown")
try:
# Cancel all background tasks
for task in self.tasks:
if not task.done():
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
# Cleanup services
await logger.ainfo("Cleaning up services")
# Cleanup TON service
await cleanup_ton_service()
# Close database connections
await close_database()
await logger.ainfo("Graceful shutdown completed")
except Exception as e:
await logger.aerror("Error during shutdown", error=str(e))
def _setup_signal_handlers(self):
"""Setup signal handlers for graceful shutdown."""
def signal_handler(signum, frame):
signal_name = signal.Signals(signum).name
logger.info(f"Received {signal_name}, initiating shutdown")
self.shutdown_event.set()
# Register signal handlers
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# Unix-specific signals
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, signal_handler)
async def main():
"""Main application entry point."""
app_manager = ApplicationManager()
try:
await app_manager.run()
except Exception as e:
logger.error(f"Application failed: {e}")
sys.exit(1)
if __name__ == "__main__":
# Ensure we're running from the correct directory
app_root = Path(__file__).parent.parent
if app_root.exists():
import os
os.chdir(app_root)
# Run the application
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\nApplication interrupted by user")
sys.exit(0)
except Exception as e:
print(f"Failed to start application: {e}")
sys.exit(1)

View File

@ -1,80 +1,451 @@
"""
Enhanced Sanic API application with async support and monitoring
"""
import asyncio
from contextlib import asynccontextmanager
from datetime import datetime
from typing import Dict, Any, Optional
from sanic import Sanic, Request, HTTPResponse
from sanic.response import json as json_response
from sanic.exceptions import SanicException
import structlog
from app.core.config import settings
from app.core.database import init_database, close_database
from app.core.logging import get_logger, AsyncContextLogger
from app.api.middleware import (
request_middleware,
response_middleware,
exception_middleware,
maintenance_middleware
)
logger = get_logger(__name__)
class EnhancedSanic(Sanic):
"""Enhanced Sanic application with additional features"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ctx.startup_tasks = []
self.ctx.shutdown_tasks = []
self.ctx.background_tasks = set()
async def startup(self):
"""Application startup tasks"""
async with AsyncContextLogger("app_startup"):
# Initialize database
await init_database()
logger.info("Database initialized")
# Initialize cache connections
from app.core.database import get_cache
cache = await get_cache()
await cache.redis.ping()
logger.info("Redis cache initialized")
# Run custom startup tasks
for task in self.ctx.startup_tasks:
try:
await task()
except Exception as e:
logger.error("Startup task failed", task=task.__name__, error=str(e))
logger.info("Application startup completed")
async def shutdown(self):
"""Application shutdown tasks"""
async with AsyncContextLogger("app_shutdown"):
# Cancel background tasks
for task in self.ctx.background_tasks:
if not task.done():
task.cancel()
# Wait for tasks to complete
if self.ctx.background_tasks:
await asyncio.gather(*self.ctx.background_tasks, return_exceptions=True)
# Run custom shutdown tasks
for task in self.ctx.shutdown_tasks:
try:
await task()
except Exception as e:
logger.error("Shutdown task failed", task=task.__name__, error=str(e))
# Close database connections
await close_database()
logger.info("Database connections closed")
logger.info("Application shutdown completed")
def add_startup_task(self, task):
"""Add startup task"""
self.ctx.startup_tasks.append(task)
def add_shutdown_task(self, task):
"""Add shutdown task"""
self.ctx.shutdown_tasks.append(task)
def add_background_task(self, coro):
"""Add background task"""
task = asyncio.create_task(coro)
self.ctx.background_tasks.add(task)
task.add_done_callback(self.ctx.background_tasks.discard)
return task
# Create Sanic app instance
app = EnhancedSanic(
name="my_uploader_bot",
configure_logging=False # We handle logging ourselves
)
# Configure app settings
app.config.update({
"REQUEST_MAX_SIZE": settings.MAX_FILE_SIZE,
"REQUEST_TIMEOUT": 60,
"RESPONSE_TIMEOUT": 60,
"KEEP_ALIVE_TIMEOUT": 5,
"KEEP_ALIVE": True,
"ACCESS_LOG": False, # We handle access logging in middleware
"AUTO_RELOAD": settings.DEBUG,
"DEBUG": settings.DEBUG,
})
# Register middleware
app.register_middleware(maintenance_middleware, "request")
app.register_middleware(request_middleware, "request")
app.register_middleware(response_middleware, "response")
# Global exception handler
@app.exception(Exception)
async def handle_exception(request: Request, exception: Exception):
"""Global exception handler"""
return await exception_middleware(request, exception)
# Health check endpoint
@app.get("/health")
async def health_check(request: Request):
"""Health check endpoint"""
try:
# Check database connection
from app.core.database import db_manager
async with db_manager.get_session() as session:
await session.execute("SELECT 1")
# Check Redis connection
from app.core.database import get_cache
cache = await get_cache()
await cache.redis.ping()
return json_response({
"status": "healthy",
"version": settings.PROJECT_VERSION,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error("Health check failed", error=str(e))
return json_response({
"status": "unhealthy",
"error": str(e),
"timestamp": datetime.utcnow().isoformat()
}, status=503)
# Metrics endpoint (if enabled)
if settings.METRICS_ENABLED:
@app.get("/metrics")
async def metrics_endpoint(request: Request):
"""Prometheus metrics endpoint"""
try:
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
metrics_data = generate_latest()
return HTTPResponse(
body=metrics_data,
headers={"Content-Type": CONTENT_TYPE_LATEST},
status=200
)
except Exception as e:
logger.error("Metrics generation failed", error=str(e))
return json_response({"error": "Metrics unavailable"}, status=503)
# System info endpoint
@app.get("/api/system/info")
async def system_info(request: Request):
"""System information endpoint"""
try:
import psutil
import sys
# Get system metrics
memory = psutil.virtual_memory()
disk = psutil.disk_usage('/')
info = {
"application": {
"name": settings.PROJECT_NAME,
"version": settings.PROJECT_VERSION,
"python_version": sys.version,
"debug": settings.DEBUG,
},
"system": {
"cpu_percent": psutil.cpu_percent(),
"memory": {
"total": memory.total,
"available": memory.available,
"percent": memory.percent
},
"disk": {
"total": disk.total,
"free": disk.free,
"percent": (disk.used / disk.total) * 100
}
},
"services": {
"database": "connected",
"redis": "connected",
"indexer": "running" if settings.INDEXER_ENABLED else "disabled",
"ton_daemon": "running" if settings.TON_DAEMON_ENABLED else "disabled",
}
}
return json_response(info)
except Exception as e:
logger.error("System info failed", error=str(e))
return json_response({"error": "System info unavailable"}, status=500)
# Register API routes
def register_routes():
"""Register all API routes"""
from app.api.routes import (
auth_routes,
content_routes,
storage_routes,
blockchain_routes,
admin_routes,
user_routes,
system_routes
)
# Register route blueprints
app.blueprint(auth_routes.bp)
app.blueprint(content_routes.bp)
app.blueprint(storage_routes.bp)
app.blueprint(blockchain_routes.bp)
app.blueprint(admin_routes.bp)
app.blueprint(user_routes.bp)
app.blueprint(system_routes.bp)
# Try to add MY Network routes
try:
from app.api.routes import my_network_sanic, my_monitoring_sanic
# Create MY Network blueprints
app.blueprint(my_network_sanic.bp)
app.blueprint(my_monitoring_sanic.bp)
logger.info("MY Network routes registered")
except ImportError as e:
logger.warning("MY Network routes not available", error=str(e))
logger.info("API routes registered")
# Application lifecycle hooks
@app.before_server_start
async def before_server_start(app, loop):
"""Tasks to run before server starts"""
await app.startup()
@app.after_server_stop
async def after_server_stop(app, loop):
"""Tasks to run after server stops"""
await app.shutdown()
# Background task management
class BackgroundTaskManager:
"""Manager for background tasks"""
def __init__(self, app: EnhancedSanic):
self.app = app
self.tasks: Dict[str, asyncio.Task] = {}
async def start_service(self, name: str, service_func, *args, **kwargs):
"""Start a background service"""
if name in self.tasks:
logger.warning("Service already running", service=name)
return
logger.info("Starting background service", service=name)
task = self.app.add_background_task(service_func(*args, **kwargs))
self.tasks[name] = task
# Add error handling
task.add_done_callback(lambda t: self._handle_task_completion(name, t))
def _handle_task_completion(self, name: str, task: asyncio.Task):
"""Handle background task completion"""
if name in self.tasks:
del self.tasks[name]
if task.cancelled():
logger.info("Background service cancelled", service=name)
elif task.exception():
logger.error("Background service failed", service=name, error=str(task.exception()))
else:
logger.info("Background service completed", service=name)
async def stop_service(self, name: str):
"""Stop a background service"""
if name not in self.tasks:
logger.warning("Service not running", service=name)
return
logger.info("Stopping background service", service=name)
task = self.tasks[name]
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
async def stop_all_services(self):
"""Stop all background services"""
for name in list(self.tasks.keys()):
await self.stop_service(name)
def get_service_status(self) -> Dict[str, str]:
"""Get status of all services"""
status = {}
for name, task in self.tasks.items():
if task.done():
if task.cancelled():
status[name] = "cancelled"
elif task.exception():
status[name] = "failed"
else:
status[name] = "completed"
else:
status[name] = "running"
return status
# Initialize background task manager
task_manager = BackgroundTaskManager(app)
app.ctx.task_manager = task_manager
# Service startup functions
async def start_background_services():
"""Start all background services"""
from app.core.background import (
indexer_service,
ton_service,
license_service,
convert_service,
uploader_service
)
if settings.INDEXER_ENABLED:
await task_manager.start_service("indexer", indexer_service.main_fn)
if settings.TON_DAEMON_ENABLED:
await task_manager.start_service("ton_daemon", ton_service.main_fn)
if settings.LICENSE_SERVICE_ENABLED:
await task_manager.start_service("license_service", license_service.main_fn)
if settings.CONVERT_SERVICE_ENABLED:
await task_manager.start_service("convert_service", convert_service.main_fn)
# Try to start the MY Network service
try:
await start_my_network_service()
except Exception as e:
logger.warning("MY Network service not started", error=str(e))
logger.info("Background services started")
async def start_my_network_service():
"""Запустить MY Network сервис."""
try:
from app.core.my_network.node_service import NodeService
# Create and start the node service
node_service = NodeService()
# Add it as a background task
async def my_network_task():
await node_service.start()
# Keep the service alive
try:
while True:
await asyncio.sleep(60)  # Check every minute
# Check the service state
if not node_service.is_running:
logger.warning("MY Network service stopped unexpectedly")
break
except asyncio.CancelledError:
logger.info("MY Network service shutdown requested")
await node_service.stop()
raise
except Exception as e:
logger.error("MY Network service error", error=str(e))
await node_service.stop()
raise
await task_manager.start_service("my_network", my_network_task)
logger.info("MY Network service started")
except ImportError as e:
logger.info("MY Network modules not available", error=str(e))
except Exception as e:
logger.error("Failed to start MY Network service", error=str(e))
raise
# Add startup task
app.add_startup_task(start_background_services)
app.add_shutdown_task(task_manager.stop_all_services)
# Register routes
register_routes()
# Main application factory
def create_app() -> EnhancedSanic:
"""Application factory"""
return app
# Development server runner
async def run_dev_server():
"""Run development server"""
await app.create_server(
host="0.0.0.0",
port=settings.SANIC_PORT,
debug=settings.DEBUG,
auto_reload=settings.DEBUG,
access_log=False
)
if __name__ == "__main__":
logger.info("Starting development server")
asyncio.run(run_dev_server())

416
app/api/docs.py Normal file

@ -0,0 +1,416 @@
"""OpenAPI documentation configuration for my-uploader-bot API."""
from typing import Dict, Any
# API metadata
API_TITLE = "My Uploader Bot API"
API_VERSION = "2.0.0"
API_DESCRIPTION = """
# My Uploader Bot API
A comprehensive file upload and management system with blockchain integration.
## Features
- **File Upload & Management**: Chunked uploads, multiple storage backends, file processing
- **User Authentication**: JWT tokens, API keys, sessions management
- **Blockchain Integration**: TON blockchain wallet management, transactions
- **Content Management**: Version control, metadata, search functionality
- **Security**: Rate limiting, CORS, input validation, file encryption
- **Monitoring**: Prometheus metrics, structured logging, health checks
## Authentication
The API supports multiple authentication methods:
1. **JWT Bearer Token**: Use `Authorization: Bearer <token>` header
2. **API Key**: Use `X-API-Key: <api_key>` header
3. **Session Cookie**: Browser-based authentication
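For illustration, here is the same request carrying each credential style with Python `requests`; the `/api/v1/auth/me` profile endpoint is used only as an example, and the placeholder values are not real credentials:

```python
import requests

# 1. JWT bearer token
requests.get("https://api.myuploader.com/api/v1/auth/me",
             headers={"Authorization": "Bearer <token>"})

# 2. API key
requests.get("https://api.myuploader.com/api/v1/auth/me",
             headers={"X-API-Key": "<api_key>"})

# 3. Session cookie (browser-issued)
requests.get("https://api.myuploader.com/api/v1/auth/me",
             cookies={"session": "<session_cookie>"})
```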
## Rate Limiting
API endpoints are rate-limited based on user tier:
- Free tier: 100 requests per hour
- Premium tier: 1000 requests per hour
- Enterprise tier: 10000 requests per hour
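Rate limit state is also surfaced on responses through the `X-RateLimit-Limit`, `X-RateLimit-Remaining`, and `X-RateLimit-Reset` headers. When a limit is exceeded the API returns HTTP 429 with a body along these lines (the numeric values are indicative only):

```json
{
  "error": "Rate limit exceeded",
  "rate_limit": {
    "limit": 100,
    "remaining": 0,
    "reset_time": 1704067200,
    "window": 3600
  }
}
```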
## File Upload Process
1. **Initiate Upload**: POST `/api/v1/storage/upload/initiate` with file metadata
2. **Upload Chunks**: POST `/api/v1/storage/upload/chunk` for each chunk
3. **Complete Upload**: POST `/api/v1/storage/upload/complete` to finalize
4. **Processing**: File is automatically processed in the background
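A compressed sketch of this flow with Python `requests` follows. The initiate payload uses the documented fields; the `session_id`, `chunk_index`, and `chunk` names used in steps 2 and 3 are illustrative assumptions rather than a confirmed schema:

```python
import requests

API = "https://api.myuploader.com"
HEADERS = {"Authorization": "Bearer <your_token>"}

# 1. Initiate the upload session
init = requests.post(
    f"{API}/api/v1/storage/upload/initiate",
    headers=HEADERS,
    json={"filename": "example.jpg", "file_size": 1024000, "content_type": "image"},
).json()

# 2. Upload each chunk (field names here are assumptions)
with open("example.jpg", "rb") as f:
    for index, chunk in enumerate(iter(lambda: f.read(1048576), b"")):
        requests.post(
            f"{API}/api/v1/storage/upload/chunk",
            headers=HEADERS,
            files={"chunk": chunk},
            data={"session_id": init.get("session_id"), "chunk_index": index},
        )

# 3. Finalize the upload
requests.post(
    f"{API}/api/v1/storage/upload/complete",
    headers=HEADERS,
    json={"session_id": init.get("session_id")},
)
```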
## Error Handling
All errors follow RFC 7807 Problem Details format:
```json
{
"type": "https://api.myuploader.com/errors/validation",
"title": "Validation Error",
"status": 422,
"detail": "The request body contains invalid data",
"instance": "/api/v1/content/upload",
"errors": [
{
"field": "file_size",
"message": "File size exceeds maximum limit"
}
]
}
```
## Webhook Events
The API can send webhook notifications for:
- File upload completion
- Processing status updates
- Blockchain transaction confirmations
- User subscription changes
## SDKs and Examples
- Python SDK: `pip install myuploader-python`
- JavaScript SDK: `npm install @myuploader/js-sdk`
- Examples: https://github.com/myuploader/examples
## Support
- Documentation: https://docs.myuploader.com
- Support: support@myuploader.com
- Status: https://status.myuploader.com
"""
# OpenAPI tags
TAGS_METADATA = [
{
"name": "Authentication",
"description": "User authentication and session management endpoints",
},
{
"name": "Users",
"description": "User profile and account management",
},
{
"name": "Content",
"description": "Content management, search, and metadata operations",
},
{
"name": "Storage",
"description": "File upload, download, and storage operations",
},
{
"name": "Blockchain",
"description": "TON blockchain wallet and transaction management",
},
{
"name": "System",
"description": "System health, metrics, and administrative endpoints",
},
]
# Response examples
RESPONSE_EXAMPLES = {
"user_profile": {
"summary": "User profile example",
"value": {
"id": "123e4567-e89b-12d3-a456-426614174000",
"username": "john_doe",
"email": "john@example.com",
"first_name": "John",
"last_name": "Doe",
"is_active": True,
"is_verified": True,
"avatar_url": "https://cdn.myuploader.com/avatars/john_doe.jpg",
"bio": "Software developer and blockchain enthusiast",
"created_at": "2024-01-01T00:00:00Z",
"updated_at": "2024-01-01T00:00:00Z"
}
},
"content_item": {
"summary": "Content item example",
"value": {
"id": "123e4567-e89b-12d3-a456-426614174001",
"title": "My Awesome Video",
"description": "A great video about blockchain development",
"content_type": "video",
"file_path": "uploads/user123/video_2024_01_01.mp4",
"file_size": 104857600,
"mime_type": "video/mp4",
"is_public": True,
"view_count": 1250,
"download_count": 95,
"like_count": 42,
"tags": ["blockchain", "tutorial", "development"],
"thumbnail_url": "https://cdn.myuploader.com/thumbnails/video_thumb.jpg",
"status": "published",
"created_at": "2024-01-01T00:00:00Z",
"updated_at": "2024-01-01T00:00:00Z"
}
},
"upload_session": {
"summary": "Upload session example",
"value": {
"session_id": "upload_123e4567-e89b-12d3-a456-426614174002",
"filename": "large_video.mp4",
"file_size": 1073741824,
"chunk_size": 1048576,
"total_chunks": 1024,
"uploaded_chunks": 512,
"status": "uploading",
"progress": 50.0,
"expires_at": "2024-01-01T01:00:00Z",
"upload_urls": [
"https://api.myuploader.com/api/v1/storage/upload/chunk"
]
}
},
"wallet_info": {
"summary": "Wallet information example",
"value": {
"id": "123e4567-e89b-12d3-a456-426614174003",
"address": "EQD6M8aVGx1fF6Z5q5q5q5q5q5q5q5q5q5q5q5q5q5q5q5q5q",
"network": "mainnet",
"balance": "10.50000000",
"is_active": True,
"is_primary": True,
"created_at": "2024-01-01T00:00:00Z",
"transactions": [
{
"tx_hash": "abc123def456ghi789jkl012mno345pqr678stu901vwx234yz",
"amount": "5.00000000",
"status": "confirmed",
"created_at": "2024-01-01T00:30:00Z"
}
]
}
},
"error_validation": {
"summary": "Validation error example",
"value": {
"type": "https://api.myuploader.com/errors/validation",
"title": "Validation Error",
"status": 422,
"detail": "The request contains invalid data",
"instance": "/api/v1/content/upload",
"errors": [
{
"field": "file_size",
"message": "File size must be less than 100MB"
},
{
"field": "content_type",
"message": "Content type is required"
}
]
}
},
"error_auth": {
"summary": "Authentication error example",
"value": {
"type": "https://api.myuploader.com/errors/authentication",
"title": "Authentication Required",
"status": 401,
"detail": "Valid authentication credentials are required",
"instance": "/api/v1/content/private"
}
},
"error_forbidden": {
"summary": "Permission error example",
"value": {
"type": "https://api.myuploader.com/errors/forbidden",
"title": "Insufficient Permissions",
"status": 403,
"detail": "You don't have permission to access this resource",
"instance": "/api/v1/admin/users"
}
},
"error_not_found": {
"summary": "Not found error example",
"value": {
"type": "https://api.myuploader.com/errors/not-found",
"title": "Resource Not Found",
"status": 404,
"detail": "The requested resource was not found",
"instance": "/api/v1/content/nonexistent-id"
}
},
"error_rate_limit": {
"summary": "Rate limit error example",
"value": {
"type": "https://api.myuploader.com/errors/rate-limit",
"title": "Rate Limit Exceeded",
"status": 429,
"detail": "Too many requests. Please try again later",
"instance": "/api/v1/content/search",
"retry_after": 60
}
}
}
# Security schemes
SECURITY_SCHEMES = {
"BearerAuth": {
"type": "http",
"scheme": "bearer",
"bearerFormat": "JWT",
"description": "JWT token authentication. Get token from /api/v1/auth/login"
},
"ApiKeyAuth": {
"type": "apiKey",
"in": "header",
"name": "X-API-Key",
"description": "API key authentication. Get API key from user dashboard"
},
"CookieAuth": {
"type": "apiKey",
"in": "cookie",
"name": "session",
"description": "Session cookie authentication"
}
}
# OpenAPI configuration
def get_openapi_config() -> Dict[str, Any]:
"""Get OpenAPI configuration."""
return {
"title": API_TITLE,
"version": API_VERSION,
"description": API_DESCRIPTION,
"terms_of_service": "https://myuploader.com/terms",
"contact": {
"name": "My Uploader Bot Support",
"url": "https://myuploader.com/support",
"email": "support@myuploader.com"
},
"license": {
"name": "MIT License",
"url": "https://opensource.org/licenses/MIT"
},
"servers": [
{
"url": "https://api.myuploader.com",
"description": "Production server"
},
{
"url": "https://staging-api.myuploader.com",
"description": "Staging server"
},
{
"url": "http://localhost:8000",
"description": "Development server"
}
],
"tags": TAGS_METADATA,
"components": {
"securitySchemes": SECURITY_SCHEMES,
"examples": RESPONSE_EXAMPLES,
"responses": {
"ValidationError": {
"description": "Validation error response",
"content": {
"application/json": {
"example": RESPONSE_EXAMPLES["error_validation"]["value"]
}
}
},
"AuthError": {
"description": "Authentication error response",
"content": {
"application/json": {
"example": RESPONSE_EXAMPLES["error_auth"]["value"]
}
}
},
"ForbiddenError": {
"description": "Permission error response",
"content": {
"application/json": {
"example": RESPONSE_EXAMPLES["error_forbidden"]["value"]
}
}
},
"NotFoundError": {
"description": "Not found error response",
"content": {
"application/json": {
"example": RESPONSE_EXAMPLES["error_not_found"]["value"]
}
}
},
"RateLimitError": {
"description": "Rate limit error response",
"content": {
"application/json": {
"example": RESPONSE_EXAMPLES["error_rate_limit"]["value"]
}
}
}
}
},
"security": [
{"BearerAuth": []},
{"ApiKeyAuth": []},
{"CookieAuth": []}
]
}
# Custom OpenAPI schema
CUSTOM_OPENAPI_SCHEMA = {
"x-logo": {
"url": "https://myuploader.com/logo.png",
"altText": "My Uploader Bot Logo"
},
"x-code-samples": [
{
"lang": "Python",
"source": """
import requests
# Upload a file
response = requests.post(
'https://api.myuploader.com/api/v1/storage/upload/initiate',
headers={'Authorization': 'Bearer <your_token>'},
json={
'filename': 'example.jpg',
'file_size': 1024000,
'content_type': 'image'
}
)
"""
},
{
"lang": "JavaScript",
"source": """
// Upload a file
const response = await fetch('https://api.myuploader.com/api/v1/storage/upload/initiate', {
method: 'POST',
headers: {
'Authorization': 'Bearer <your_token>',
'Content-Type': 'application/json'
},
body: JSON.stringify({
filename: 'example.jpg',
file_size: 1024000,
content_type: 'image'
})
});
"""
},
{
"lang": "cURL",
"source": """
curl -X POST https://api.myuploader.com/api/v1/storage/upload/initiate \\
-H "Authorization: Bearer <your_token>" \\
-H "Content-Type: application/json" \\
-d '{
"filename": "example.jpg",
"file_size": 1024000,
"content_type": "image"
}'
"""
}
]
}
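# A minimal sketch of how the pieces above might be assembled into one spec document.
# The helper name and the top-level merge are illustrative assumptions, not existing
# API; actual wiring into the Sanic app may differ.
def build_openapi_spec() -> Dict[str, Any]:
    """Return the base OpenAPI configuration with the custom vendor extensions merged in."""
    spec = get_openapi_config()
    spec.update(CUSTOM_OPENAPI_SCHEMA)  # adds x-logo and x-code-samples at the top level
    return spec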


@ -1,168 +1,494 @@
"""
Enhanced API middleware with security, rate limiting, and monitoring
"""
import asyncio
import time
import uuid
from datetime import datetime, timedelta
from typing import Optional, Dict, Any, Callable
import json
from sanic import Request, HTTPResponse
from sanic.response import json as json_response, text as text_response
from sanic.exceptions import Unauthorized, Forbidden, TooManyRequests, BadRequest
import structlog
from app.core.config import settings, SecurityConfig, CACHE_KEYS
from app.core.database import get_db_session, get_cache
from app.core.logging import request_id_var, user_id_var, operation_var, log_performance
from app.core.models.user import User
from app.core.models.base import BaseModel
logger = structlog.get_logger(__name__)
class SecurityMiddleware:
"""Security middleware for request validation and protection"""
@staticmethod
def add_security_headers(response: HTTPResponse) -> HTTPResponse:
"""Add security headers to response"""
# CORS headers
response.headers.update({
"Access-Control-Allow-Origin": "*", # Will be restricted based on request
"Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
"Access-Control-Allow-Headers": (
"Origin, Content-Type, Accept, Authorization, "
"X-Requested-With, X-API-Key, X-Request-ID"
),
"Access-Control-Max-Age": "86400",
# Security headers
"X-Content-Type-Options": "nosniff",
"X-Frame-Options": "DENY",
"X-XSS-Protection": "1; mode=block",
"Strict-Transport-Security": "max-age=31536000; includeSubDomains",
"Referrer-Policy": "strict-origin-when-cross-origin",
"Permissions-Policy": "geolocation=(), microphone=(), camera=()",
# Custom headers
"X-API-Version": settings.PROJECT_VERSION,
"X-Request-ID": getattr(getattr(Request, 'ctx', None), 'request_id', 'unknown')
})
# CSP header
csp_directives = "; ".join([
f"{directive} {' '.join(sources)}"
for directive, sources in SecurityConfig.CSP_DIRECTIVES.items()
])
response.headers["Content-Security-Policy"] = csp_directives
return response
@staticmethod
def validate_request_size(request: Request) -> None:
"""Validate request size limits"""
content_length = request.headers.get('content-length')
if content_length:
size = int(content_length)
if size > SecurityConfig.MAX_REQUEST_SIZE:
raise BadRequest(f"Request too large: {size} bytes")
@staticmethod
def validate_content_type(request: Request) -> None:
"""Validate content type for JSON requests"""
if request.method in ['POST', 'PUT', 'PATCH']:
content_type = request.headers.get('content-type', '')
if 'application/json' in content_type:
try:
# Validate JSON size
if hasattr(request, 'body') and len(request.body) > SecurityConfig.MAX_JSON_SIZE:
raise BadRequest("JSON payload too large")
except Exception:
raise BadRequest("Invalid JSON payload")
@staticmethod
def check_origin(request: Request) -> bool:
"""Check if request origin is allowed"""
origin = request.headers.get('origin')
if not origin:
return True # Allow requests without origin (direct API calls)
return any(
origin.startswith(allowed_origin.rstrip('/*'))
for allowed_origin in SecurityConfig.CORS_ORIGINS
)
class RateLimitMiddleware:
"""Rate limiting middleware using Redis"""
def __init__(self):
self.cache = None
async def get_cache(self):
"""Get cache instance"""
if not self.cache:
self.cache = await get_cache()
return self.cache
async def check_rate_limit(
self,
request: Request,
identifier: str,
pattern: str = "api"
) -> bool:
"""Check rate limit for identifier"""
try:
cache = await self.get_cache()
limits = SecurityConfig.RATE_LIMIT_PATTERNS.get(pattern, {
"requests": settings.RATE_LIMIT_REQUESTS,
"window": settings.RATE_LIMIT_WINDOW
})
cache_key = CACHE_KEYS["rate_limit"].format(
pattern=pattern,
identifier=identifier
)
# Get current count
current_count = await cache.get(cache_key)
if current_count is None:
# First request in window
await cache.set(cache_key, "1", ttl=limits["window"])
return True
current_count = int(current_count)
if current_count >= limits["requests"]:
# Rate limit exceeded
logger.warning(
"Rate limit exceeded",
identifier=identifier,
pattern=pattern,
count=current_count,
limit=limits["requests"]
)
return False
# Increment counter
await cache.incr(cache_key)
return True
except Exception as e:
logger.error("Rate limit check failed", error=str(e))
return True # Allow request if rate limiting fails
async def get_rate_limit_info(
self,
identifier: str,
pattern: str = "api"
) -> Dict[str, Any]:
"""Get rate limit information"""
try:
cache = await self.get_cache()
limits = SecurityConfig.RATE_LIMIT_PATTERNS.get(pattern, {
"requests": settings.RATE_LIMIT_REQUESTS,
"window": settings.RATE_LIMIT_WINDOW
})
cache_key = CACHE_KEYS["rate_limit"].format(
pattern=pattern,
identifier=identifier
)
current_count = await cache.get(cache_key) or "0"
ttl = await cache.redis.ttl(cache_key)
return {
"limit": limits["requests"],
"remaining": max(0, limits["requests"] - int(current_count)),
"reset_time": int(time.time()) + max(0, ttl),
"window": limits["window"]
}
except Exception as e:
logger.error("Failed to get rate limit info", error=str(e))
return {}
class AuthenticationMiddleware:
"""Authentication middleware for API access"""
@staticmethod
async def extract_token(request: Request) -> Optional[str]:
"""Extract authentication token from request"""
# Check Authorization header
auth_header = request.headers.get('authorization')
if auth_header and auth_header.startswith('Bearer '):
return auth_header[7:] # Remove 'Bearer ' prefix
# Check X-API-Key header
api_key = request.headers.get('x-api-key')
if api_key:
return api_key
# Check query parameter (less secure, for backward compatibility)
return request.args.get('token')
@staticmethod
async def validate_token(token: str, session) -> Optional[User]:
"""Validate authentication token and return user"""
if not token:
return None
try:
# For now, implement simple token validation
# In production, implement JWT or database token validation
# Example: if token format is user_id:hash
if ':' in token:
user_id_str, token_hash = token.split(':', 1)
try:
user_id = uuid.UUID(user_id_str)
user = await User.get_by_id(session, user_id)
if user and user.verify_token(token_hash):  # Implement in User model
return user
except (ValueError, AttributeError):
pass
# Fallback: try to find user by API token
# This would require implementing token storage in User model
return None
except Exception as e:
logger.error("Token validation failed", token=token[:8] + "...", error=str(e))
return None
@staticmethod
async def check_permissions(user: User, request: Request) -> bool:
"""Check if user has required permissions for the endpoint"""
# Implement permission checking based on endpoint and user role
endpoint = request.path
method = request.method
# Admin endpoints
if '/admin/' in endpoint:
return user.is_admin
# Moderator endpoints
if '/mod/' in endpoint:
return user.is_moderator
# User-specific endpoints
if '/user/' in endpoint and method in ['POST', 'PUT', 'DELETE']:
return user.has_permission('user:write')
# Content upload endpoints
if ('/upload' in endpoint or '/content' in endpoint) and method == 'POST':
return user.can_upload_content()
# Default: allow read access for authenticated users
return True
class RequestContextMiddleware:
"""Request context middleware for tracking and logging"""
@staticmethod
def generate_request_id() -> str:
"""Generate unique request ID"""
return str(uuid.uuid4())
@staticmethod
async def add_request_context(request: Request) -> None:
"""Add request context for logging and tracking"""
# Generate and set request ID
request_id = RequestContextMiddleware.generate_request_id()
request.ctx.request_id = request_id
request_id_var.set(request_id)
# Set request start time
request.ctx.start_time = time.time()
# Extract client information
request.ctx.client_ip = RequestContextMiddleware.get_client_ip(request)
request.ctx.user_agent = request.headers.get('user-agent', 'Unknown')
# Initialize context
request.ctx.user = None
request.ctx.rate_limit_info = {}
logger.info(
"Request started",
method=request.method,
path=request.path,
client_ip=request.ctx.client_ip,
user_agent=request.ctx.user_agent
)
@staticmethod
def get_client_ip(request: Request) -> str:
"""Get real client IP address"""
# Check for forwarded headers
forwarded_for = request.headers.get('x-forwarded-for')
if forwarded_for:
return forwarded_for.split(',')[0].strip()
real_ip = request.headers.get('x-real-ip')
if real_ip:
return real_ip
# Fallback to request IP
return getattr(request, 'ip', '127.0.0.1')
@staticmethod
async def log_request_completion(request: Request, response: HTTPResponse) -> None:
"""Log request completion with metrics"""
duration = time.time() - getattr(request.ctx, 'start_time', time.time())
logger.info(
"Request completed",
method=request.method,
path=request.path,
status_code=response.status,
duration_ms=round(duration * 1000, 2),
response_size=len(response.body) if response.body else 0,
client_ip=getattr(request.ctx, 'client_ip', 'unknown'),
user_id=str(request.ctx.user.id) if request.ctx.user else None
)
# Initialize middleware instances
security_middleware = SecurityMiddleware()
rate_limit_middleware = RateLimitMiddleware()
auth_middleware = AuthenticationMiddleware()
context_middleware = RequestContextMiddleware()
async def request_middleware(request: Request):
"""Main request middleware pipeline"""
# Handle OPTIONS requests for CORS
if request.method == 'OPTIONS':
response = text_response('OK')
return security_middleware.add_security_headers(response)
# Add request context
await context_middleware.add_request_context(request)
# Security validations
try:
security_middleware.validate_request_size(request)
security_middleware.validate_content_type(request)
if not security_middleware.check_origin(request):
raise Forbidden("Origin not allowed")
except Exception as e:
logger.warning("Security validation failed", error=str(e))
response = json_response({"error": str(e)}, status=400)
return security_middleware.add_security_headers(response)
# Rate limiting
if settings.RATE_LIMIT_ENABLED:
client_identifier = context_middleware.get_client_ip(request)
pattern = "api"
# Determine rate limit pattern based on endpoint
if '/auth/' in request.path:
pattern = "auth"
elif '/upload' in request.path:
pattern = "upload"
elif '/admin/' in request.path:
pattern = "heavy"
if not await rate_limit_middleware.check_rate_limit(request, client_identifier, pattern):
rate_info = await rate_limit_middleware.get_rate_limit_info(client_identifier, pattern)
response = json_response(
{
"error": "Rate limit exceeded",
"rate_limit": rate_info
},
status=429
)
return security_middleware.add_security_headers(response)
# Store rate limit info for response headers
request.ctx.rate_limit_info = await rate_limit_middleware.get_rate_limit_info(
client_identifier, pattern
)
# Authentication (for protected endpoints)
if not request.path.startswith('/api/system') and request.path != '/':
async with get_db_session() as session:
token = await auth_middleware.extract_token(request)
if token:
user = await auth_middleware.validate_token(token, session)
if user:
request.ctx.user = user
user_id_var.set(str(user.id))
# Check permissions
if not await auth_middleware.check_permissions(user, request):
response = json_response({"error": "Insufficient permissions"}, status=403)
return security_middleware.add_security_headers(response)
# Update user activity
user.update_activity()
await session.commit()
# Store session for request handlers
request.ctx.db_session = session
async def response_middleware(request: Request, response: HTTPResponse):
"""Main response middleware pipeline"""
# Add security headers
response = security_middleware.add_security_headers(response)
# Add rate limit headers
if hasattr(request.ctx, 'rate_limit_info') and request.ctx.rate_limit_info:
rate_info = request.ctx.rate_limit_info
response.headers.update({
"X-RateLimit-Limit": str(rate_info.get('limit', 0)),
"X-RateLimit-Remaining": str(rate_info.get('remaining', 0)),
"X-RateLimit-Reset": str(rate_info.get('reset_time', 0))
})
# Add request ID to response
if hasattr(request.ctx, 'request_id'):
response.headers["X-Request-ID"] = request.ctx.request_id
# Log request completion
await context_middleware.log_request_completion(request, response)
return response
async def exception_middleware(request: Request, exception: Exception):
"""Global exception handling middleware"""
error_id = str(uuid.uuid4())
# Log the exception
logger.error(
"Unhandled exception",
error_id=error_id,
exception_type=type(exception).__name__,
exception_message=str(exception),
path=request.path,
method=request.method,
user_id=str(request.ctx.user.id) if hasattr(request.ctx, 'user') and request.ctx.user else None
)
# Handle different exception types
if isinstance(exception, Unauthorized):
response_data = {"error": "Authentication required", "error_id": error_id}
status = 401
elif isinstance(exception, Forbidden):
response_data = {"error": "Access forbidden", "error_id": error_id}
status = 403
elif isinstance(exception, TooManyRequests):
response_data = {"error": "Rate limit exceeded", "error_id": error_id}
status = 429
elif isinstance(exception, BadRequest):
response_data = {"error": str(exception), "error_id": error_id}
status = 400
else:
# Generic server error
response_data = {
"error": "Internal server error",
"error_id": error_id
}
status = 500
if settings.DEBUG:
response_data["debug"] = {
"type": type(exception).__name__,
"message": str(exception)
}
response = json_response(response_data, status=status)
return security_middleware.add_security_headers(response)
# Maintenance mode middleware
async def maintenance_middleware(request: Request):
"""Check for maintenance mode"""
if settings.MAINTENANCE_MODE and not request.path.startswith('/api/system'):
response = json_response({
"error": "Service temporarily unavailable",
"message": settings.MAINTENANCE_MESSAGE
}, status=503)
return security_middleware.add_security_headers(response)


@ -0,0 +1,870 @@
"""
Authentication and authorization routes with JWT tokens, user management, and security features.
Provides user registration, login, token refresh, and account management with comprehensive validation.
"""
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from uuid import UUID, uuid4
from sanic import Blueprint, Request, response
from sanic.response import JSONResponse
from sqlalchemy import select, update, and_, or_
from sqlalchemy.orm import selectinload
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.models.user import User, UserSession, UserRole
from app.core.security import (
hash_password, verify_password, generate_access_token,
verify_access_token, generate_refresh_token, generate_api_key,
sanitize_input, generate_csrf_token
)
from app.api.middleware import require_auth, validate_request, rate_limit
from app.core.validation import (
UserRegistrationSchema, UserLoginSchema, UserUpdateSchema,
ApiKeySchema
)
# Initialize blueprint
auth_bp = Blueprint("auth", url_prefix="/api/v1/auth")
logger = get_logger(__name__)
settings = get_settings()
@auth_bp.route("/register", methods=["POST"])
@rate_limit(limit=5, window=3600) # 5 registrations per hour
@validate_request(UserRegistrationSchema)
async def register_user(request: Request) -> JSONResponse:
"""
Register new user with comprehensive validation and security checks.
Args:
request: Sanic request with user registration data
Returns:
JSONResponse: Registration result with access tokens
"""
try:
data = request.json
client_ip = request.headers.get("X-Forwarded-For", request.remote_addr)
# Sanitize input data
username = sanitize_input(data["username"])
email = sanitize_input(data["email"])
full_name = sanitize_input(data.get("full_name", ""))
async with get_async_session() as session:
# Check if username already exists
username_stmt = select(User).where(User.username == username)
username_result = await session.execute(username_stmt)
if username_result.scalar_one_or_none():
return response.json(
{"error": "Username already exists", "code": "USERNAME_EXISTS"},
status=400
)
# Check if email already exists
email_stmt = select(User).where(User.email == email)
email_result = await session.execute(email_stmt)
if email_result.scalar_one_or_none():
return response.json(
{"error": "Email already registered", "code": "EMAIL_EXISTS"},
status=400
)
# Check registration rate limiting by IP
cache_manager = get_cache_manager()
ip_reg_key = f"registration_ip:{client_ip}"
ip_registrations = await cache_manager.get(ip_reg_key, default=0)
if ip_registrations >= 3: # Max 3 registrations per IP per day
return response.json(
{"error": "Too many registrations from this IP", "code": "IP_LIMIT_EXCEEDED"},
status=429
)
# Hash password
password_hash = hash_password(data["password"])
# Create user
new_user = User(
id=uuid4(),
username=username,
email=email,
password_hash=password_hash,
full_name=full_name,
is_active=True,
email_verified=False, # Require email verification
registration_ip=client_ip,
last_login_ip=client_ip,
settings={"theme": "light", "notifications": True}
)
session.add(new_user)
await session.commit()
await session.refresh(new_user)
# Assign default role
default_role_stmt = select(UserRole).where(UserRole.name == "user")
role_result = await session.execute(default_role_stmt)
default_role = role_result.scalar_one_or_none()
if default_role:
new_user.roles.append(default_role)
await session.commit()
# Update IP registration counter
await cache_manager.increment(ip_reg_key, ttl=86400) # 24 hours
# Generate tokens
access_token = generate_access_token(
{"user_id": str(new_user.id), "username": username},
expires_in=settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60
)
refresh_token = generate_refresh_token(new_user.id)
# Create user session
session_id = str(uuid4())
csrf_token = generate_csrf_token(new_user.id, session_id)
async with get_async_session() as session:
user_session = UserSession(
id=UUID(session_id),
user_id=new_user.id,
refresh_token_hash=hash_password(refresh_token[-32:]), # Hash last 32 chars
ip_address=client_ip,
user_agent=request.headers.get("User-Agent", ""),
expires_at=datetime.utcnow() + timedelta(days=settings.REFRESH_TOKEN_EXPIRE_DAYS)
)
session.add(user_session)
await session.commit()
await logger.ainfo(
"User registered successfully",
user_id=str(new_user.id),
username=username,
email=email,
ip=client_ip
)
return response.json({
"message": "Registration successful",
"user": {
"id": str(new_user.id),
"username": username,
"email": email,
"full_name": full_name,
"created_at": new_user.created_at.isoformat()
},
"tokens": {
"access_token": access_token,
"refresh_token": refresh_token,
"token_type": "Bearer",
"expires_in": settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60
},
"session": {
"session_id": session_id,
"csrf_token": csrf_token
}
}, status=201)
except Exception as e:
await logger.aerror(
"User registration failed",
username=data.get("username"),
email=data.get("email"),
error=str(e)
)
return response.json(
{"error": "Registration failed", "code": "REGISTRATION_FAILED"},
status=500
)
@auth_bp.route("/login", methods=["POST"])
@rate_limit(limit=10, window=900) # 10 login attempts per 15 minutes
@validate_request(UserLoginSchema)
async def login_user(request: Request) -> JSONResponse:
"""
Authenticate user and generate access tokens with security logging.
Args:
request: Sanic request with login credentials
Returns:
JSONResponse: Authentication result with tokens
"""
try:
data = request.json
username_or_email = sanitize_input(data["username"])
password = data["password"]
remember_me = data.get("remember_me", False)
client_ip = request.headers.get("X-Forwarded-For", request.remote_addr)
# Check login rate limiting
cache_manager = get_cache_manager()
login_key = f"login_attempts:{username_or_email}:{client_ip}"
attempts = await cache_manager.get(login_key, default=0)
if attempts >= 5: # Max 5 failed attempts
return response.json(
{"error": "Too many login attempts", "code": "LOGIN_BLOCKED"},
status=429
)
async with get_async_session() as session:
# Find user by username or email
user_stmt = select(User).where(
or_(User.username == username_or_email, User.email == username_or_email)
).options(selectinload(User.roles))
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user or not verify_password(password, user.password_hash):
# Increment failed attempts
await cache_manager.increment(login_key, ttl=900) # 15 minutes
await logger.awarning(
"Failed login attempt",
username=username_or_email,
ip=client_ip,
attempts=attempts + 1
)
return response.json(
{"error": "Invalid credentials", "code": "INVALID_CREDENTIALS"},
status=401
)
if not user.is_active:
return response.json(
{"error": "Account deactivated", "code": "ACCOUNT_DEACTIVATED"},
status=403
)
# Successful login - clear failed attempts
await cache_manager.delete(login_key)
# Update user login info
user.last_login_at = datetime.utcnow()
user.last_login_ip = client_ip
user.login_count = (user.login_count or 0) + 1
await session.commit()
# Generate tokens
user_permissions = []
for role in user.roles:
user_permissions.extend(role.permissions)
token_payload = {
"user_id": str(user.id),
"username": user.username,
"permissions": list(set(user_permissions)) # Remove duplicates
}
expires_in = settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60
if remember_me:
expires_in *= 24 # 24x longer for remember me
access_token = generate_access_token(token_payload, expires_in=expires_in)
refresh_token = generate_refresh_token(user.id)
# Create user session
session_id = str(uuid4())
csrf_token = generate_csrf_token(user.id, session_id)
refresh_expires = timedelta(days=settings.REFRESH_TOKEN_EXPIRE_DAYS)
if remember_me:
refresh_expires *= 2 # Longer refresh for remember me
async with get_async_session() as session:
user_session = UserSession(
id=UUID(session_id),
user_id=user.id,
refresh_token_hash=hash_password(refresh_token[-32:]),
ip_address=client_ip,
user_agent=request.headers.get("User-Agent", ""),
expires_at=datetime.utcnow() + refresh_expires,
remember_me=remember_me
)
session.add(user_session)
await session.commit()
await logger.ainfo(
"User logged in successfully",
user_id=str(user.id),
username=user.username,
ip=client_ip,
remember_me=remember_me
)
return response.json({
"message": "Login successful",
"user": {
"id": str(user.id),
"username": user.username,
"email": user.email,
"full_name": user.full_name,
"last_login": user.last_login_at.isoformat() if user.last_login_at else None,
"permissions": user_permissions
},
"tokens": {
"access_token": access_token,
"refresh_token": refresh_token,
"token_type": "Bearer",
"expires_in": expires_in
},
"session": {
"session_id": session_id,
"csrf_token": csrf_token
}
})
except Exception as e:
await logger.aerror(
"Login failed",
username=data.get("username"),
error=str(e)
)
return response.json(
{"error": "Login failed", "code": "LOGIN_FAILED"},
status=500
)
@auth_bp.route("/refresh", methods=["POST"])
@rate_limit(limit=50, window=3600) # 50 refresh attempts per hour
async def refresh_tokens(request: Request) -> JSONResponse:
"""
Refresh access token using refresh token with rotation.
Args:
request: Sanic request with refresh token
Returns:
JSONResponse: New access and refresh tokens
"""
try:
refresh_token = request.json.get("refresh_token")
if not refresh_token:
return response.json(
{"error": "Refresh token required", "code": "TOKEN_REQUIRED"},
status=400
)
# Verify refresh token
payload = verify_access_token(refresh_token, token_type="refresh")
if not payload:
return response.json(
{"error": "Invalid refresh token", "code": "INVALID_TOKEN"},
status=401
)
user_id = UUID(payload["user_id"])
async with get_async_session() as session:
# Verify session exists and is valid
session_stmt = select(UserSession).where(
and_(
UserSession.user_id == user_id,
UserSession.refresh_token_hash == hash_password(refresh_token[-32:]),
UserSession.expires_at > datetime.utcnow(),
UserSession.is_active == True
)
)
session_result = await session.execute(session_stmt)
user_session = session_result.scalar_one_or_none()
if not user_session:
return response.json(
{"error": "Session expired or invalid", "code": "SESSION_INVALID"},
status=401
)
# Get user with permissions
user_stmt = select(User).where(User.id == user_id).options(selectinload(User.roles))
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user or not user.is_active:
return response.json(
{"error": "User not found or inactive", "code": "USER_INACTIVE"},
status=401
)
# Generate new tokens (token rotation)
user_permissions = []
for role in user.roles:
user_permissions.extend(role.permissions)
new_access_token = generate_access_token(
{
"user_id": str(user.id),
"username": user.username,
"permissions": list(set(user_permissions))
},
expires_in=settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60
)
new_refresh_token = generate_refresh_token(user.id)
# Update session with new refresh token
user_session.refresh_token_hash = hash_password(new_refresh_token[-32:])
user_session.last_used_at = datetime.utcnow()
await session.commit()
await logger.adebug(
"Tokens refreshed",
user_id=str(user_id),
session_id=str(user_session.id)
)
return response.json({
"tokens": {
"access_token": new_access_token,
"refresh_token": new_refresh_token,
"token_type": "Bearer",
"expires_in": settings.ACCESS_TOKEN_EXPIRE_MINUTES * 60
}
})
except Exception as e:
await logger.aerror("Token refresh failed", error=str(e))
return response.json(
{"error": "Token refresh failed", "code": "REFRESH_FAILED"},
status=500
)
@auth_bp.route("/logout", methods=["POST"])
@require_auth()
async def logout_user(request: Request) -> JSONResponse:
"""
Logout user and invalidate session.
Args:
request: Sanic request object
Returns:
JSONResponse: Logout confirmation
"""
try:
user_id = request.ctx.user.id
session_id = request.headers.get("X-Session-ID")
if session_id:
async with get_async_session() as session:
# Invalidate specific session
session_stmt = select(UserSession).where(
and_(
UserSession.id == UUID(session_id),
UserSession.user_id == user_id
)
)
session_result = await session.execute(session_stmt)
user_session = session_result.scalar_one_or_none()
if user_session:
user_session.is_active = False
user_session.logged_out_at = datetime.utcnow()
await session.commit()
await logger.ainfo(
"User logged out",
user_id=str(user_id),
session_id=session_id
)
return response.json({
"message": "Logout successful",
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
await logger.aerror(
"Logout failed",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Logout failed", "code": "LOGOUT_FAILED"},
status=500
)
@auth_bp.route("/me", methods=["GET"])
@require_auth()
async def get_current_user(request: Request) -> JSONResponse:
"""
Get current user information and permissions.
Args:
request: Sanic request object
Returns:
JSONResponse: Current user data
"""
try:
user = request.ctx.user
async with get_async_session() as session:
# Get user with full details
user_stmt = select(User).where(User.id == user.id).options(
selectinload(User.roles),
selectinload(User.api_keys)
)
user_result = await session.execute(user_stmt)
full_user = user_result.scalar_one_or_none()
if not full_user:
return response.json(
{"error": "User not found", "code": "USER_NOT_FOUND"},
status=404
)
# Get user permissions
permissions = []
roles = []
for role in full_user.roles:
roles.append({
"name": role.name,
"description": role.description
})
permissions.extend(role.permissions)
# Get active sessions
sessions_stmt = select(UserSession).where(
and_(
UserSession.user_id == user.id,
UserSession.is_active == True,
UserSession.expires_at > datetime.utcnow()
)
)
sessions_result = await session.execute(sessions_stmt)
active_sessions = sessions_result.scalars().all()
return response.json({
"user": {
"id": str(full_user.id),
"username": full_user.username,
"email": full_user.email,
"full_name": full_user.full_name,
"bio": full_user.bio,
"avatar_url": full_user.avatar_url,
"is_active": full_user.is_active,
"email_verified": full_user.email_verified,
"created_at": full_user.created_at.isoformat(),
"last_login_at": full_user.last_login_at.isoformat() if full_user.last_login_at else None,
"login_count": full_user.login_count,
"settings": full_user.settings
},
"roles": roles,
"permissions": list(set(permissions)),
"active_sessions": len(active_sessions),
"api_keys": [
{
"id": str(key.id),
"name": key.name,
"created_at": key.created_at.isoformat(),
"last_used_at": key.last_used_at.isoformat() if key.last_used_at else None,
"expires_at": key.expires_at.isoformat() if key.expires_at else None
}
for key in full_user.api_keys
if key.is_active
]
})
except Exception as e:
await logger.aerror(
"Failed to get current user",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to get user information", "code": "USER_INFO_FAILED"},
status=500
)
@auth_bp.route("/me", methods=["PUT"])
@require_auth()
@validate_request(UserUpdateSchema)
async def update_current_user(request: Request) -> JSONResponse:
"""
Update current user profile information.
Args:
request: Sanic request with update data
Returns:
JSONResponse: Updated user information
"""
try:
user_id = request.ctx.user.id
data = request.json
async with get_async_session() as session:
# Get current user
user_stmt = select(User).where(User.id == user_id)
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user:
return response.json(
{"error": "User not found", "code": "USER_NOT_FOUND"},
status=404
)
# Update allowed fields
updatable_fields = ["full_name", "bio", "avatar_url", "settings"]
for field in updatable_fields:
if field in data:
if field == "full_name":
setattr(user, field, sanitize_input(data[field]))
elif field == "bio":
setattr(user, field, sanitize_input(data[field], max_length=500))
else:
setattr(user, field, data[field])
# Handle email change (requires verification)
if "email" in data and data["email"] != user.email:
new_email = sanitize_input(data["email"])
# Check if email is already taken
email_stmt = select(User).where(
and_(User.email == new_email, User.id != user_id)
)
email_result = await session.execute(email_stmt)
if email_result.scalar_one_or_none():
return response.json(
{"error": "Email already in use", "code": "EMAIL_IN_USE"},
status=400
)
user.email = new_email
user.email_verified = False # Require re-verification
user.updated_at = datetime.utcnow()
await session.commit()
await logger.ainfo(
"User profile updated",
user_id=str(user_id),
updated_fields=list(data.keys())
)
return response.json({
"message": "Profile updated successfully",
"user": {
"id": str(user.id),
"username": user.username,
"email": user.email,
"full_name": user.full_name,
"bio": user.bio,
"avatar_url": user.avatar_url,
"updated_at": user.updated_at.isoformat()
}
})
except Exception as e:
await logger.aerror(
"Failed to update user profile",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to update profile", "code": "UPDATE_FAILED"},
status=500
)
@auth_bp.route("/api-keys", methods=["POST"])
@rate_limit(limit=5, window=3600) # 5 API keys per hour
@require_auth(permissions=["api.create"])
@validate_request(ApiKeySchema)
async def create_api_key(request: Request) -> JSONResponse:
"""
Create new API key for programmatic access.
Args:
request: Sanic request with API key data
Returns:
JSONResponse: Created API key information
"""
try:
user_id = request.ctx.user.id
data = request.json
# Generate API key
api_key = generate_api_key(
user_id=user_id,
permissions=data["permissions"],
name=data["name"],
expires_in=None if not data.get("expires_at") else
int((datetime.fromisoformat(data["expires_at"]) - datetime.utcnow()).total_seconds())
)
async with get_async_session() as session:
from app.core.models.user import ApiKey
# Create API key record
new_api_key = ApiKey(
id=uuid4(),
user_id=user_id,
name=sanitize_input(data["name"]),
key_hash=hash_password(api_key[-32:]), # Hash last 32 chars
permissions=data["permissions"],
expires_at=datetime.fromisoformat(data["expires_at"]) if data.get("expires_at") else None
)
session.add(new_api_key)
await session.commit()
await session.refresh(new_api_key)
await logger.ainfo(
"API key created",
user_id=str(user_id),
api_key_id=str(new_api_key.id),
name=data["name"],
permissions=data["permissions"]
)
return response.json({
"message": "API key created successfully",
"api_key": {
"id": str(new_api_key.id),
"name": new_api_key.name,
"key": api_key, # Only returned once
"permissions": new_api_key.permissions,
"created_at": new_api_key.created_at.isoformat(),
"expires_at": new_api_key.expires_at.isoformat() if new_api_key.expires_at else None
},
"warning": "Save this API key securely. It will not be shown again."
}, status=201)
except Exception as e:
await logger.aerror(
"Failed to create API key",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to create API key", "code": "API_KEY_FAILED"},
status=500
)
@auth_bp.route("/sessions", methods=["GET"])
@require_auth()
async def get_user_sessions(request: Request) -> JSONResponse:
"""
Get all active user sessions.
Args:
request: Sanic request object
Returns:
JSONResponse: List of active sessions
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
sessions_stmt = select(UserSession).where(
and_(
UserSession.user_id == user_id,
UserSession.is_active == True,
UserSession.expires_at > datetime.utcnow()
)
).order_by(UserSession.created_at.desc())
sessions_result = await session.execute(sessions_stmt)
sessions = sessions_result.scalars().all()
sessions_data = []
for sess in sessions:
sessions_data.append({
"id": str(sess.id),
"ip_address": sess.ip_address,
"user_agent": sess.user_agent,
"created_at": sess.created_at.isoformat(),
"last_used_at": sess.last_used_at.isoformat() if sess.last_used_at else None,
"expires_at": sess.expires_at.isoformat(),
"remember_me": sess.remember_me,
"is_current": str(sess.id) == request.headers.get("X-Session-ID")
})
return response.json({
"sessions": sessions_data,
"total": len(sessions_data)
})
except Exception as e:
await logger.aerror(
"Failed to get user sessions",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to get sessions", "code": "SESSIONS_FAILED"},
status=500
)
@auth_bp.route("/sessions/<session_id:uuid>", methods=["DELETE"])
@require_auth()
async def revoke_session(request: Request, session_id: UUID) -> JSONResponse:
"""
Revoke specific user session.
Args:
request: Sanic request object
session_id: Session UUID to revoke
Returns:
JSONResponse: Revocation status
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
session_stmt = select(UserSession).where(
and_(
UserSession.id == session_id,
UserSession.user_id == user_id
)
)
session_result = await session.execute(session_stmt)
user_session = session_result.scalar_one_or_none()
if not user_session:
return response.json(
{"error": "Session not found", "code": "SESSION_NOT_FOUND"},
status=404
)
user_session.is_active = False
user_session.logged_out_at = datetime.utcnow()
await session.commit()
await logger.ainfo(
"Session revoked",
user_id=str(user_id),
session_id=str(session_id)
)
return response.json({
"message": "Session revoked successfully",
"session_id": str(session_id)
})
except Exception as e:
await logger.aerror(
"Failed to revoke session",
user_id=str(request.ctx.user.id),
session_id=str(session_id),
error=str(e)
)
return response.json(
{"error": "Failed to revoke session", "code": "REVOKE_FAILED"},
status=500
)
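# --- Illustrative client sketch (not part of the route file) ----------------
# A minimal "log out everywhere else" flow built on the /sessions endpoints
# above. Assumptions: the auth blueprint is mounted under /api/v1/auth, the
# server listens on localhost:15100, and httpx is installed.
import asyncio
import httpx

AUTH_BASE = "http://localhost:15100/api/v1/auth"  # assumed mount point

async def prune_other_sessions(access_token: str, current_session_id: str) -> None:
    headers = {
        "Authorization": f"Bearer {access_token}",
        "X-Session-ID": current_session_id,
    }
    async with httpx.AsyncClient(base_url=AUTH_BASE, headers=headers) as client:
        # GET /sessions lists active sessions; "is_current" marks this one.
        sessions = (await client.get("/sessions")).json()["sessions"]
        for sess in sessions:
            if not sess["is_current"]:
                # DELETE /sessions/<uuid> revokes a single session.
                await client.delete(f"/sessions/{sess['id']}")

# asyncio.run(prune_other_sessions("<access-token>", "<session-id>"))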

View File

@ -0,0 +1,634 @@
"""
Blockchain operations routes for TON integration with async wallet management.
Provides secure transaction handling, balance queries, and smart contract interactions.
"""
import asyncio
from datetime import datetime, timedelta
from decimal import Decimal
from typing import Dict, List, Optional, Any
from uuid import UUID, uuid4
from sanic import Blueprint, Request, response
from sanic.response import JSONResponse
from sqlalchemy import select, update, and_
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.models.user import User
from app.api.middleware import require_auth, validate_request, rate_limit
from app.core.validation import BlockchainTransactionSchema
from app.core.background.ton_service import TONService
# Initialize blueprint
blockchain_bp = Blueprint("blockchain", url_prefix="/api/v1/blockchain")
logger = get_logger(__name__)
settings = get_settings()
@blockchain_bp.route("/wallet/balance", methods=["GET"])
@rate_limit(limit=100, window=3600) # 100 balance checks per hour
@require_auth(permissions=["blockchain.read"])
async def get_wallet_balance(request: Request) -> JSONResponse:
"""
Get user wallet balance with caching for performance.
Args:
request: Sanic request object
Returns:
JSONResponse: Wallet balance information
"""
try:
user_id = request.ctx.user.id
cache_manager = get_cache_manager()
# Try cache first
balance_key = f"wallet_balance:{user_id}"
cached_balance = await cache_manager.get(balance_key)
if cached_balance:
return response.json({
"balance": cached_balance,
"cached": True,
"updated_at": cached_balance.get("updated_at")
})
async with get_async_session() as session:
# Get user wallet address
user_stmt = select(User).where(User.id == user_id)
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user or not user.wallet_address:
return response.json(
{"error": "Wallet not configured", "code": "WALLET_NOT_CONFIGURED"},
status=400
)
# Get balance from TON service
ton_service = TONService()
balance_data = await ton_service.get_wallet_balance(user.wallet_address)
if balance_data.get("error"):
return response.json(
{"error": balance_data["error"], "code": "BALANCE_FETCH_FAILED"},
status=500
)
# Cache balance for 5 minutes
balance_response = {
"address": user.wallet_address,
"balance_nanotons": balance_data["balance"],
"balance_tons": str(Decimal(balance_data["balance"]) / Decimal("1000000000")),
"last_transaction_lt": balance_data.get("last_transaction_lt"),
"updated_at": datetime.utcnow().isoformat()
}
await cache_manager.set(balance_key, balance_response, ttl=300)
await logger.ainfo(
"Wallet balance retrieved",
user_id=str(user_id),
address=user.wallet_address,
balance=balance_data["balance"]
)
return response.json({
"balance": balance_response,
"cached": False
})
except Exception as e:
await logger.aerror(
"Failed to get wallet balance",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to get balance", "code": "BALANCE_FAILED"},
status=500
)
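# --- Conversion sketch -------------------------------------------------------
# The balance handler above reports both nanotons and a Decimal-based TON
# string (1 TON = 10**9 nanotons). The same conversion as a small helper,
# kept in Decimal to avoid float rounding:
from decimal import Decimal

NANOTONS_PER_TON = Decimal("1000000000")

def nanotons_to_tons(nanotons: int) -> str:
    return str(Decimal(nanotons) / NANOTONS_PER_TON)

def tons_to_nanotons(tons: str) -> int:
    return int(Decimal(tons) * NANOTONS_PER_TON)

assert nanotons_to_tons(1_500_000_000) == "1.5"
assert tons_to_nanotons("0.25") == 250_000_000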
@blockchain_bp.route("/wallet/transactions", methods=["GET"])
@rate_limit(limit=50, window=3600) # 50 transaction history requests per hour
@require_auth(permissions=["blockchain.read"])
async def get_wallet_transactions(request: Request) -> JSONResponse:
"""
Get wallet transaction history with pagination.
Args:
request: Sanic request object
Returns:
JSONResponse: Transaction history
"""
try:
user_id = request.ctx.user.id
# Parse query parameters
limit = min(int(request.args.get("limit", 20)), 100) # Max 100 transactions
offset = max(int(request.args.get("offset", 0)), 0)
async with get_async_session() as session:
# Get user wallet address
user_stmt = select(User).where(User.id == user_id)
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user or not user.wallet_address:
return response.json(
{"error": "Wallet not configured", "code": "WALLET_NOT_CONFIGURED"},
status=400
)
# Check cache for recent transactions
cache_manager = get_cache_manager()
cache_key = f"wallet_transactions:{user_id}:{limit}:{offset}"
cached_transactions = await cache_manager.get(cache_key)
if cached_transactions:
return response.json({
"transactions": cached_transactions,
"cached": True
})
# Get transactions from TON service
ton_service = TONService()
transactions_data = await ton_service.get_wallet_transactions(
user.wallet_address,
limit=limit,
offset=offset
)
if transactions_data.get("error"):
return response.json(
{"error": transactions_data["error"], "code": "TRANSACTIONS_FETCH_FAILED"},
status=500
)
# Process and format transactions
formatted_transactions = []
for tx in transactions_data.get("transactions", []):
formatted_tx = {
"hash": tx.get("hash"),
"lt": tx.get("lt"),
"timestamp": tx.get("utime"),
"value": tx.get("value", "0"),
"value_tons": str(Decimal(tx.get("value", "0")) / Decimal("1000000000")),
"fee": tx.get("fee", "0"),
"source": tx.get("in_msg", {}).get("source"),
"destination": tx.get("out_msgs", [{}])[0].get("destination"),
"message": tx.get("in_msg", {}).get("message", ""),
"type": "incoming" if tx.get("in_msg") else "outgoing",
"status": "success" if tx.get("success") else "failed"
}
formatted_transactions.append(formatted_tx)
# Cache for 2 minutes
await cache_manager.set(cache_key, formatted_transactions, ttl=120)
return response.json({
"transactions": formatted_transactions,
"total": len(formatted_transactions),
"limit": limit,
"offset": offset,
"cached": False
})
except Exception as e:
await logger.aerror(
"Failed to get wallet transactions",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to get transactions", "code": "TRANSACTIONS_FAILED"},
status=500
)
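# --- Illustrative client sketch ----------------------------------------------
# Paging through the transaction history above. The handler caps "limit" at
# 100 per request, so a full history is collected page by page. The base URL
# and auth setup on the httpx.Client are assumed to be configured elsewhere.
import httpx

def fetch_all_transactions(client: httpx.Client, page_size: int = 100) -> list:
    all_txs = []
    offset = 0
    while True:
        resp = client.get(
            "/api/v1/blockchain/wallet/transactions",
            params={"limit": page_size, "offset": offset},
        )
        resp.raise_for_status()
        batch = resp.json()["transactions"]
        if not batch:
            break
        all_txs.extend(batch)
        offset += len(batch)
    return all_txs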
@blockchain_bp.route("/transaction/send", methods=["POST"])
@rate_limit(limit=10, window=3600) # 10 transactions per hour
@require_auth(permissions=["blockchain.write"])
@validate_request(BlockchainTransactionSchema)
async def send_transaction(request: Request) -> JSONResponse:
"""
Send TON transaction with comprehensive validation and monitoring.
Args:
request: Sanic request with transaction data
Returns:
JSONResponse: Transaction submission result
"""
try:
user_id = request.ctx.user.id
data = request.json
async with get_async_session() as session:
# Get user with wallet
user_stmt = select(User).where(User.id == user_id)
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user or not user.wallet_address or not user.wallet_private_key:
return response.json(
{"error": "Wallet not properly configured", "code": "WALLET_INCOMPLETE"},
status=400
)
# Validate transaction limits
amount_nanotons = data.get("amount", 0)
max_transaction = settings.MAX_TRANSACTION_AMOUNT * 1000000000 # Convert to nanotons
if amount_nanotons > max_transaction:
return response.json(
{"error": f"Amount exceeds maximum allowed ({settings.MAX_TRANSACTION_AMOUNT} TON)",
"code": "AMOUNT_EXCEEDED"},
status=400
)
# Check daily transaction limit
cache_manager = get_cache_manager()
daily_limit_key = f"daily_transactions:{user_id}:{datetime.utcnow().date()}"
daily_amount = await cache_manager.get(daily_limit_key, default=0)
if daily_amount + amount_nanotons > settings.DAILY_TRANSACTION_LIMIT * 1000000000:
return response.json(
{"error": "Daily transaction limit exceeded", "code": "DAILY_LIMIT_EXCEEDED"},
status=429
)
# Prepare transaction
transaction_data = {
"transaction_type": data["transaction_type"],
"recipient_address": data.get("recipient_address"),
"amount": amount_nanotons,
"message": data.get("message", ""),
"sender_address": user.wallet_address
}
# Send transaction via TON service
ton_service = TONService()
tx_result = await ton_service.send_transaction(
private_key=user.wallet_private_key,
**transaction_data
)
if tx_result.get("error"):
await logger.awarning(
"Transaction failed",
user_id=str(user_id),
error=tx_result["error"],
**transaction_data
)
return response.json(
{"error": tx_result["error"], "code": "TRANSACTION_FAILED"},
status=400
)
# Update daily limit counter
await cache_manager.increment(daily_limit_key, amount_nanotons, ttl=86400)
# Store transaction record
from app.core.models.blockchain import BlockchainTransaction
async with get_async_session() as session:
tx_record = BlockchainTransaction(
id=uuid4(),
user_id=user_id,
transaction_hash=tx_result["hash"],
transaction_type=data["transaction_type"],
amount=amount_nanotons,
recipient_address=data.get("recipient_address"),
sender_address=user.wallet_address,
message=data.get("message", ""),
status="pending",
network_fee=tx_result.get("fee", 0),
block_hash=tx_result.get("block_hash"),
logical_time=tx_result.get("lt")
)
session.add(tx_record)
await session.commit()
# Clear balance cache
balance_key = f"wallet_balance:{user_id}"
await cache_manager.delete(balance_key)
await logger.ainfo(
"Transaction sent successfully",
user_id=str(user_id),
transaction_hash=tx_result["hash"],
amount=amount_nanotons,
recipient=data.get("recipient_address")
)
return response.json({
"message": "Transaction sent successfully",
"transaction": {
"hash": tx_result["hash"],
"amount": amount_nanotons,
"amount_tons": str(Decimal(amount_nanotons) / Decimal("1000000000")),
"recipient": data.get("recipient_address"),
"fee": tx_result.get("fee", 0),
"status": "pending",
"timestamp": datetime.utcnow().isoformat()
}
}, status=201)
except Exception as e:
await logger.aerror(
"Failed to send transaction",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to send transaction", "code": "SEND_FAILED"},
status=500
)
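# --- Illustrative request sketch ----------------------------------------------
# Example payload for POST /api/v1/blockchain/transaction/send, using only the
# fields the handler above reads from request.json. The "transfer" type value
# and the recipient address are placeholders; the authoritative field rules
# live in BlockchainTransactionSchema.
import httpx

EXAMPLE_PAYLOAD = {
    "transaction_type": "transfer",          # assumed type value
    "recipient_address": "EQC...recipient",  # placeholder TON address
    "amount": 250_000_000,                   # nanotons (0.25 TON)
    "message": "Invoice #42",
}

def send_example(client: httpx.Client) -> dict:
    resp = client.post("/api/v1/blockchain/transaction/send", json=EXAMPLE_PAYLOAD)
    resp.raise_for_status()
    return resp.json()["transaction"]  # hash, fee, status="pending", ...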
@blockchain_bp.route("/transaction/<tx_hash>/status", methods=["GET"])
@rate_limit(limit=100, window=3600) # 100 status checks per hour
@require_auth(permissions=["blockchain.read"])
async def get_transaction_status(request: Request, tx_hash: str) -> JSONResponse:
"""
Get transaction status and confirmation details.
Args:
request: Sanic request object
tx_hash: Transaction hash to check
Returns:
JSONResponse: Transaction status information
"""
try:
user_id = request.ctx.user.id
# Check cache first
cache_manager = get_cache_manager()
status_key = f"tx_status:{tx_hash}"
cached_status = await cache_manager.get(status_key)
if cached_status and cached_status.get("status") in ["confirmed", "failed"]:
# Cache confirmed/failed transactions longer
return response.json(cached_status)
# Get transaction from database
async with get_async_session() as session:
from app.core.models.blockchain import BlockchainTransaction
tx_stmt = select(BlockchainTransaction).where(
and_(
BlockchainTransaction.transaction_hash == tx_hash,
BlockchainTransaction.user_id == user_id
)
)
tx_result = await session.execute(tx_stmt)
tx_record = tx_result.scalar_one_or_none()
if not tx_record:
return response.json(
{"error": "Transaction not found", "code": "TRANSACTION_NOT_FOUND"},
status=404
)
# Get current status from blockchain
ton_service = TONService()
status_data = await ton_service.get_transaction_status(tx_hash)
if status_data.get("error"):
# Return database status if blockchain query fails
tx_status = {
"hash": tx_record.transaction_hash,
"status": tx_record.status,
"confirmations": 0,
"amount": tx_record.amount,
"created_at": tx_record.created_at.isoformat(),
"blockchain_error": status_data["error"]
}
else:
# Update status based on blockchain data
new_status = "confirmed" if status_data.get("confirmed") else "pending"
if status_data.get("failed"):
new_status = "failed"
tx_status = {
"hash": tx_record.transaction_hash,
"status": new_status,
"confirmations": status_data.get("confirmations", 0),
"block_hash": status_data.get("block_hash"),
"block_time": status_data.get("block_time"),
"amount": tx_record.amount,
"fee": status_data.get("fee", tx_record.network_fee),
"created_at": tx_record.created_at.isoformat(),
"confirmed_at": status_data.get("confirmed_at")
}
# Update database record if status changed
if tx_record.status != new_status:
async with get_async_session() as session:
update_stmt = (
update(BlockchainTransaction)
.where(BlockchainTransaction.id == tx_record.id)
.values(
status=new_status,
confirmations=status_data.get("confirmations", 0),
confirmed_at=datetime.fromisoformat(status_data["confirmed_at"])
if status_data.get("confirmed_at") else None
)
)
await session.execute(update_stmt)
await session.commit()
# Cache status (longer for final states)
cache_ttl = 300 if tx_status["status"] == "pending" else 3600 # 5 min vs 1 hour
await cache_manager.set(status_key, tx_status, ttl=cache_ttl)
return response.json(tx_status)
except Exception as e:
await logger.aerror(
"Failed to get transaction status",
user_id=str(request.ctx.user.id),
tx_hash=tx_hash,
error=str(e)
)
return response.json(
{"error": "Failed to get transaction status", "code": "STATUS_FAILED"},
status=500
)
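# --- Illustrative polling sketch -----------------------------------------------
# Waiting for a transaction to leave the "pending" state via the status
# endpoint above. Pending statuses are cached server-side for 5 minutes, so a
# coarse polling interval is sufficient. Client configuration is assumed.
import asyncio
import httpx

async def wait_for_confirmation(client: httpx.AsyncClient, tx_hash: str,
                                interval: float = 30.0, attempts: int = 40) -> str:
    for _ in range(attempts):
        resp = await client.get(f"/api/v1/blockchain/transaction/{tx_hash}/status")
        resp.raise_for_status()
        status = resp.json()["status"]
        if status in ("confirmed", "failed"):
            return status
        await asyncio.sleep(interval)
    return "pending"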
@blockchain_bp.route("/wallet/create", methods=["POST"])
@rate_limit(limit=1, window=86400) # 1 wallet creation per day
@require_auth(permissions=["blockchain.wallet.create"])
async def create_wallet(request: Request) -> JSONResponse:
"""
Create new TON wallet for user (one per user).
Args:
request: Sanic request object
Returns:
JSONResponse: Wallet creation result
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
# Check if user already has a wallet
user_stmt = select(User).where(User.id == user_id)
user_result = await session.execute(user_stmt)
user = user_result.scalar_one_or_none()
if not user:
return response.json(
{"error": "User not found", "code": "USER_NOT_FOUND"},
status=404
)
if user.wallet_address:
return response.json(
{"error": "Wallet already exists", "code": "WALLET_EXISTS"},
status=400
)
# Create wallet via TON service
ton_service = TONService()
wallet_data = await ton_service.create_wallet()
if wallet_data.get("error"):
return response.json(
{"error": wallet_data["error"], "code": "WALLET_CREATION_FAILED"},
status=500
)
# Store wallet information (encrypt private key)
from app.core.security import encrypt_data
encrypted_private_key = encrypt_data(
wallet_data["private_key"],
context=f"wallet:{user_id}"
)
user.wallet_address = wallet_data["address"]
user.wallet_private_key = encrypted_private_key
user.wallet_created_at = datetime.utcnow()
await session.commit()
await logger.ainfo(
"Wallet created successfully",
user_id=str(user_id),
wallet_address=wallet_data["address"]
)
return response.json({
"message": "Wallet created successfully",
"wallet": {
"address": wallet_data["address"],
"created_at": datetime.utcnow().isoformat(),
"balance": "0",
"network": "TON"
},
"security_note": "Private key is encrypted and stored securely. Keep your account secure."
}, status=201)
except Exception as e:
await logger.aerror(
"Failed to create wallet",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to create wallet", "code": "WALLET_FAILED"},
status=500
)
@blockchain_bp.route("/stats", methods=["GET"])
@rate_limit(limit=50, window=3600) # 50 stats requests per hour
@require_auth(permissions=["blockchain.read"])
async def get_blockchain_stats(request: Request) -> JSONResponse:
"""
Get user blockchain activity statistics.
Args:
request: Sanic request object
Returns:
JSONResponse: Blockchain activity statistics
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
from sqlalchemy import func
from app.core.models.blockchain import BlockchainTransaction
# Get transaction statistics
stats_stmt = select(
func.count(BlockchainTransaction.id).label('total_transactions'),
func.sum(BlockchainTransaction.amount).label('total_amount'),
func.sum(BlockchainTransaction.network_fee).label('total_fees')
).where(BlockchainTransaction.user_id == user_id)
stats_result = await session.execute(stats_stmt)
stats = stats_result.first()
# Get transactions by type
type_stats_stmt = select(
BlockchainTransaction.transaction_type,
func.count(BlockchainTransaction.id).label('count'),
func.sum(BlockchainTransaction.amount).label('amount')
).where(
BlockchainTransaction.user_id == user_id
).group_by(BlockchainTransaction.transaction_type)
type_result = await session.execute(type_stats_stmt)
type_stats = {
row.transaction_type: {
'count': row.count,
'total_amount': row.amount or 0
}
for row in type_result
}
# Get recent activity (last 30 days)
recent_date = datetime.utcnow() - timedelta(days=30)
recent_stmt = select(
func.count(BlockchainTransaction.id).label('recent_count'),
func.sum(BlockchainTransaction.amount).label('recent_amount')
).where(
and_(
BlockchainTransaction.user_id == user_id,
BlockchainTransaction.created_at >= recent_date
)
)
recent_result = await session.execute(recent_stmt)
recent_stats = recent_result.first()
blockchain_stats = {
"total_transactions": stats.total_transactions or 0,
"total_amount_nanotons": stats.total_amount or 0,
"total_amount_tons": str(Decimal(stats.total_amount or 0) / Decimal("1000000000")),
"total_fees_nanotons": stats.total_fees or 0,
"total_fees_tons": str(Decimal(stats.total_fees or 0) / Decimal("1000000000")),
"by_type": type_stats,
"recent_activity": {
"transactions_30d": recent_stats.recent_count or 0,
"amount_30d_nanotons": recent_stats.recent_amount or 0,
"amount_30d_tons": str(Decimal(recent_stats.recent_amount or 0) / Decimal("1000000000"))
},
"generated_at": datetime.utcnow().isoformat()
}
return response.json(blockchain_stats)
except Exception as e:
await logger.aerror(
"Failed to get blockchain stats",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Failed to get blockchain statistics", "code": "STATS_FAILED"},
status=500
)

View File

@ -0,0 +1,591 @@
"""
Enhanced content management routes with async operations and comprehensive validation.
Provides secure upload, download, metadata management with Redis caching.
"""
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from uuid import UUID, uuid4
from sanic import Blueprint, Request, response
from sanic.response import JSONResponse, ResponseStream
from sqlalchemy import select, update, delete, and_, or_
from sqlalchemy.orm import selectinload
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.models.content import Content, ContentMetadata, ContentAccess, License
from app.core.models.user import User
from app.api.middleware import require_auth, validate_request, rate_limit
from app.core.validation import ContentSchema, ContentUpdateSchema, ContentSearchSchema
from app.core.storage import StorageManager
from app.core.security import encrypt_data, decrypt_data, generate_access_token
# Initialize blueprint
content_bp = Blueprint("content", url_prefix="/api/v1/content")
logger = get_logger(__name__)
settings = get_settings()
@content_bp.route("/", methods=["POST"])
@rate_limit(limit=50, window=3600) # 50 uploads per hour
@require_auth(permissions=["content.create"])
@validate_request(ContentSchema)
async def create_content(request: Request) -> JSONResponse:
"""
Create new content with metadata and security validation.
Args:
request: Sanic request with validated content data
Returns:
JSONResponse: Created content information with upload URLs
"""
try:
data = request.json
user_id = request.ctx.user.id
async with get_async_session() as session:
# Check user upload quota
quota_key = f"user:{user_id}:upload_quota"
cache_manager = get_cache_manager()
current_quota = await cache_manager.get(quota_key, default=0)
if current_quota >= settings.MAX_UPLOADS_PER_DAY:
return response.json(
{"error": "Upload quota exceeded", "code": "QUOTA_EXCEEDED"},
status=429
)
# Create content record
content = Content(
id=uuid4(),
user_id=user_id,
title=data["title"],
description=data.get("description"),
content_type=data["content_type"],
file_size=data.get("file_size", 0),
status="pending",
visibility=data.get("visibility", "private"),
tags=data.get("tags", []),
license_id=data.get("license_id")
)
session.add(content)
# Create metadata if provided
if data.get("metadata"):
metadata = ContentMetadata(
content_id=content.id,
metadata_type="custom",
data=data["metadata"]
)
session.add(metadata)
await session.commit()
await session.refresh(content)
# Update quota counter
await cache_manager.increment(quota_key, ttl=86400) # 24 hours
# Generate upload URLs for chunked upload
storage_manager = StorageManager()
upload_info = await storage_manager.create_upload_session(
content.id, data.get("file_size", 0)
)
# Cache content for quick access
content_cache_key = f"content:{content.id}"
await cache_manager.set(
content_cache_key,
{
"id": str(content.id),
"title": content.title,
"status": content.status,
"user_id": str(content.user_id)
},
ttl=3600
)
await logger.ainfo(
"Content created successfully",
content_id=str(content.id),
user_id=str(user_id),
title=content.title
)
return response.json({
"content_id": str(content.id),
"upload_session": upload_info,
"status": content.status,
"created_at": content.created_at.isoformat()
}, status=201)
except Exception as e:
await logger.aerror(
"Failed to create content",
error=str(e),
user_id=str(request.ctx.user.id)
)
return response.json(
{"error": "Failed to create content", "code": "CREATION_FAILED"},
status=500
)
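# --- Illustrative client sketch ----------------------------------------------
# The creation step above: POST the metadata first, then use the returned
# upload_session to push chunks. The chunk-upload endpoint itself is not
# defined in this file, so only the creation call is sketched; field values
# are placeholders.
import os
import httpx

def start_upload(client: httpx.Client, path: str) -> dict:
    meta = {
        "title": os.path.basename(path),
        "content_type": "application/octet-stream",
        "file_size": os.path.getsize(path),
        "visibility": "private",
        "tags": ["upload"],
    }
    resp = client.post("/api/v1/content/", json=meta)
    resp.raise_for_status()
    body = resp.json()
    # body["upload_session"] describes where and how to send the file chunks.
    return body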
@content_bp.route("/<content_id:uuid>", methods=["GET"])
@rate_limit(limit=200, window=3600) # 200 requests per hour
@require_auth(permissions=["content.read"])
async def get_content(request: Request, content_id: UUID) -> JSONResponse:
"""
Retrieve content information with access control and caching.
Args:
request: Sanic request object
content_id: UUID of the content to retrieve
Returns:
JSONResponse: Content information or error
"""
try:
user_id = request.ctx.user.id
cache_manager = get_cache_manager()
# Try cache first
cache_key = f"content:{content_id}:full"
cached_content = await cache_manager.get(cache_key)
if cached_content:
# Check access permissions from cache
if await _check_content_access(content_id, user_id, "read"):
return response.json(cached_content)
else:
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
async with get_async_session() as session:
# Load content with relationships
stmt = (
select(Content)
.options(
selectinload(Content.metadata),
selectinload(Content.access_controls),
selectinload(Content.license)
)
.where(Content.id == content_id)
)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return response.json(
{"error": "Content not found", "code": "NOT_FOUND"},
status=404
)
# Check access permissions
if not await _check_content_access_db(session, content, user_id, "read"):
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Prepare response data
content_data = {
"id": str(content.id),
"title": content.title,
"description": content.description,
"content_type": content.content_type,
"file_size": content.file_size,
"status": content.status,
"visibility": content.visibility,
"tags": content.tags,
"created_at": content.created_at.isoformat(),
"updated_at": content.updated_at.isoformat(),
"metadata": [
{
"type": m.metadata_type,
"data": m.data
} for m in content.metadata
],
"license": {
"name": content.license.name,
"description": content.license.description
} if content.license else None
}
# Cache the result
await cache_manager.set(cache_key, content_data, ttl=1800) # 30 minutes
# Update access statistics
await _update_access_stats(content_id, user_id, "view")
return response.json(content_data)
except Exception as e:
await logger.aerror(
"Failed to retrieve content",
content_id=str(content_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to retrieve content", "code": "RETRIEVAL_FAILED"},
status=500
)
@content_bp.route("/<content_id:uuid>", methods=["PUT"])
@rate_limit(limit=100, window=3600) # 100 updates per hour
@require_auth(permissions=["content.update"])
@validate_request(ContentUpdateSchema)
async def update_content(request: Request, content_id: UUID) -> JSONResponse:
"""
Update content metadata and settings with validation.
Args:
request: Sanic request with update data
content_id: UUID of content to update
Returns:
JSONResponse: Updated content information
"""
try:
data = request.json
user_id = request.ctx.user.id
async with get_async_session() as session:
# Load existing content
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return response.json(
{"error": "Content not found", "code": "NOT_FOUND"},
status=404
)
# Check update permissions
if not await _check_content_access_db(session, content, user_id, "update"):
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Update fields
for field, value in data.items():
if hasattr(content, field) and field not in ["id", "user_id", "created_at"]:
setattr(content, field, value)
content.updated_at = datetime.utcnow()
await session.commit()
# Invalidate caches
cache_manager = get_cache_manager()
await cache_manager.delete(f"content:{content_id}")
await cache_manager.delete(f"content:{content_id}:full")
await logger.ainfo(
"Content updated successfully",
content_id=str(content_id),
user_id=str(user_id),
updated_fields=list(data.keys())
)
return response.json({
"content_id": str(content_id),
"status": "updated",
"updated_at": content.updated_at.isoformat()
})
except Exception as e:
await logger.aerror(
"Failed to update content",
content_id=str(content_id),
error=str(e)
)
return response.json(
{"error": "Failed to update content", "code": "UPDATE_FAILED"},
status=500
)
@content_bp.route("/search", methods=["POST"])
@rate_limit(limit=100, window=3600) # 100 searches per hour
@require_auth(permissions=["content.read"])
@validate_request(ContentSearchSchema)
async def search_content(request: Request) -> JSONResponse:
"""
Search content with filters, pagination and caching.
Args:
request: Sanic request with search parameters
Returns:
JSONResponse: Search results with pagination
"""
try:
data = request.json
user_id = request.ctx.user.id
# Build cache key from search parameters
search_key = f"search:{hash(str(sorted(data.items())))}:{user_id}"
cache_manager = get_cache_manager()
# Try cache first
cached_results = await cache_manager.get(search_key)
if cached_results:
return response.json(cached_results)
async with get_async_session() as session:
# Build base query
stmt = select(Content).where(
or_(
Content.visibility == "public",
Content.user_id == user_id
)
)
# Apply filters
if data.get("query"):
query = f"%{data['query']}%"
stmt = stmt.where(
or_(
Content.title.ilike(query),
Content.description.ilike(query)
)
)
if data.get("content_type"):
stmt = stmt.where(Content.content_type == data["content_type"])
if data.get("tags"):
for tag in data["tags"]:
stmt = stmt.where(Content.tags.contains([tag]))
if data.get("status"):
stmt = stmt.where(Content.status == data["status"])
# Apply date filters
if data.get("date_from"):
stmt = stmt.where(Content.created_at >= datetime.fromisoformat(data["date_from"]))
if data.get("date_to"):
stmt = stmt.where(Content.created_at <= datetime.fromisoformat(data["date_to"]))
# Apply pagination
page = data.get("page", 1)
per_page = min(data.get("per_page", 20), 100) # Max 100 items per page
offset = (page - 1) * per_page
# Get total count
from sqlalchemy import func
count_stmt = select(func.count()).select_from(stmt.subquery())  # count(*) over the filtered query; counting Content.id would re-join Content
total_result = await session.execute(count_stmt)
total = total_result.scalar()
# Apply ordering and pagination
if data.get("sort_by") == "created_at":
stmt = stmt.order_by(Content.created_at.desc())
elif data.get("sort_by") == "title":
stmt = stmt.order_by(Content.title.asc())
else:
stmt = stmt.order_by(Content.updated_at.desc())
stmt = stmt.offset(offset).limit(per_page)
# Execute query
result = await session.execute(stmt)
content_list = result.scalars().all()
# Prepare response
search_results = {
"results": [
{
"id": str(content.id),
"title": content.title,
"description": content.description,
"content_type": content.content_type,
"file_size": content.file_size,
"status": content.status,
"visibility": content.visibility,
"tags": content.tags,
"created_at": content.created_at.isoformat()
} for content in content_list
],
"pagination": {
"page": page,
"per_page": per_page,
"total": total,
"pages": (total + per_page - 1) // per_page
}
}
# Cache results for 5 minutes
await cache_manager.set(search_key, search_results, ttl=300)
return response.json(search_results)
except Exception as e:
await logger.aerror(
"Search failed",
user_id=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Search failed", "code": "SEARCH_FAILED"},
status=500
)
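# --- Illustrative search request -----------------------------------------------
# Example body for POST /api/v1/content/search, limited to the filters the
# handler above actually inspects. The "ready" status value is an assumption;
# per_page is capped at 100 server-side.
EXAMPLE_SEARCH = {
    "query": "quarterly report",
    "content_type": "application/pdf",
    "tags": ["finance"],
    "status": "ready",
    "date_from": "2025-01-01T00:00:00",
    "date_to": "2025-06-30T23:59:59",
    "page": 1,
    "per_page": 50,
    "sort_by": "created_at",
}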
@content_bp.route("/<content_id:uuid>/download", methods=["GET"])
@rate_limit(limit=50, window=3600) # 50 downloads per hour
@require_auth(permissions=["content.download"])
async def download_content(request: Request, content_id: UUID) -> ResponseStream:
"""
Secure content download with access control and logging.
Args:
request: Sanic request object
content_id: UUID of content to download
Returns:
ResponseStream: File stream or error response
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
# Load content
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return response.json(
{"error": "Content not found", "code": "NOT_FOUND"},
status=404
)
# Check download permissions
if not await _check_content_access_db(session, content, user_id, "download"):
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Generate download token
download_token = generate_access_token(
{"content_id": str(content_id), "user_id": str(user_id)},
expires_in=3600 # 1 hour
)
# Log download activity
await _update_access_stats(content_id, user_id, "download")
# Get storage manager and create download stream
storage_manager = StorageManager()
file_stream = await storage_manager.get_file_stream(content.file_path)
await logger.ainfo(
"Content download initiated",
content_id=str(content_id),
user_id=str(user_id),
filename=content.title
)
return await response.stream(
file_stream,
headers={
"Content-Type": content.content_type or "application/octet-stream",
"Content-Disposition": f'attachment; filename="{content.title}"',
"Content-Length": str(content.file_size),
"X-Download-Token": download_token
}
)
except Exception as e:
await logger.aerror(
"Download failed",
content_id=str(content_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Download failed", "code": "DOWNLOAD_FAILED"},
status=500
)
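# --- Illustrative download sketch ------------------------------------------------
# Streaming GET /api/v1/content/<id>/download to disk without buffering the
# whole file in memory. Client base URL and auth headers are assumed to be
# configured on the httpx.Client.
import httpx

def download_to(client: httpx.Client, content_id: str, dest: str) -> None:
    with client.stream("GET", f"/api/v1/content/{content_id}/download") as resp:
        resp.raise_for_status()
        with open(dest, "wb") as fh:
            for chunk in resp.iter_bytes():
                fh.write(chunk)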
async def _check_content_access(content_id: UUID, user_id: UUID, action: str) -> bool:
"""Check user access to content from cache or database."""
cache_manager = get_cache_manager()
access_key = f"access:{content_id}:{user_id}:{action}"
cached_access = await cache_manager.get(access_key)
if cached_access is not None:
return cached_access
async with get_async_session() as session:
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return False
has_access = await _check_content_access_db(session, content, user_id, action)
# Cache result for 5 minutes
await cache_manager.set(access_key, has_access, ttl=300)
return has_access
async def _check_content_access_db(session, content: Content, user_id: UUID, action: str) -> bool:
"""Check user access to content in database."""
# Content owner always has access
if content.user_id == user_id:
return True
# Public content allows read access
if content.visibility == "public" and action in ["read", "view"]:
return True
# Check explicit access controls
stmt = (
select(ContentAccess)
.where(
and_(
ContentAccess.content_id == content.id,
ContentAccess.user_id == user_id,
ContentAccess.permission == action,
ContentAccess.expires_at > datetime.utcnow()
)
)
)
result = await session.execute(stmt)
access_control = result.scalar_one_or_none()
return access_control is not None
async def _update_access_stats(content_id: UUID, user_id: UUID, action: str) -> None:
"""Update content access statistics."""
try:
cache_manager = get_cache_manager()
# Update daily stats
today = datetime.utcnow().date().isoformat()
stats_key = f"stats:{content_id}:{action}:{today}"
await cache_manager.increment(stats_key, ttl=86400)
# Update user activity
user_activity_key = f"activity:{user_id}:{action}:{today}"
await cache_manager.increment(user_activity_key, ttl=86400)
except Exception as e:
await logger.awarning(
"Failed to update access stats",
content_id=str(content_id),
user_id=str(user_id),
action=action,
error=str(e)
)

View File

@ -0,0 +1,226 @@
"""Health check and system status endpoints."""
import logging
import asyncio
from datetime import datetime
from typing import Dict, Any
from sanic import Blueprint, Request, response
from sanic.response import JSONResponse
from app.core.config import get_settings
from sqlalchemy import text
from app.core.database import get_async_session
from app.core.metrics import get_metrics, get_metrics_content_type, metrics_collector
from app.core.background.indexer_service import indexer_service
from app.core.background.convert_service import convert_service
from app.core.background.ton_service import ton_service
logger = logging.getLogger(__name__)
health_bp = Blueprint("health", version=1)
@health_bp.route("/health", methods=["GET"])
async def health_check(request: Request) -> JSONResponse:
"""Basic health check endpoint."""
return response.json({
"status": "healthy",
"timestamp": datetime.utcnow().isoformat(),
"service": "my-uploader-bot",
"version": "2.0.0"
})
@health_bp.route("/health/detailed", methods=["GET"])
async def detailed_health_check(request: Request) -> JSONResponse:
"""Detailed health check with component status."""
health_status = {
"status": "healthy",
"timestamp": datetime.utcnow().isoformat(),
"service": "my-uploader-bot",
"version": "2.0.0",
"components": {}
}
overall_healthy = True
# Database health
try:
async with get_async_session() as session:
await session.execute("SELECT 1")
health_status["components"]["database"] = {
"status": "healthy",
"message": "Database connection successful"
}
except Exception as e:
health_status["components"]["database"] = {
"status": "unhealthy",
"message": f"Database error: {str(e)}"
}
overall_healthy = False
# Redis health
try:
import redis.asyncio as redis
settings = get_settings()
redis_client = redis.from_url(settings.redis_url)
await redis_client.ping()
await redis_client.close()
health_status["components"]["cache"] = {
"status": "healthy",
"message": "Redis connection successful"
}
except Exception as e:
health_status["components"]["cache"] = {
"status": "unhealthy",
"message": f"Redis error: {str(e)}"
}
overall_healthy = False
# TON service health
try:
# Check if TON service is responsive
test_result = await ton_service.ping()
health_status["components"]["blockchain"] = {
"status": "healthy" if test_result else "degraded",
"message": "TON service available" if test_result else "TON service degraded"
}
if not test_result:
overall_healthy = False
except Exception as e:
health_status["components"]["blockchain"] = {
"status": "unhealthy",
"message": f"TON service error: {str(e)}"
}
overall_healthy = False
# Background services health
health_status["components"]["background_services"] = {
"indexer": {
"status": "healthy" if indexer_service.is_running else "stopped",
"active_tasks": len([t for t in indexer_service.tasks if not t.done()])
},
"converter": {
"status": "healthy" if convert_service.is_running else "stopped",
"active_tasks": len([t for t in convert_service.tasks if not t.done()])
}
}
# Update overall status
if not overall_healthy:
health_status["status"] = "unhealthy"
status_code = 200 if overall_healthy else 503
return response.json(health_status, status=status_code)
@health_bp.route("/health/ready", methods=["GET"])
async def readiness_check(request: Request) -> JSONResponse:
"""Kubernetes readiness probe endpoint."""
try:
# Quick database check
async with get_async_session() as session:
await session.execute("SELECT 1")
return response.json({
"status": "ready",
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
return response.json({
"status": "not_ready",
"error": str(e),
"timestamp": datetime.utcnow().isoformat()
}, status=503)
@health_bp.route("/health/live", methods=["GET"])
async def liveness_check(request: Request) -> JSONResponse:
"""Kubernetes liveness probe endpoint."""
return response.json({
"status": "alive",
"timestamp": datetime.utcnow().isoformat()
})
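# --- Illustrative external probe --------------------------------------------------
# With version=1 on the blueprint, the probes above resolve to /v1/health,
# /v1/health/ready and /v1/health/live. A minimal readiness check; host and
# port are assumptions.
import httpx

def is_ready(base_url: str = "http://localhost:15100") -> bool:
    try:
        resp = httpx.get(f"{base_url}/v1/health/ready", timeout=2.0)
        return resp.status_code == 200 and resp.json().get("status") == "ready"
    except httpx.HTTPError:
        return False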
@health_bp.route("/metrics", methods=["GET"])
async def prometheus_metrics(request: Request):
"""Prometheus metrics endpoint."""
try:
metrics_data = await get_metrics()
return response.raw(
metrics_data,
content_type=get_metrics_content_type()
)
except Exception as e:
logger.error(f"Error generating metrics: {e}")
return response.json({
"error": "Failed to generate metrics"
}, status=500)
@health_bp.route("/stats", methods=["GET"])
async def system_stats(request: Request) -> JSONResponse:
"""System statistics endpoint."""
try:
stats = {
"timestamp": datetime.utcnow().isoformat(),
"uptime": metrics_collector.start_time,
"services": {}
}
# Get indexer stats
try:
indexer_stats = await indexer_service.get_indexing_stats()
stats["services"]["indexer"] = indexer_stats
except Exception as e:
stats["services"]["indexer"] = {"error": str(e)}
# Get converter stats
try:
converter_stats = await convert_service.get_processing_stats()
stats["services"]["converter"] = converter_stats
except Exception as e:
stats["services"]["converter"] = {"error": str(e)}
return response.json(stats)
except Exception as e:
logger.error(f"Error getting system stats: {e}")
return response.json({
"error": "Failed to get system stats"
}, status=500)
@health_bp.route("/debug/info", methods=["GET"])
async def debug_info(request: Request) -> JSONResponse:
"""Debug information endpoint (development only)."""
settings = get_settings()
if settings.environment != "development":
return response.json({
"error": "Debug endpoint only available in development"
}, status=403)
debug_data = {
"timestamp": datetime.utcnow().isoformat(),
"environment": settings.environment,
"debug_mode": settings.debug,
"database_url": settings.database_url.replace(
settings.database_url.split('@')[0].split('//')[1],
"***:***"
) if '@' in settings.database_url else "***",
"redis_url": settings.redis_url.replace(
settings.redis_url.split('@')[0].split('//')[1],
"***:***"
) if '@' in settings.redis_url else "***",
"storage_backend": settings.storage_backend,
"ton_network": settings.ton_network,
"active_tasks": {
"indexer": len([t for t in indexer_service.tasks if not t.done()]),
"converter": len([t for t in convert_service.tasks if not t.done()])
}
}
return response.json(debug_data)

View File

@ -0,0 +1,379 @@
"""MY Network Monitoring Interface - веб-интерфейс мониторинга сети в хакерском стиле."""
import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any
from fastapi import APIRouter, Request, HTTPException
from fastapi.responses import HTMLResponse
from fastapi.templating import Jinja2Templates
from pathlib import Path
logger = logging.getLogger(__name__)
# Create the monitoring router
router = APIRouter(prefix="/api/my/monitor", tags=["MY Network Monitoring"])
# Configure templates
templates_dir = Path(__file__).parent.parent.parent / "templates"
templates_dir.mkdir(exist_ok=True)
templates = Jinja2Templates(directory=str(templates_dir))
def get_node_service():
"""Получить сервис ноды."""
try:
from app.core.my_network.node_service import get_node_service
return get_node_service()
except Exception as e:
logger.error(f"Error getting node service: {e}")
return None
@router.get("/", response_class=HTMLResponse)
async def monitoring_dashboard(request: Request):
"""Главная страница мониторинга MY Network."""
try:
# Gather data for the dashboard
node_service = get_node_service()
if not node_service:
monitoring_data = {
"status": "offline",
"error": "MY Network service not available"
}
else:
# Collect data from all components
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
monitoring_data = {
"status": "online",
"node_info": node_info,
"peers_info": peers_info,
"sync_status": sync_status,
"timestamp": datetime.utcnow().isoformat()
}
return templates.TemplateResponse("my_network_monitor.html", {
"request": request,
"monitoring_data": monitoring_data
})
except Exception as e:
logger.error(f"Error rendering monitoring dashboard: {e}")
# Fallback HTML in case templates are unavailable
return HTMLResponse(content=generate_fallback_html(str(e)))
@router.get("/ascii")
async def get_ascii_status():
"""Получить ASCII статус сети."""
try:
node_service = get_node_service()
if not node_service:
return {"ascii": generate_offline_ascii(), "status": "offline"}
# Fetch data
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Generate the ASCII art
ascii_art = await generate_network_ascii(node_info, peers_info, sync_status)
return {
"ascii": ascii_art,
"status": "online",
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error generating ASCII status: {e}")
return {"ascii": generate_error_ascii(str(e)), "status": "error"}
@router.get("/live")
async def live_monitoring_data():
"""Получить живые данные для мониторинга."""
try:
node_service = get_node_service()
if not node_service:
raise HTTPException(status_code=503, detail="MY Network service unavailable")
# Fetch fresh data
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Network statistics
network_stats = {
"connected_peers": peers_info["peer_count"],
"active_syncs": sync_status["active_syncs"],
"queue_size": sync_status["queue_size"],
"uptime": node_info["uptime"],
"status": node_info["status"]
}
return {
"success": True,
"data": {
"node_info": node_info,
"network_stats": network_stats,
"peers": peers_info["peers"][:10], # Показать только первые 10 пиров
"sync_status": sync_status
},
"timestamp": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting live monitoring data: {e}")
raise HTTPException(status_code=500, detail=str(e))
async def generate_network_ascii(node_info: Dict[str, Any], peers_info: Dict[str, Any], sync_status: Dict[str, Any]) -> str:
"""Генерировать ASCII представление состояния сети."""
ascii_parts = []
# Header
ascii_parts.append("""
MY NETWORK v2.0
Distributed Content Protocol
""")
# Node information
status_indicator = "🟢" if node_info.get("status") == "running" else "🔴"
uptime_hours = int(node_info.get("uptime", 0) / 3600)
ascii_parts.append(f"""
NODE STATUS
Node ID: {node_info.get('node_id', 'unknown')[:16]}...
Status: {status_indicator} {node_info.get('status', 'unknown').upper()}
Uptime: {uptime_hours}h {int((node_info.get('uptime', 0) % 3600) / 60)}m
Version: MY Network {node_info.get('version', '2.0')}
""")
# Peer information
peer_count = peers_info.get("peer_count", 0)
peer_status = "🌐" if peer_count > 0 else "🏝️"
ascii_parts.append(f"""
NETWORK STATUS
Connected Peers: {peer_status} {peer_count:>3}
Known Nodes: {len(peers_info.get('peers', [])):>3}
Network Health: {'CONNECTED' if peer_count > 0 else 'ISOLATED':>9}
""")
# Synchronization status
sync_running = sync_status.get("is_running", False)
active_syncs = sync_status.get("active_syncs", 0)
queue_size = sync_status.get("queue_size", 0)
sync_indicator = "" if sync_running else "⏸️"
ascii_parts.append(f"""
SYNC STATUS
Sync Engine: {sync_indicator} {'RUNNING' if sync_running else 'STOPPED':>7}
Active Syncs: {active_syncs:>3}
Queue Size: {queue_size:>3}
Workers: {sync_status.get('workers_count', 0):>3}
""")
# Network visualization
if peer_count > 0:
ascii_parts.append(generate_network_topology(peers_info.get("peers", [])[:6]))
# Recent sync events
recent_syncs = sync_status.get("recent_syncs", [])
if recent_syncs:
ascii_parts.append(generate_sync_history(recent_syncs[-5:]))
# Footer
current_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
ascii_parts.append(f"""
Last Updated: {current_time}
MY Network Protocol - Decentralized Content Distribution System
""")
return "".join(ascii_parts)
def generate_network_topology(peers: List[Dict[str, Any]]) -> str:
"""Генерировать ASCII топологию сети."""
topology = ["""
NETWORK TOPOLOGY
[THIS NODE]
"""]
if len(peers) == 1:
topology.append("│ │ │")
topology.append(f"│ [{peers[0].get('node_id', 'unknown')[:8]}...] │")
elif len(peers) <= 3:
topology.append("│ ┌───────┼───────┐ │")
for i, peer in enumerate(peers):
spaces = " " if i == 0 else (" " if i == 1 else " ")
topology.append(f"{spaces}[{peer.get('node_id', 'unknown')[:8]}...] │")
else:
topology.append("│ ┌───────┬───────┼───────┬───────┐ │")
topology.append("│ │ │ │ │ │ │")
for i, peer in enumerate(peers[:5]):
if i < 5:
spaces = [" ", " ", " ", " ", " "][i]
topology.append(f"{spaces}[{peer.get('node_id', 'unknown')[:6]}] │")
if len(peers) > 5:
topology.append("│ ... │")
topology.append("│ │")
topology.append("└──────────────────────────────────────────────────────────────────────────────┘")
return "\n".join(topology) + "\n"
def generate_sync_history(recent_syncs: List[Dict[str, Any]]) -> str:
"""Генерировать историю синхронизации."""
history = ["""
RECENT SYNC ACTIVITY """]
if not recent_syncs:
history.append("│ No recent sync activity │")
else:
for sync in recent_syncs:
content_hash = sync.get("content_hash", "unknown")[:12]
status = sync.get("status", "unknown")
status_icon = {"completed": "", "failed": "", "partial": "⚠️"}.get(status, "")
history.append(f"{status_icon} {content_hash}... - {status.upper():>9}")
history.append("└──────────────────────────────────────────────────────────────────────────────┘")
return "\n".join(history) + "\n"
def generate_offline_ascii() -> str:
"""Генерировать ASCII для офлайн состояния."""
return """
MY NETWORK v2.0
Distributed Content Protocol
SYSTEM STATUS
🔴 OFFLINE
MY Network service is not available
Status: OFFLINE - Service not initialized
"""
def generate_error_ascii(error_message: str) -> str:
"""Генерировать ASCII для ошибки."""
return f"""
MY NETWORK v2.0
Distributed Content Protocol
ERROR STATE
ERROR
{error_message[:64]:^64}
Status: ERROR - Check system logs for details
"""
def generate_fallback_html(error_message: str = "") -> str:
"""Генерировать fallback HTML если шаблоны не работают."""
return f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>MY Network Monitor</title>
<style>
body {{
background: #000;
color: #0f0;
font-family: 'Courier New', monospace;
margin: 0;
padding: 20px;
overflow-x: auto;
}}
.container {{
max-width: 1200px;
margin: 0 auto;
}}
.ascii-art {{
white-space: pre;
font-size: 12px;
line-height: 1.2;
}}
.error {{
color: #f00;
text-align: center;
padding: 20px;
}}
.refresh-btn {{
background: #0f0;
color: #000;
border: none;
padding: 10px 20px;
font-family: inherit;
cursor: pointer;
margin: 20px 0;
}}
.refresh-btn:hover {{
background: #fff;
}}
</style>
</head>
<body>
<div class="container">
<div class="ascii-art">
{generate_error_ascii(error_message) if error_message else generate_offline_ascii()}
</div>
<button class="refresh-btn" onclick="location.reload()">REFRESH SYSTEM STATUS</button>
<div class="error">
{f"Error: {error_message}" if error_message else "MY Network service not available"}
</div>
</div>
<script>
// Auto-refresh every 30 seconds
setTimeout(() => location.reload(), 30000);
</script>
</body>
</html>
'''
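# --- Illustrative terminal client ---------------------------------------------
# Fetches /api/my/monitor/ascii (served by the router above) and redraws the
# ASCII dashboard on an interval. Host and port are assumptions.
import time
import httpx

def watch_ascii(base_url: str = "http://localhost:15100", interval: float = 30.0) -> None:
    while True:
        data = httpx.get(f"{base_url}/api/my/monitor/ascii", timeout=5.0).json()
        print("\033[2J\033[H", end="")  # clear the terminal and move the cursor home
        print(data["ascii"])
        time.sleep(interval)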

View File

@ -0,0 +1,452 @@
"""MY Network Monitoring Sanic Blueprint - веб-интерфейс мониторинга сети."""
import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Any
from pathlib import Path
from sanic import Blueprint, Request
from sanic.response import json as json_response, html as html_response
from sanic.exceptions import SanicException
from app.core.logging import get_logger
logger = get_logger(__name__)
# Create the monitoring blueprint
bp = Blueprint("my_monitoring", url_prefix="/api/my/monitor")
def get_node_service():
"""Получить сервис ноды."""
try:
from app.core.my_network.node_service import get_node_service
return get_node_service()
except Exception as e:
logger.error(f"Error getting node service: {e}")
return None
@bp.get("/")
async def monitoring_dashboard(request: Request):
"""Главная страница мониторинга MY Network."""
try:
# Gather data for the dashboard
node_service = get_node_service()
if not node_service:
monitoring_data = {
"status": "offline",
"error": "MY Network service not available"
}
else:
# Collect data from all components
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
monitoring_data = {
"status": "online",
"node_info": node_info,
"peers_info": peers_info,
"sync_status": sync_status,
"timestamp": datetime.utcnow().isoformat()
}
# Try to render with a template
try:
from jinja2 import Environment, FileSystemLoader
# Configure Jinja2
templates_dir = Path(__file__).parent.parent.parent / "templates"
if templates_dir.exists():
env = Environment(loader=FileSystemLoader(str(templates_dir)))
template = env.get_template("my_network_monitor.html")
html_content = template.render(monitoring_data=monitoring_data)
return html_response(html_content)
except Exception as e:
logger.warning(f"Template rendering failed: {e}")
# Fall back to generated HTML when templates are unavailable
return html_response(generate_fallback_html(monitoring_data))
except Exception as e:
logger.error(f"Error rendering monitoring dashboard: {e}")
return html_response(generate_fallback_html({"status": "error", "error": str(e)}))
@bp.get("/ascii")
async def get_ascii_status(request: Request):
"""Получить ASCII статус сети."""
try:
node_service = get_node_service()
if not node_service:
return json_response({"ascii": generate_offline_ascii(), "status": "offline"})
# Fetch data
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Generate the ASCII art
ascii_art = await generate_network_ascii(node_info, peers_info, sync_status)
return json_response({
"ascii": ascii_art,
"status": "online",
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error generating ASCII status: {e}")
return json_response({"ascii": generate_error_ascii(str(e)), "status": "error"})
@bp.get("/live")
async def live_monitoring_data(request: Request):
"""Получить живые данные для мониторинга."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service unavailable"},
status=503
)
# Fetch fresh data
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Network statistics
network_stats = {
"connected_peers": peers_info["peer_count"],
"active_syncs": sync_status["active_syncs"],
"queue_size": sync_status["queue_size"],
"uptime": node_info["uptime"],
"status": node_info["status"]
}
return json_response({
"success": True,
"data": {
"node_info": node_info,
"network_stats": network_stats,
"peers": peers_info["peers"][:10], # Показать только первые 10 пиров
"sync_status": sync_status
},
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error getting live monitoring data: {e}")
return json_response({"error": str(e)}, status=500)
async def generate_network_ascii(node_info: Dict[str, Any], peers_info: Dict[str, Any], sync_status: Dict[str, Any]) -> str:
"""Генерировать ASCII представление состояния сети."""
ascii_parts = []
# Заголовок
ascii_parts.append("""
MY NETWORK v2.0
Distributed Content Protocol
""")
# Node information
status_indicator = "🟢" if node_info.get("status") == "running" else "🔴"
uptime_hours = int(node_info.get("uptime", 0) / 3600)
ascii_parts.append(f"""
NODE STATUS
Node ID: {node_info.get('node_id', 'unknown')[:16]}...
Status: {status_indicator} {node_info.get('status', 'unknown').upper()}
Uptime: {uptime_hours}h {int((node_info.get('uptime', 0) % 3600) / 60)}m
Version: MY Network {node_info.get('version', '2.0')}
""")
# Peer information
peer_count = peers_info.get("peer_count", 0)
peer_status = "🌐" if peer_count > 0 else "🏝️"
ascii_parts.append(f"""
NETWORK STATUS
Connected Peers: {peer_status} {peer_count:>3}
Known Nodes: {len(peers_info.get('peers', [])):>3}
Network Health: {'CONNECTED' if peer_count > 0 else 'ISOLATED':>9}
""")
# Synchronization status
sync_running = sync_status.get("is_running", False)
active_syncs = sync_status.get("active_syncs", 0)
queue_size = sync_status.get("queue_size", 0)
sync_indicator = "" if sync_running else "⏸️"
ascii_parts.append(f"""
SYNC STATUS
Sync Engine: {sync_indicator} {'RUNNING' if sync_running else 'STOPPED':>7}
Active Syncs: {active_syncs:>3}
Queue Size: {queue_size:>3}
Workers: {sync_status.get('workers_count', 0):>3}
""")
# Footer
current_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S UTC")
ascii_parts.append(f"""
Last Updated: {current_time}
MY Network Protocol - Decentralized Content Distribution System
""")
return "".join(ascii_parts)
def generate_offline_ascii() -> str:
"""Генерировать ASCII для офлайн состояния."""
return """
MY NETWORK v2.0
Distributed Content Protocol
SYSTEM STATUS
🔴 OFFLINE
MY Network service is not available
Status: OFFLINE - Service not initialized
"""
def generate_error_ascii(error_message: str) -> str:
"""Генерировать ASCII для ошибки."""
return f"""
MY NETWORK v2.0
Distributed Content Protocol
ERROR STATE
ERROR
{error_message[:64]:^64}
Status: ERROR - Check system logs for details
"""
def generate_fallback_html(monitoring_data: Dict[str, Any]) -> str:
"""Генерировать fallback HTML если шаблоны не работают."""
status = monitoring_data.get("status", "unknown")
error_message = monitoring_data.get("error", "")
# Генерировать информацию о статусе
if status == "online":
node_info = monitoring_data.get("node_info", {})
peers_info = monitoring_data.get("peers_info", {})
sync_status = monitoring_data.get("sync_status", {})
status_info = f"""
<div class="status-section">
<h3>Node Status</h3>
<ul>
<li>Node ID: {node_info.get('node_id', 'unknown')[:16]}...</li>
<li>Status: {node_info.get('status', 'unknown').upper()}</li>
<li>Uptime: {int(node_info.get('uptime', 0) / 3600)}h {int((node_info.get('uptime', 0) % 3600) / 60)}m</li>
<li>Version: MY Network {node_info.get('version', '2.0')}</li>
</ul>
</div>
<div class="status-section">
<h3>Network Status</h3>
<ul>
<li>Connected Peers: {peers_info.get('peer_count', 0)}</li>
<li>Known Nodes: {len(peers_info.get('peers', []))}</li>
<li>Network Health: {'CONNECTED' if peers_info.get('peer_count', 0) > 0 else 'ISOLATED'}</li>
</ul>
</div>
<div class="status-section">
<h3>Sync Status</h3>
<ul>
<li>Sync Engine: {'RUNNING' if sync_status.get('is_running', False) else 'STOPPED'}</li>
<li>Active Syncs: {sync_status.get('active_syncs', 0)}</li>
<li>Queue Size: {sync_status.get('queue_size', 0)}</li>
<li>Workers: {sync_status.get('workers_count', 0)}</li>
</ul>
</div>
"""
else:
status_info = f"""
<div class="error-section">
<h3>Status: {status.upper()}</h3>
<p>{error_message if error_message else 'MY Network service not available'}</p>
</div>
"""
return f'''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>MY Network Monitor</title>
<style>
body {{
background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%);
color: #00ff41;
font-family: 'Courier New', monospace;
margin: 0;
padding: 20px;
min-height: 100vh;
}}
.container {{
max-width: 1200px;
margin: 0 auto;
}}
.header {{
text-align: center;
margin-bottom: 30px;
padding: 20px;
border: 2px solid #00ff41;
border-radius: 10px;
background: rgba(0, 255, 65, 0.05);
}}
.header h1 {{
font-size: 2.5em;
text-shadow: 0 0 10px #00ff41;
margin: 0;
}}
.status-section {{
background: rgba(0, 0, 0, 0.7);
border: 1px solid #00ff41;
border-radius: 8px;
padding: 20px;
margin: 20px 0;
}}
.status-section h3 {{
color: #00ff41;
margin-bottom: 15px;
text-transform: uppercase;
border-bottom: 1px solid #00ff41;
padding-bottom: 5px;
}}
.status-section ul {{
list-style: none;
padding: 0;
}}
.status-section li {{
margin: 10px 0;
padding: 5px 0;
border-bottom: 1px dotted #333;
}}
.error-section {{
background: rgba(255, 0, 0, 0.1);
border: 1px solid #ff0000;
border-radius: 8px;
padding: 20px;
margin: 20px 0;
text-align: center;
}}
.error-section h3 {{
color: #ff0000;
margin-bottom: 15px;
}}
.controls {{
text-align: center;
margin: 30px 0;
}}
.btn {{
background: linear-gradient(45deg, #00ff41, #00cc33);
color: #000;
border: none;
padding: 12px 24px;
font-family: inherit;
font-weight: bold;
cursor: pointer;
border-radius: 5px;
text-transform: uppercase;
margin: 0 10px;
text-decoration: none;
display: inline-block;
}}
.btn:hover {{
background: linear-gradient(45deg, #00cc33, #00ff41);
}}
.footer {{
text-align: center;
margin-top: 40px;
padding: 20px;
border-top: 1px solid #00ff41;
color: #888;
}}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>MY NETWORK MONITOR</h1>
<p>Distributed Content Protocol v2.0</p>
<p>Last Update: {datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S UTC')}</p>
</div>
{status_info}
<div class="controls">
<a href="/api/my/monitor/" class="btn">🔄 REFRESH</a>
<a href="/api/my/monitor/ascii" class="btn">📊 ASCII VIEW</a>
<a href="/api/my/node/info" class="btn"> NODE INFO</a>
<a href="/api/my/health" class="btn"> HEALTH</a>
</div>
<div class="footer">
<p>MY Network Protocol - Decentralized Content Distribution System</p>
<p>Real-time monitoring dashboard</p>
</div>
</div>
<script>
// Auto-refresh every 30 seconds
setTimeout(() => location.reload(), 30000);
</script>
</body>
</html>
'''
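A minimal sketch of how this monitoring blueprint might be attached to a Sanic application; the application name, port, and entry point are assumptions and not taken from the repository.
from sanic import Sanic

# Hypothetical wiring; the real project may register its blueprints elsewhere.
app = Sanic("my_uploader_monitor")
app.blueprint(bp)  # exposes /api/my/monitor/, /api/my/monitor/ascii and /api/my/monitor/live

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)  # illustrative port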

View File

@ -0,0 +1,655 @@
"""MY Network API Routes - эндпоинты для работы с распределенной сетью."""
import asyncio
import logging
import json
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any
from fastapi import APIRouter, HTTPException, Depends, UploadFile, File, Query
from fastapi.responses import FileResponse, StreamingResponse
from sqlalchemy import select, and_, func
from sqlalchemy.ext.asyncio import AsyncSession
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content, ContentMetadata
from app.core.security import get_current_user_optional
from app.core.cache import cache
logger = logging.getLogger(__name__)
# Create the MY Network API router
router = APIRouter(prefix="/api/my", tags=["MY Network"])
def get_node_service():
"""Получить сервис ноды."""
try:
from app.core.my_network.node_service import get_node_service
return get_node_service()
except Exception as e:
logger.error(f"Error getting node service: {e}")
raise HTTPException(status_code=503, detail="MY Network service unavailable")
@router.get("/node/info")
async def get_node_info():
"""Получить информацию о текущей ноде."""
try:
node_service = get_node_service()
if not node_service:
raise HTTPException(status_code=503, detail="Node service not available")
node_info = await node_service.get_node_info()
return {
"success": True,
"data": node_info,
"timestamp": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting node info: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/node/peers")
async def get_node_peers():
"""Получить список подключенных пиров."""
try:
node_service = get_node_service()
peers_info = await node_service.get_peers_info()
return {
"success": True,
"data": {
"connected_peers": peers_info["connected_peers"],
"peer_count": peers_info["peer_count"],
"peers": peers_info["peers"]
},
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting peers: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/node/peers/connect")
async def connect_to_peer(peer_data: Dict[str, Any]):
"""Подключиться к новому пиру."""
try:
peer_address = peer_data.get("address")
if not peer_address:
raise HTTPException(status_code=400, detail="Peer address is required")
node_service = get_node_service()
success = await node_service.peer_manager.connect_to_peer(peer_address)
if success:
return {
"success": True,
"message": f"Successfully connected to peer: {peer_address}",
"timestamp": datetime.utcnow().isoformat()
}
else:
raise HTTPException(status_code=400, detail="Failed to connect to peer")
except HTTPException:
raise
except Exception as e:
logger.error(f"Error connecting to peer: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.delete("/node/peers/{peer_id}")
async def disconnect_peer(peer_id: str):
"""Отключиться от пира."""
try:
node_service = get_node_service()
success = await node_service.peer_manager.disconnect_peer(peer_id)
if success:
return {
"success": True,
"message": f"Successfully disconnected from peer: {peer_id}",
"timestamp": datetime.utcnow().isoformat()
}
else:
raise HTTPException(status_code=404, detail="Peer not found or already disconnected")
except HTTPException:
raise
except Exception as e:
logger.error(f"Error disconnecting peer: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/content/list")
async def get_content_list(
limit: int = Query(100, ge=1, le=1000),
offset: int = Query(0, ge=0),
session: AsyncSession = Depends(get_async_session)
):
"""Получить список доступного контента."""
try:
# Кэшировать результат на 5 минут
cache_key = f"my_network:content_list:{limit}:{offset}"
cached_result = await cache.get(cache_key)
if cached_result:
return json.loads(cached_result)
# Fetch content from the database
stmt = (
select(Content, ContentMetadata)
.outerjoin(ContentMetadata, Content.id == ContentMetadata.content_id)
.where(Content.is_active == True)
.order_by(Content.created_at.desc())
.limit(limit)
.offset(offset)
)
result = await session.execute(stmt)
content_items = []
for content, metadata in result:
content_data = {
"hash": content.sha256_hash or content.md5_hash,
"filename": content.filename,
"original_filename": content.original_filename,
"file_size": content.file_size,
"file_type": content.file_type,
"mime_type": content.mime_type,
"created_at": content.created_at.isoformat(),
"encrypted": getattr(content, 'encrypted', False),
"metadata": metadata.to_dict() if metadata else {}
}
content_items.append(content_data)
# Get the total count
count_stmt = select(func.count(Content.id)).where(Content.is_active == True)
count_result = await session.execute(count_stmt)
total_count = count_result.scalar()
response_data = {
"success": True,
"data": {
"content": content_items,
"total": total_count,
"limit": limit,
"offset": offset
},
"timestamp": datetime.utcnow().isoformat()
}
# Cache the result
await cache.set(cache_key, json.dumps(response_data), expire=300)
return response_data
except Exception as e:
logger.error(f"Error getting content list: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/content/{content_hash}/exists")
async def check_content_exists(
content_hash: str,
session: AsyncSession = Depends(get_async_session)
):
"""Проверить существование контента по хешу."""
try:
# Кэшировать результат на 30 минут
cache_key = f"my_network:content_exists:{content_hash}"
cached_result = await cache.get(cache_key)
if cached_result is not None:
return {"exists": cached_result == "true", "hash": content_hash}
# Check the database
stmt = select(Content.id).where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
result = await session.execute(stmt)
exists = result.scalar_one_or_none() is not None
# Cache the result
await cache.set(cache_key, "true" if exists else "false", expire=1800)
return {
"exists": exists,
"hash": content_hash,
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error checking content existence: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/content/{content_hash}/metadata")
async def get_content_metadata(
content_hash: str,
session: AsyncSession = Depends(get_async_session)
):
"""Получить метаданные контента."""
try:
# Кэшировать результат на 10 минут
cache_key = f"my_network:content_metadata:{content_hash}"
cached_result = await cache.get(cache_key)
if cached_result:
return json.loads(cached_result)
# Find the content in the database
stmt = (
select(Content, ContentMetadata)
.outerjoin(ContentMetadata, Content.id == ContentMetadata.content_id)
.where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
)
result = await session.execute(stmt)
content_data = result.first()
if not content_data:
raise HTTPException(status_code=404, detail="Content not found")
content, metadata = content_data
response_data = {
"success": True,
"data": {
"hash": content_hash,
"filename": content.filename,
"original_filename": content.original_filename,
"file_size": content.file_size,
"file_type": content.file_type,
"mime_type": content.mime_type,
"created_at": content.created_at.isoformat(),
"updated_at": content.updated_at.isoformat() if content.updated_at else None,
"encrypted": getattr(content, 'encrypted', False),
"processing_status": getattr(content, 'processing_status', 'completed'),
"metadata": metadata.to_dict() if metadata else {}
},
"timestamp": datetime.utcnow().isoformat()
}
# Cache the result
await cache.set(cache_key, json.dumps(response_data), expire=600)
return response_data
except HTTPException:
raise
except Exception as e:
logger.error(f"Error getting content metadata: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/content/{content_hash}/download")
async def download_content(
content_hash: str,
session: AsyncSession = Depends(get_async_session)
):
"""Скачать контент по хешу."""
try:
# Найти контент в БД
stmt = select(Content).where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
raise HTTPException(status_code=404, detail="Content not found")
# Verify the file exists on disk
file_path = Path(content.file_path)
if not file_path.exists():
raise HTTPException(status_code=404, detail="File not found on disk")
# Return the file
return FileResponse(
path=str(file_path),
filename=content.original_filename or content.filename,
media_type=content.mime_type or "application/octet-stream"
)
except HTTPException:
raise
except Exception as e:
logger.error(f"Error downloading content: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/content/{content_hash}/upload")
async def upload_content(
content_hash: str,
file: UploadFile = File(...),
session: AsyncSession = Depends(get_async_session)
):
"""Загрузить контент в ноду."""
try:
# Проверить, не существует ли уже контент
exists_stmt = select(Content.id).where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
exists_result = await session.execute(exists_stmt)
if exists_result.scalar_one_or_none():
return {
"success": True,
"message": "Content already exists",
"hash": content_hash
}
# Create the storage directory
storage_path = Path("./storage/my-network/received")
storage_path.mkdir(parents=True, exist_ok=True)
# Save the file
file_path = storage_path / f"{content_hash}_{file.filename}"
with open(file_path, "wb") as buffer:
content_data = await file.read()
buffer.write(content_data)
# Compute hashes for verification
import hashlib
md5_hash = hashlib.md5(content_data).hexdigest()
sha256_hash = hashlib.sha256(content_data).hexdigest()
# Verify the provided hash matches
if content_hash not in [md5_hash, sha256_hash]:
file_path.unlink() # Delete the file
raise HTTPException(status_code=400, detail="Content hash mismatch")
# Persist to the database
new_content = Content(
filename=file.filename,
original_filename=file.filename,
file_path=str(file_path),
file_size=len(content_data),
file_type=file.filename.split('.')[-1] if '.' in file.filename else 'unknown',
mime_type=file.content_type or "application/octet-stream",
md5_hash=md5_hash,
sha256_hash=sha256_hash,
is_active=True,
processing_status="completed"
)
session.add(new_content)
await session.commit()
logger.info(f"Successfully uploaded content {content_hash}")
return {
"success": True,
"message": "Content uploaded successfully",
"hash": content_hash,
"content_id": new_content.id,
"timestamp": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error uploading content: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/content/replicate")
async def replicate_content(replication_request: Dict[str, Any]):
"""Принять запрос на репликацию контента."""
try:
content_hash = replication_request.get("content_hash")
metadata = replication_request.get("metadata", {})
source_node = replication_request.get("source_node")
if not content_hash:
raise HTTPException(status_code=400, detail="Content hash is required")
# Check whether replication is needed
async with get_async_session() as session:
exists_stmt = select(Content.id).where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
exists_result = await session.execute(exists_stmt)
if exists_result.scalar_one_or_none():
return {
"success": True,
"message": "Content already exists, replication not needed",
"hash": content_hash
}
# Prepare for replication
logger.info(f"Accepting replication request for {content_hash} from {source_node}")
return {
"success": True,
"message": "Replication request accepted",
"hash": content_hash,
"ready_for_upload": True,
"timestamp": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error processing replication request: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/sync/status")
async def get_sync_status():
"""Получить статус синхронизации."""
try:
node_service = get_node_service()
sync_status = await node_service.sync_manager.get_sync_status()
return {
"success": True,
"data": sync_status,
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting sync status: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/sync/start")
async def start_network_sync():
"""Запустить синхронизацию с сетью."""
try:
node_service = get_node_service()
sync_result = await node_service.sync_manager.sync_with_network()
return {
"success": True,
"data": sync_result,
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error starting network sync: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/sync/content/{content_hash}")
async def get_content_sync_status(content_hash: str):
"""Получить статус синхронизации конкретного контента."""
try:
node_service = get_node_service()
sync_status = await node_service.sync_manager.get_content_sync_status(content_hash)
return {
"success": True,
"data": sync_status,
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting content sync status: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.post("/content/{content_hash}/replicate")
async def replicate_content_to_nodes(
content_hash: str,
replication_config: Dict[str, Any]
):
"""Реплицировать контент на указанные ноды."""
try:
target_nodes = replication_config.get("target_nodes", [])
if not target_nodes:
raise HTTPException(status_code=400, detail="Target nodes are required")
node_service = get_node_service()
replication_result = await node_service.sync_manager.replicate_content_to_nodes(
content_hash,
target_nodes
)
return {
"success": True,
"data": replication_result,
"timestamp": datetime.utcnow().isoformat()
}
except HTTPException:
raise
except Exception as e:
logger.error(f"Error replicating content: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/network/stats")
async def get_network_stats():
"""Получить статистику сети."""
try:
node_service = get_node_service()
# Get node and peer information
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Content statistics
async with get_async_session() as session:
# Total number of content items
content_count_stmt = select(func.count(Content.id)).where(Content.is_active == True)
content_count_result = await session.execute(content_count_stmt)
total_content = content_count_result.scalar()
# Total content size
size_stmt = select(func.sum(Content.file_size)).where(Content.is_active == True)
size_result = await session.execute(size_stmt)
total_size = size_result.scalar() or 0
# Content by type
type_stmt = select(Content.file_type, func.count(Content.id)).where(Content.is_active == True).group_by(Content.file_type)
type_result = await session.execute(type_stmt)
content_by_type = {row[0]: row[1] for row in type_result}
network_stats = {
"node_info": {
"node_id": node_info["node_id"],
"uptime": node_info["uptime"],
"version": node_info["version"],
"status": node_info["status"]
},
"network": {
"connected_peers": peers_info["peer_count"],
"known_peers": len(peers_info["peers"]),
"network_health": "good" if peers_info["peer_count"] > 0 else "isolated"
},
"content": {
"total_items": total_content,
"total_size_bytes": total_size,
"total_size_mb": round(total_size / (1024 * 1024), 2),
"content_by_type": content_by_type
},
"sync": {
"active_syncs": sync_status["active_syncs"],
"queue_size": sync_status["queue_size"],
"is_running": sync_status["is_running"]
}
}
return {
"success": True,
"data": network_stats,
"timestamp": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting network stats: {e}")
raise HTTPException(status_code=500, detail=str(e))
@router.get("/health")
async def health_check():
"""Проверка здоровья MY Network ноды."""
try:
node_service = get_node_service()
# Basic service checks
health_status = {
"status": "healthy",
"timestamp": datetime.utcnow().isoformat(),
"services": {
"node_service": node_service is not None,
"peer_manager": hasattr(node_service, 'peer_manager') if node_service else False,
"sync_manager": hasattr(node_service, 'sync_manager') if node_service else False,
"database": True # Если дошли до этой точки, БД работает
}
}
# Check peer connectivity
if node_service:
peers_info = await node_service.get_peers_info()
health_status["network"] = {
"connected_peers": peers_info["peer_count"],
"status": "connected" if peers_info["peer_count"] > 0 else "isolated"
}
# Determine the overall status
if not all(health_status["services"].values()):
health_status["status"] = "unhealthy"
elif node_service and peers_info["peer_count"] == 0:
health_status["status"] = "isolated"
return health_status
except Exception as e:
logger.error(f"Health check failed: {e}")
return {
"status": "unhealthy",
"error": str(e),
"timestamp": datetime.utcnow().isoformat()
}
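The exists/download endpoints above are what a remote peer would call before pulling a file. Below is an illustrative client-side flow using httpx; the base URL, hash value, and output path are placeholders, and httpx itself is an assumption rather than a dependency declared in this diff.
import asyncio
import httpx

async def fetch_if_missing(base_url: str, content_hash: str, dest: str) -> bool:
    """Download content from a MY Network node if that node reports having it."""
    async with httpx.AsyncClient(base_url=base_url, timeout=30.0) as client:
        exists = (await client.get(f"/api/my/content/{content_hash}/exists")).json()
        if not exists.get("exists"):
            return False
        resp = await client.get(f"/api/my/content/{content_hash}/download")
        resp.raise_for_status()
        with open(dest, "wb") as fh:
            fh.write(resp.content)
        return True

# Example: asyncio.run(fetch_if_missing("http://node-a:8000", "<sha256-or-md5>", "out.bin"))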

View File

@ -0,0 +1,426 @@
"""MY Network Sanic Blueprint - маршруты для работы с распределенной сетью."""
import asyncio
import json
import logging
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any
from sanic import Blueprint, Request
from sanic.response import json as json_response, file as file_response
from sanic.exceptions import SanicException
from app.core.logging import get_logger
logger = get_logger(__name__)
# Create the MY Network API blueprint
bp = Blueprint("my_network", url_prefix="/api/my")
def get_node_service():
"""Получить сервис ноды."""
try:
from app.core.my_network.node_service import get_node_service
return get_node_service()
except Exception as e:
logger.error(f"Error getting node service: {e}")
return None
@bp.get("/node/info")
async def get_node_info(request: Request):
"""Получить информацию о текущей ноде."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
node_info = await node_service.get_node_info()
return json_response({
"success": True,
"data": node_info,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error getting node info: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/node/peers")
async def get_node_peers(request: Request):
"""Получить список подключенных пиров."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
peers_info = await node_service.get_peers_info()
return json_response({
"success": True,
"data": {
"connected_peers": peers_info["connected_peers"],
"peer_count": peers_info["peer_count"],
"peers": peers_info["peers"]
},
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error getting peers: {e}")
return json_response({"error": str(e)}, status=500)
@bp.post("/node/peers/connect")
async def connect_to_peer(request: Request):
"""Подключиться к новому пиру."""
try:
peer_data = request.json
peer_address = peer_data.get("address")
if not peer_address:
return json_response({"error": "Peer address is required"}, status=400)
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
success = await node_service.peer_manager.connect_to_peer(peer_address)
if success:
return json_response({
"success": True,
"message": f"Successfully connected to peer: {peer_address}",
"timestamp": datetime.utcnow().isoformat()
})
else:
return json_response({"error": "Failed to connect to peer"}, status=400)
except Exception as e:
logger.error(f"Error connecting to peer: {e}")
return json_response({"error": str(e)}, status=500)
@bp.delete("/node/peers/<peer_id>")
async def disconnect_peer(request: Request, peer_id: str):
"""Отключиться от пира."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
success = await node_service.peer_manager.disconnect_peer(peer_id)
if success:
return json_response({
"success": True,
"message": f"Successfully disconnected from peer: {peer_id}",
"timestamp": datetime.utcnow().isoformat()
})
else:
return json_response(
{"error": "Peer not found or already disconnected"},
status=404
)
except Exception as e:
logger.error(f"Error disconnecting peer: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/content/list")
async def get_content_list(request: Request):
"""Получить список доступного контента."""
try:
# Получить параметры запроса
limit = min(int(request.args.get("limit", 100)), 1000)
offset = max(int(request.args.get("offset", 0)), 0)
# Кэшировать результат на 5 минут
from app.core.cache import cache
cache_key = f"my_network:content_list:{limit}:{offset}"
cached_result = await cache.get(cache_key)
if cached_result:
return json_response(json.loads(cached_result))
# Fetch content from the database
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content, ContentMetadata
from sqlalchemy import select, func
async with get_async_session() as session:
stmt = (
select(Content, ContentMetadata)
.outerjoin(ContentMetadata, Content.id == ContentMetadata.content_id)
.where(Content.is_active == True)
.order_by(Content.created_at.desc())
.limit(limit)
.offset(offset)
)
result = await session.execute(stmt)
content_items = []
for content, metadata in result:
content_data = {
"hash": content.sha256_hash or content.md5_hash,
"filename": content.filename,
"original_filename": content.original_filename,
"file_size": content.file_size,
"file_type": content.file_type,
"mime_type": content.mime_type,
"created_at": content.created_at.isoformat(),
"encrypted": getattr(content, 'encrypted', False),
"metadata": metadata.to_dict() if metadata else {}
}
content_items.append(content_data)
# Get the total count
count_stmt = select(func.count(Content.id)).where(Content.is_active == True)
count_result = await session.execute(count_stmt)
total_count = count_result.scalar()
response_data = {
"success": True,
"data": {
"content": content_items,
"total": total_count,
"limit": limit,
"offset": offset
},
"timestamp": datetime.utcnow().isoformat()
}
# Cache the result
await cache.set(cache_key, json.dumps(response_data), expire=300)
return json_response(response_data)
except Exception as e:
logger.error(f"Error getting content list: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/content/<content_hash>/exists")
async def check_content_exists(request: Request, content_hash: str):
"""Проверить существование контента по хешу."""
try:
# Кэшировать результат на 30 минут
from app.core.cache import cache
cache_key = f"my_network:content_exists:{content_hash}"
cached_result = await cache.get(cache_key)
if cached_result is not None:
return json_response({"exists": cached_result == "true", "hash": content_hash})
# Check the database
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content
from sqlalchemy import select, and_
async with get_async_session() as session:
stmt = select(Content.id).where(
and_(
Content.is_active == True,
(Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash)
)
)
result = await session.execute(stmt)
exists = result.scalar_one_or_none() is not None
# Cache the result
await cache.set(cache_key, "true" if exists else "false", expire=1800)
return json_response({
"exists": exists,
"hash": content_hash,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error checking content existence: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/sync/status")
async def get_sync_status(request: Request):
"""Получить статус синхронизации."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
sync_status = await node_service.sync_manager.get_sync_status()
return json_response({
"success": True,
"data": sync_status,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error getting sync status: {e}")
return json_response({"error": str(e)}, status=500)
@bp.post("/sync/start")
async def start_network_sync(request: Request):
"""Запустить синхронизацию с сетью."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
sync_result = await node_service.sync_manager.sync_with_network()
return json_response({
"success": True,
"data": sync_result,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error starting network sync: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/network/stats")
async def get_network_stats(request: Request):
"""Получить статистику сети."""
try:
node_service = get_node_service()
if not node_service:
return json_response(
{"error": "MY Network service not available"},
status=503
)
# Get node and peer information
node_info = await node_service.get_node_info()
peers_info = await node_service.get_peers_info()
sync_status = await node_service.sync_manager.get_sync_status()
# Content statistics
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content
from sqlalchemy import select, func
async with get_async_session() as session:
# Total number of content items
content_count_stmt = select(func.count(Content.id)).where(Content.is_active == True)
content_count_result = await session.execute(content_count_stmt)
total_content = content_count_result.scalar()
# Total content size
size_stmt = select(func.sum(Content.file_size)).where(Content.is_active == True)
size_result = await session.execute(size_stmt)
total_size = size_result.scalar() or 0
# Content by type
type_stmt = select(Content.file_type, func.count(Content.id)).where(Content.is_active == True).group_by(Content.file_type)
type_result = await session.execute(type_stmt)
content_by_type = {row[0]: row[1] for row in type_result}
network_stats = {
"node_info": {
"node_id": node_info["node_id"],
"uptime": node_info["uptime"],
"version": node_info["version"],
"status": node_info["status"]
},
"network": {
"connected_peers": peers_info["peer_count"],
"known_peers": len(peers_info["peers"]),
"network_health": "good" if peers_info["peer_count"] > 0 else "isolated"
},
"content": {
"total_items": total_content,
"total_size_bytes": total_size,
"total_size_mb": round(total_size / (1024 * 1024), 2),
"content_by_type": content_by_type
},
"sync": {
"active_syncs": sync_status["active_syncs"],
"queue_size": sync_status["queue_size"],
"is_running": sync_status["is_running"]
}
}
return json_response({
"success": True,
"data": network_stats,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
logger.error(f"Error getting network stats: {e}")
return json_response({"error": str(e)}, status=500)
@bp.get("/health")
async def health_check(request: Request):
"""Проверка здоровья MY Network ноды."""
try:
node_service = get_node_service()
# Basic service checks
health_status = {
"status": "healthy",
"timestamp": datetime.utcnow().isoformat(),
"services": {
"node_service": node_service is not None,
"peer_manager": hasattr(node_service, 'peer_manager') if node_service else False,
"sync_manager": hasattr(node_service, 'sync_manager') if node_service else False,
"database": True # Если дошли до этой точки, БД работает
}
}
# Check peer connectivity
if node_service:
peers_info = await node_service.get_peers_info()
health_status["network"] = {
"connected_peers": peers_info["peer_count"],
"status": "connected" if peers_info["peer_count"] > 0 else "isolated"
}
# Determine the overall status
if not all(health_status["services"].values()):
health_status["status"] = "unhealthy"
elif node_service and peers_info["peer_count"] == 0:
health_status["status"] = "isolated"
return json_response(health_status)
except Exception as e:
logger.error(f"Health check failed: {e}")
return json_response({
"status": "unhealthy",
"error": str(e),
"timestamp": datetime.utcnow().isoformat()
}, status=500)
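Both the FastAPI router and this Sanic blueprint expose the same /api/my/health response shape, so an external watchdog only needs to read the status field. A small polling sketch follows; the URL and interval are assumptions.
import json
import time
import urllib.request

def watch_health(url: str = "http://localhost:8000/api/my/health", interval: int = 60) -> None:
    """Poll the node health endpoint and report anything that is not 'healthy'."""
    while True:
        try:
            with urllib.request.urlopen(url, timeout=10) as resp:
                status = json.load(resp).get("status", "unknown")
        except Exception as exc:
            status = f"unreachable ({exc})"
        if status != "healthy":
            print(f"[watchdog] node status: {status}")
        time.sleep(interval)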

View File

@ -0,0 +1,708 @@
"""
Storage management routes with chunked uploads, download handling, and file operations.
Provides secure file operations with progress tracking and comprehensive validation.
"""
import asyncio
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any
from uuid import UUID
from sanic import Blueprint, Request, response
from sanic.response import JSONResponse, ResponseStream
from sqlalchemy import select, update
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.storage import StorageManager
from app.core.security import validate_file_signature, generate_secure_filename
from app.api.middleware import require_auth, validate_request, rate_limit
from app.core.validation import StorageUploadSchema, ChunkUploadSchema
# Initialize blueprint
storage_bp = Blueprint("storage", url_prefix="/api/v1/storage")
logger = get_logger(__name__)
settings = get_settings()
@storage_bp.route("/upload", methods=["POST"])
@rate_limit(limit=10, window=3600) # 10 upload sessions per hour
@require_auth(permissions=["storage.upload"])
@validate_request(StorageUploadSchema)
async def initiate_upload(request: Request) -> JSONResponse:
"""
Initiate chunked file upload session with security validation.
Args:
request: Sanic request with upload parameters
Returns:
JSONResponse: Upload session information
"""
try:
data = request.json
user_id = request.ctx.user.id
# Validate file size against user quota
cache_manager = get_cache_manager()
quota_key = f"user:{user_id}:storage_quota"
current_usage = await cache_manager.get(quota_key, default=0)
if current_usage + data["file_size"] > settings.MAX_STORAGE_PER_USER:
return response.json(
{"error": "Storage quota exceeded", "code": "QUOTA_EXCEEDED"},
status=429
)
# Generate secure filename
secure_filename = generate_secure_filename(data["filename"], user_id)
# Validate content type
allowed_types = {
'image/jpeg', 'image/png', 'image/gif', 'image/webp',
'video/mp4', 'video/webm', 'video/avi',
'audio/mpeg', 'audio/wav', 'audio/flac', 'audio/ogg',
'application/pdf', 'text/plain', 'application/json',
'application/zip', 'application/x-rar'
}
if data["content_type"] not in allowed_types:
return response.json(
{"error": "File type not allowed", "code": "TYPE_NOT_ALLOWED"},
status=400
)
# Create content record first
async with get_async_session() as session:
from app.core.models.content import Content
content = Content(
user_id=user_id,
title=secure_filename,
content_type=data["content_type"],
file_size=data["file_size"],
status="uploading",
visibility="private"
)
session.add(content)
await session.commit()
await session.refresh(content)
# Create upload session
storage_manager = StorageManager()
upload_session = await storage_manager.create_upload_session(
content.id,
data["file_size"]
)
# Update user quota
await cache_manager.increment(quota_key, data["file_size"], ttl=86400)
await logger.ainfo(
"Upload session initiated",
user_id=str(user_id),
content_id=str(content.id),
filename=secure_filename,
file_size=data["file_size"]
)
return response.json({
"upload_session": upload_session,
"content_id": str(content.id),
"secure_filename": secure_filename,
"status": "ready_for_upload"
}, status=201)
except Exception as e:
await logger.aerror(
"Failed to initiate upload",
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to initiate upload", "code": "UPLOAD_INIT_FAILED"},
status=500
)
@storage_bp.route("/upload/<upload_id:uuid>/chunk", methods=["POST"])
@rate_limit(limit=1000, window=3600) # 1000 chunks per hour
@require_auth(permissions=["storage.upload"])
async def upload_chunk(request: Request, upload_id: UUID) -> JSONResponse:
"""
Upload individual file chunk with validation and progress tracking.
Args:
request: Sanic request with chunk data
upload_id: Upload session UUID
Returns:
JSONResponse: Chunk upload status
"""
try:
user_id = request.ctx.user.id
# Get chunk data from form
if 'chunk' not in request.files:
return response.json(
{"error": "No chunk data provided", "code": "NO_CHUNK_DATA"},
status=400
)
chunk_file = request.files['chunk'][0]
chunk_data = chunk_file.body
# Get chunk metadata
chunk_index = int(request.form.get('chunk_index', 0))
chunk_hash = request.form.get('chunk_hash', '')
is_final = request.form.get('is_final', 'false').lower() == 'true'
if not chunk_hash:
return response.json(
{"error": "Chunk hash required", "code": "HASH_REQUIRED"},
status=400
)
# Validate chunk size
if len(chunk_data) > settings.MAX_CHUNK_SIZE:
return response.json(
{"error": "Chunk too large", "code": "CHUNK_TOO_LARGE"},
status=400
)
# Upload chunk
storage_manager = StorageManager()
result = await storage_manager.upload_chunk(
upload_id,
chunk_index,
chunk_data,
chunk_hash
)
# Check if upload is complete
if is_final or result["uploaded_chunks"] == result["total_chunks"]:
# Finalize upload
finalize_result = await storage_manager.finalize_upload(upload_id)
result.update(finalize_result)
await logger.ainfo(
"Upload completed",
upload_id=str(upload_id),
user_id=str(user_id),
content_id=finalize_result.get("content_id")
)
return response.json(result)
except ValueError as e:
await logger.awarning(
"Chunk upload validation failed",
upload_id=str(upload_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": str(e), "code": "VALIDATION_FAILED"},
status=400
)
except Exception as e:
await logger.aerror(
"Chunk upload failed",
upload_id=str(upload_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Chunk upload failed", "code": "CHUNK_UPLOAD_FAILED"},
status=500
)
@storage_bp.route("/upload/<upload_id:uuid>/status", methods=["GET"])
@rate_limit(limit=100, window=3600) # 100 status checks per hour
@require_auth(permissions=["storage.upload"])
async def get_upload_status(request: Request, upload_id: UUID) -> JSONResponse:
"""
Get upload session status and progress.
Args:
request: Sanic request object
upload_id: Upload session UUID
Returns:
JSONResponse: Upload progress information
"""
try:
user_id = request.ctx.user.id
storage_manager = StorageManager()
# Get session data
session_data = await storage_manager._get_upload_session(upload_id)
if not session_data:
return response.json(
{"error": "Upload session not found", "code": "SESSION_NOT_FOUND"},
status=404
)
# Verify user ownership
async with get_async_session() as session:
from app.core.models.content import Content
stmt = select(Content).where(
Content.id == UUID(session_data["content_id"])
)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content or content.user_id != user_id:
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Calculate progress
uploaded_chunks = len(session_data.get("uploaded_chunks", []))
total_chunks = session_data["total_chunks"]
progress_percent = (uploaded_chunks / total_chunks * 100) if total_chunks > 0 else 0
return response.json({
"upload_id": str(upload_id),
"status": session_data["status"],
"progress": {
"uploaded_chunks": uploaded_chunks,
"total_chunks": total_chunks,
"percent": round(progress_percent, 2)
},
"created_at": session_data["created_at"],
"expires_at": session_data["expires_at"]
})
except Exception as e:
await logger.aerror(
"Failed to get upload status",
upload_id=str(upload_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to get upload status", "code": "STATUS_FAILED"},
status=500
)
@storage_bp.route("/upload/<upload_id:uuid>", methods=["DELETE"])
@rate_limit(limit=50, window=3600) # 50 cancellations per hour
@require_auth(permissions=["storage.upload"])
async def cancel_upload(request: Request, upload_id: UUID) -> JSONResponse:
"""
Cancel upload session and clean up temporary files.
Args:
request: Sanic request object
upload_id: Upload session UUID
Returns:
JSONResponse: Cancellation status
"""
try:
user_id = request.ctx.user.id
storage_manager = StorageManager()
# Get session data
session_data = await storage_manager._get_upload_session(upload_id)
if not session_data:
return response.json(
{"error": "Upload session not found", "code": "SESSION_NOT_FOUND"},
status=404
)
# Verify user ownership
content_id = UUID(session_data["content_id"])
async with get_async_session() as session:
from app.core.models.content import Content
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content or content.user_id != user_id:
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Delete content record
await session.delete(content)
await session.commit()
# Clean up chunks and session
cache_manager = get_cache_manager()
session_key = f"upload_session:{upload_id}"
await cache_manager.delete(session_key)
# Clean up chunks from storage
for chunk_index in session_data.get("uploaded_chunks", []):
chunk_id = f"{upload_id}_{chunk_index:06d}"
await storage_manager.backend.delete_chunk(chunk_id)
# Update user quota
quota_key = f"user:{user_id}:storage_quota"
await cache_manager.decrement(quota_key, session_data.get("total_size", 0))
await logger.ainfo(
"Upload cancelled",
upload_id=str(upload_id),
user_id=str(user_id),
content_id=str(content_id)
)
return response.json({
"status": "cancelled",
"upload_id": str(upload_id)
})
except Exception as e:
await logger.aerror(
"Failed to cancel upload",
upload_id=str(upload_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to cancel upload", "code": "CANCEL_FAILED"},
status=500
)
@storage_bp.route("/files/<content_id:uuid>", methods=["DELETE"])
@rate_limit(limit=50, window=3600) # 50 deletions per hour
@require_auth(permissions=["storage.delete"])
async def delete_file(request: Request, content_id: UUID) -> JSONResponse:
"""
Delete content file and cleanup storage.
Args:
request: Sanic request object
content_id: Content UUID to delete
Returns:
JSONResponse: Deletion status
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
from app.core.models.content import Content
# Get content
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return response.json(
{"error": "Content not found", "code": "NOT_FOUND"},
status=404
)
# Check permissions
if content.user_id != user_id and not request.ctx.user.is_admin:
return response.json(
{"error": "Access denied", "code": "ACCESS_DENIED"},
status=403
)
# Delete files
storage_manager = StorageManager()
deletion_success = await storage_manager.delete_content_files(content_id)
if not deletion_success:
await logger.awarning(
"File deletion partially failed",
content_id=str(content_id),
user_id=str(user_id)
)
# Update user quota
cache_manager = get_cache_manager()
quota_key = f"user:{user_id}:storage_quota"
await cache_manager.decrement(quota_key, content.file_size or 0)
# Clear caches
await cache_manager.delete(f"content:{content_id}")
await cache_manager.delete(f"content:{content_id}:full")
await logger.ainfo(
"Content deleted",
content_id=str(content_id),
user_id=str(user_id),
file_size=content.file_size
)
return response.json({
"status": "deleted",
"content_id": str(content_id)
})
except Exception as e:
await logger.aerror(
"Failed to delete content",
content_id=str(content_id),
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to delete content", "code": "DELETE_FAILED"},
status=500
)
@storage_bp.route("/quota", methods=["GET"])
@rate_limit(limit=100, window=3600) # 100 quota checks per hour
@require_auth(permissions=["storage.read"])
async def get_storage_quota(request: Request) -> JSONResponse:
"""
Get user storage quota and usage information.
Args:
request: Sanic request object
Returns:
JSONResponse: Quota information
"""
try:
user_id = request.ctx.user.id
# Get current usage from cache
cache_manager = get_cache_manager()
quota_key = f"user:{user_id}:storage_quota"
current_usage = await cache_manager.get(quota_key, default=0)
# Calculate accurate usage from database
async with get_async_session() as session:
from sqlalchemy import func
from app.core.models.content import Content
stmt = select(
func.count(Content.id).label('file_count'),
func.sum(Content.file_size).label('total_size')
).where(
Content.user_id == user_id,
Content.status == 'completed'
)
result = await session.execute(stmt)
stats = result.first()
accurate_usage = stats.total_size or 0
file_count = stats.file_count or 0
# Update cache with accurate value
if abs(current_usage - accurate_usage) > 1024: # Update if difference > 1KB
await cache_manager.set(quota_key, accurate_usage, ttl=86400)
current_usage = accurate_usage
# Calculate quota information
max_quota = settings.MAX_STORAGE_PER_USER
usage_percent = (current_usage / max_quota * 100) if max_quota > 0 else 0
return response.json({
"quota": {
"used_bytes": current_usage,
"max_bytes": max_quota,
"available_bytes": max(0, max_quota - current_usage),
"usage_percent": round(usage_percent, 2)
},
"files": {
"count": file_count,
"max_files": settings.MAX_FILES_PER_USER
},
"updated_at": datetime.utcnow().isoformat()
})
except Exception as e:
await logger.aerror(
"Failed to get storage quota",
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to get quota information", "code": "QUOTA_FAILED"},
status=500
)
@storage_bp.route("/stats", methods=["GET"])
@rate_limit(limit=50, window=3600) # 50 stats requests per hour
@require_auth(permissions=["storage.read"])
async def get_storage_stats(request: Request) -> JSONResponse:
"""
Get detailed storage statistics for user.
Args:
request: Sanic request object
Returns:
JSONResponse: Detailed storage statistics
"""
try:
user_id = request.ctx.user.id
async with get_async_session() as session:
from sqlalchemy import func
from app.core.models.content import Content
# Get statistics by content type
type_stmt = select(
Content.content_type,
func.count(Content.id).label('count'),
func.sum(Content.file_size).label('size'),
func.avg(Content.file_size).label('avg_size')
).where(
Content.user_id == user_id,
Content.status == 'completed'
).group_by(Content.content_type)
type_result = await session.execute(type_stmt)
type_stats = {
row.content_type: {
'count': row.count,
'total_size': row.size or 0,
'average_size': row.avg_size or 0
}
for row in type_result
}
# Get upload statistics by month
monthly_stmt = select(
func.date_trunc('month', Content.created_at).label('month'),
func.count(Content.id).label('uploads'),
func.sum(Content.file_size).label('size')
).where(
Content.user_id == user_id,
Content.status == 'completed',
Content.created_at >= datetime.utcnow().replace(day=1) - timedelta(days=365)
).group_by(func.date_trunc('month', Content.created_at))
monthly_result = await session.execute(monthly_stmt)
monthly_stats = [
{
'month': row.month.isoformat(),
'uploads': row.uploads,
'size': row.size or 0
}
for row in monthly_result
]
return response.json({
"by_type": type_stats,
"monthly": monthly_stats,
"generated_at": datetime.utcnow().isoformat()
})
except Exception as e:
await logger.aerror(
"Failed to get storage stats",
user_id=str(user_id),
error=str(e)
)
return response.json(
{"error": "Failed to get storage statistics", "code": "STATS_FAILED"},
status=500
)
@storage_bp.route("/cleanup", methods=["POST"])
@rate_limit(limit=5, window=3600) # 5 cleanup operations per hour
@require_auth(permissions=["storage.admin"])
async def cleanup_orphaned_files(request: Request) -> JSONResponse:
"""
Clean up orphaned files and incomplete uploads (admin only).
Args:
request: Sanic request object
Returns:
JSONResponse: Cleanup results
"""
try:
if not request.ctx.user.is_admin:
return response.json(
{"error": "Admin access required", "code": "ADMIN_REQUIRED"},
status=403
)
storage_manager = StorageManager()
cache_manager = get_cache_manager()
cleanup_stats = {
"orphaned_chunks": 0,
"expired_sessions": 0,
"failed_uploads": 0,
"freed_space": 0
}
# Clean up expired upload sessions
async with get_async_session() as session:
from app.core.models.storage import ContentUploadSession
from app.core.models.content import Content
# Get expired sessions
expired_sessions_stmt = select(ContentUploadSession).where(
ContentUploadSession.expires_at < datetime.utcnow()
)
expired_result = await session.execute(expired_sessions_stmt)
expired_sessions = expired_result.scalars().all()
for upload_session in expired_sessions:
# Clean up chunks
session_key = f"upload_session:{upload_session.id}"
session_data = await cache_manager.get(session_key)
if session_data:
for chunk_index in session_data.get("uploaded_chunks", []):
chunk_id = f"{upload_session.id}_{chunk_index:06d}"
if await storage_manager.backend.delete_chunk(chunk_id):
cleanup_stats["orphaned_chunks"] += 1
# Delete session
await session.delete(upload_session)
await cache_manager.delete(session_key)
cleanup_stats["expired_sessions"] += 1
# Clean up failed uploads (older than 24 hours)
failed_uploads_stmt = select(Content).where(
Content.status.in_(['uploading', 'processing', 'failed']),
Content.created_at < datetime.utcnow() - timedelta(hours=24)
)
failed_result = await session.execute(failed_uploads_stmt)
failed_uploads = failed_result.scalars().all()
for content in failed_uploads:
if content.file_path:
if await storage_manager.backend.delete_file(content.file_path):
cleanup_stats["freed_space"] += content.file_size or 0
await session.delete(content)
cleanup_stats["failed_uploads"] += 1
await session.commit()
await logger.ainfo(
"Storage cleanup completed",
**cleanup_stats,
admin_user=str(request.ctx.user.id)
)
return response.json({
"status": "cleanup_completed",
"results": cleanup_stats,
"timestamp": datetime.utcnow().isoformat()
})
except Exception as e:
await logger.aerror(
"Storage cleanup failed",
admin_user=str(request.ctx.user.id),
error=str(e)
)
return response.json(
{"error": "Cleanup operation failed", "code": "CLEANUP_FAILED"},
status=500
)
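The upload handlers above define a three-step client protocol: initiate a session, stream chunks with their hashes, and let the final chunk trigger finalization. The sketch below mirrors that flow with httpx; the auth scheme, the chunk hash algorithm, the key layout inside upload_session, and the chunk size are assumptions, while the route paths and form field names come from the handlers.
import hashlib
from pathlib import Path

import httpx

CHUNK_SIZE = 1024 * 1024  # assumed client-side chunk size; the server enforces MAX_CHUNK_SIZE

def upload_file(base_url: str, token: str, path: Path, content_type: str) -> None:
    """Upload a local file through the chunked storage API."""
    headers = {"Authorization": f"Bearer {token}"}  # assumed auth header format
    data = path.read_bytes()
    with httpx.Client(base_url=base_url, headers=headers, timeout=60.0) as client:
        init = client.post("/api/v1/storage/upload", json={
            "filename": path.name,
            "file_size": len(data),
            "content_type": content_type,
        })
        init.raise_for_status()
        upload_id = init.json()["upload_session"]["id"]  # assumed key inside upload_session
        chunks = [data[i:i + CHUNK_SIZE] for i in range(0, len(data), CHUNK_SIZE)]
        for index, chunk in enumerate(chunks):
            client.post(
                f"/api/v1/storage/upload/{upload_id}/chunk",
                files={"chunk": (path.name, chunk)},
                data={
                    "chunk_index": str(index),
                    "chunk_hash": hashlib.sha256(chunk).hexdigest(),  # hash algorithm is an assumption
                    "is_final": "true" if index == len(chunks) - 1 else "false",
                },
            ).raise_for_status()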

View File

@ -1,267 +1,602 @@
"""Media conversion service for processing uploaded files."""
import asyncio import asyncio
from datetime import datetime import hashlib
import os
import uuid
import json import json
import shutil import logging
import magic # python-magic for MIME detection import os
from base58 import b58decode, b58encode import tempfile
from sqlalchemy import and_, or_ from datetime import datetime
from app.core.models.node_storage import StoredContent from pathlib import Path
from app.core.models._telegram import Wrapped_CBotChat from typing import Dict, List, Optional, Set, Any, Tuple
from app.core._utils.send_status import send_status
from app.core.logger import make_log import aiofiles
from app.core.models.user import User import redis.asyncio as redis
from app.core.models import WalletConnection from PIL import Image, ImageOps
from app.core.storage import db_session from sqlalchemy import select, update
from app.core._config import UPLOADS_DIR from sqlalchemy.ext.asyncio import AsyncSession
from app.core.content.content_id import ContentId
from app.core.config import get_settings
from app.core.database import get_async_session
from app.core.models.content import Content, FileUpload
from app.core.storage import storage_manager
logger = logging.getLogger(__name__)
async def convert_loop(memory): class ConvertService:
with db_session() as session: """Service for converting and processing uploaded media files."""
# Query for unprocessed encrypted content
unprocessed_encrypted_content = session.query(StoredContent).filter(
and_(
StoredContent.type == "onchain/content",
or_(
StoredContent.btfs_cid == None,
StoredContent.ipfs_cid == None,
)
)
).first()
if not unprocessed_encrypted_content:
make_log("ConvertProcess", "No content to convert", level="debug")
return
        # Fetch the decrypted file def __init__(self):
decrypted_content = session.query(StoredContent).filter( self.settings = get_settings()
StoredContent.id == unprocessed_encrypted_content.decrypted_content_id self.redis_client: Optional[redis.Redis] = None
).first() self.is_running = False
if not decrypted_content: self.tasks: Set[asyncio.Task] = set()
make_log("ConvertProcess", "Decrypted content not found", level="error")
return
        # Determine the input file path and extension # Conversion configuration
input_file_path = f"/Storage/storedContent/{decrypted_content.hash}" self.batch_size = 10
input_ext = (unprocessed_encrypted_content.filename.split('.')[-1] self.process_interval = 5 # seconds
if '.' in unprocessed_encrypted_content.filename else "mp4") self.max_retries = 3
self.temp_dir = Path(tempfile.gettempdir()) / "uploader_convert"
self.temp_dir.mkdir(exist_ok=True)
        # ==== New logic: MIME type detection via python-magic ==== # Supported formats
self.image_formats = {'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tiff'}
self.video_formats = {'.mp4', '.avi', '.mov', '.wmv', '.flv', '.webm', '.mkv'}
self.audio_formats = {'.mp3', '.wav', '.ogg', '.m4a', '.flac', '.aac'}
self.document_formats = {'.pdf', '.doc', '.docx', '.txt', '.rtf'}
# Image processing settings
self.thumbnail_sizes = [(150, 150), (300, 300), (800, 600)]
self.image_quality = 85
self.max_image_size = (2048, 2048)
async def start(self) -> None:
"""Start the conversion service."""
try: try:
mime_type = magic.from_file(input_file_path.replace("/Storage/storedContent", "/app/data"), mime=True) logger.info("Starting media conversion service")
except Exception as e:
make_log("ConvertProcess", f"magic probe failed: {e}", level="warning")
mime_type = ""
if mime_type.startswith("video/"): # Initialize Redis connection
content_kind = "video" self.redis_client = redis.from_url(
elif mime_type.startswith("audio/"): self.settings.redis_url,
content_kind = "audio" encoding="utf-8",
else: decode_responses=True,
content_kind = "other" socket_keepalive=True,
socket_keepalive_options={},
make_log("ConvertProcess", f"Detected content_kind={content_kind}, mime={mime_type}", level="info") health_check_interval=30,
        # For other content types, just keep a raw copy and exit
if content_kind == "other":
make_log("ConvertProcess", f"Content {unprocessed_encrypted_content.id} processed. Not audio/video, copy just", level="info")
unprocessed_encrypted_content.btfs_cid = ContentId(
version=2, content_hash=b58decode(decrypted_content.hash)
).serialize_v2()
unprocessed_encrypted_content.ipfs_cid = ContentId(
version=2, content_hash=b58decode(decrypted_content.hash)
).serialize_v2()
unprocessed_encrypted_content.meta = {
**unprocessed_encrypted_content.meta,
'converted_content': {
option_name: decrypted_content.hash for option_name in ['high', 'low', 'low_preview']
}
}
session.commit()
return
    # ==== Video/audio conversion: original logic ====
# Static preview interval in seconds
preview_interval = [0, 30]
if unprocessed_encrypted_content.onchain_index in [2]:
preview_interval = [0, 60]
make_log(
"ConvertProcess",
f"Processing content {unprocessed_encrypted_content.id} as {content_kind} with preview interval {preview_interval}",
level="info"
) )
    # Choose conversion options for video and audio # Test Redis connection
if content_kind == "video": await self.redis_client.ping()
REQUIRED_CONVERT_OPTIONS = ['high', 'low', 'low_preview'] logger.info("Redis connection established for converter")
else:
REQUIRED_CONVERT_OPTIONS = ['high', 'low'] # no preview for audio
converted_content = {} # Start conversion tasks
logs_dir = "/Storage/logs/converter" self.is_running = True
for option in REQUIRED_CONVERT_OPTIONS: # Create conversion tasks
# Set quality parameter and trim option (only for preview) tasks = [
if option == "low_preview": asyncio.create_task(self._process_pending_files_loop()),
quality = "low" asyncio.create_task(self._cleanup_temp_files_loop()),
trim_value = f"{preview_interval[0]}-{preview_interval[1]}" asyncio.create_task(self._retry_failed_conversions_loop()),
else:
quality = option
trim_value = None
# Generate a unique output directory for docker container
output_uuid = str(uuid.uuid4())
output_dir = f"/Storage/storedContent/converter-output/{output_uuid}"
# Build the docker command
cmd = [
"docker", "run", "--rm",
"-v", f"{input_file_path}:/app/input",
"-v", f"{output_dir}:/app/output",
"-v", f"{logs_dir}:/app/logs",
"media_converter",
"--ext", input_ext,
"--quality", quality
] ]
if trim_value:
cmd.extend(["--trim", trim_value])
if content_kind == "audio":
cmd.append("--audio-only") # audio-only flag
process = await asyncio.create_subprocess_exec( self.tasks.update(tasks)
*cmd,
stdout=asyncio.subprocess.PIPE, # Wait for all tasks
stderr=asyncio.subprocess.PIPE await asyncio.gather(*tasks, return_exceptions=True)
)
stdout, stderr = await process.communicate()
if process.returncode != 0:
make_log("ConvertProcess", f"Docker conversion failed for option {option}: {stderr.decode()}", level="error")
return
# List files in output dir
try:
files = os.listdir(output_dir.replace("/Storage/storedContent", "/app/data"))
except Exception as e: except Exception as e:
make_log("ConvertProcess", f"Error reading output directory {output_dir}: {e}", level="error") logger.error(f"Error starting conversion service: {e}")
return await self.stop()
raise
media_files = [f for f in files if f != "output.json"] async def stop(self) -> None:
if len(media_files) != 1: """Stop the conversion service."""
make_log("ConvertProcess", f"Expected one media file, found {len(media_files)} for option {option}", level="error") logger.info("Stopping media conversion service")
return self.is_running = False
output_file = os.path.join( # Cancel all tasks
output_dir.replace("/Storage/storedContent", "/app/data"), for task in self.tasks:
media_files[0] if not task.done():
) task.cancel()
# Compute SHA256 hash of the output file # Wait for tasks to complete
hash_process = await asyncio.create_subprocess_exec( if self.tasks:
"sha256sum", output_file, await asyncio.gather(*self.tasks, return_exceptions=True)
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
hash_stdout, hash_stderr = await hash_process.communicate()
if hash_process.returncode != 0:
make_log("ConvertProcess", f"Error computing sha256sum for option {option}: {hash_stderr.decode()}", level="error")
return
file_hash = hash_stdout.decode().split()[0]
file_hash = b58encode(bytes.fromhex(file_hash)).decode()
# Save new StoredContent if not exists # Close Redis connection
if not session.query(StoredContent).filter( if self.redis_client:
StoredContent.hash == file_hash await self.redis_client.close()
).first():
new_content = StoredContent(
type="local/content_bin",
hash=file_hash,
user_id=unprocessed_encrypted_content.user_id,
filename=media_files[0],
meta={'encrypted_file_hash': unprocessed_encrypted_content.hash},
created=datetime.now(),
)
session.add(new_content)
session.commit()
save_path = os.path.join(UPLOADS_DIR, file_hash) # Cleanup temp directory
await self._cleanup_temp_directory()
logger.info("Conversion service stopped")
async def _process_pending_files_loop(self) -> None:
"""Main loop for processing pending file conversions."""
logger.info("Starting file conversion loop")
while self.is_running:
try: try:
os.remove(save_path) await self._process_pending_files()
except FileNotFoundError: await asyncio.sleep(self.process_interval)
pass
try: except asyncio.CancelledError:
shutil.move(output_file, save_path) break
except Exception as e: except Exception as e:
make_log("ConvertProcess", f"Error moving output file {output_file} to {save_path}: {e}", level="error") logger.error(f"Error in file conversion loop: {e}")
await asyncio.sleep(self.process_interval)
async def _process_pending_files(self) -> None:
"""Process pending file conversions."""
async with get_async_session() as session:
try:
# Get pending uploads
result = await session.execute(
select(FileUpload)
.where(
FileUpload.status == "uploaded",
FileUpload.processed == False
)
.limit(self.batch_size)
)
uploads = result.scalars().all()
if not uploads:
return return
converted_content[option] = file_hash logger.info(f"Processing {len(uploads)} pending files")
# Process output.json for ffprobe_meta # Process each upload
output_json_path = os.path.join( for upload in uploads:
output_dir.replace("/Storage/storedContent", "/app/data"), await self._process_single_file(session, upload)
"output.json"
) await session.commit()
if os.path.exists(output_json_path) and unprocessed_encrypted_content.meta.get('ffprobe_meta') is None:
except Exception as e:
logger.error(f"Error processing pending files: {e}")
await session.rollback()
async def _process_single_file(self, session: AsyncSession, upload: FileUpload) -> None:
"""Process a single file upload."""
try: try:
with open(output_json_path, "r") as f: logger.info(f"Processing file: {upload.filename}")
ffprobe_meta = json.load(f)
unprocessed_encrypted_content.meta = { # Mark as processing
**unprocessed_encrypted_content.meta, upload.status = "processing"
'ffprobe_meta': ffprobe_meta upload.processing_started_at = datetime.utcnow()
await session.commit()
# Get file extension
file_ext = Path(upload.filename).suffix.lower()
# Process based on file type
if file_ext in self.image_formats:
await self._process_image(session, upload)
elif file_ext in self.video_formats:
await self._process_video(session, upload)
elif file_ext in self.audio_formats:
await self._process_audio(session, upload)
elif file_ext in self.document_formats:
await self._process_document(session, upload)
else:
# Just mark as processed for unsupported formats
upload.status = "completed"
upload.processed = True
upload.processing_completed_at = datetime.utcnow()
# Cache processing result
cache_key = f"processed:{upload.id}"
processing_info = {
"status": upload.status,
"processed_at": datetime.utcnow().isoformat(),
"metadata": upload.metadata or {}
} }
except Exception as e: await self.redis_client.setex(cache_key, 3600, json.dumps(processing_info))
make_log("ConvertProcess", f"Error handling output.json for option {option}: {e}", level="error")
# Cleanup output directory except Exception as e:
logger.error(f"Error processing file {upload.filename}: {e}")
# Mark as failed
upload.status = "failed"
upload.error_message = str(e)
upload.retry_count = (upload.retry_count or 0) + 1
if upload.retry_count >= self.max_retries:
upload.processed = True # Stop retrying
async def _process_image(self, session: AsyncSession, upload: FileUpload) -> None:
"""Process an image file."""
try: try:
shutil.rmtree(output_dir.replace("/Storage/storedContent", "/app/data")) # Download original file
except Exception as e: original_path = await self._download_file(upload)
make_log("ConvertProcess", f"Error removing output dir {output_dir}: {e}", level="warning")
# Finalize original record if not original_path:
make_log("ConvertProcess", f"Content {unprocessed_encrypted_content.id} processed. Converted content: {converted_content}", level="info") raise Exception("Failed to download original file")
unprocessed_encrypted_content.btfs_cid = ContentId(
version=2, content_hash=b58decode(converted_content['high' if content_kind=='video' else 'low']) # Open image
).serialize_v2() with Image.open(original_path) as img:
unprocessed_encrypted_content.ipfs_cid = ContentId( # Extract metadata
version=2, content_hash=b58decode(converted_content['low']) metadata = {
).serialize_v2() "format": img.format,
unprocessed_encrypted_content.meta = { "mode": img.mode,
**unprocessed_encrypted_content.meta, "size": img.size,
'converted_content': converted_content "has_transparency": img.mode in ('RGBA', 'LA') or 'transparency' in img.info
} }
session.commit()
# Notify user if needed # Fix orientation
if not unprocessed_encrypted_content.meta.get('upload_notify_msg_id'): img = ImageOps.exif_transpose(img)
wallet_owner_connection = session.query(WalletConnection).filter(
WalletConnection.wallet_address == unprocessed_encrypted_content.owner_address # Resize if too large
).order_by(WalletConnection.id.desc()).first() if img.size[0] > self.max_image_size[0] or img.size[1] > self.max_image_size[1]:
if wallet_owner_connection: img.thumbnail(self.max_image_size, Image.Resampling.LANCZOS)
wallet_owner_user = wallet_owner_connection.user metadata["resized"] = True
bot = Wrapped_CBotChat(
memory._client_telegram_bot, # Save optimized version
chat_id=wallet_owner_user.telegram_id, optimized_path = self.temp_dir / f"optimized_{upload.id}.jpg"
user=wallet_owner_user,
db_session=session # Convert to RGB if necessary
if img.mode in ('RGBA', 'LA'):
background = Image.new('RGB', img.size, (255, 255, 255))
if img.mode == 'LA':
img = img.convert('RGBA')
background.paste(img, mask=img.split()[-1])
img = background
elif img.mode != 'RGB':
img = img.convert('RGB')
img.save(
optimized_path,
'JPEG',
quality=self.image_quality,
optimize=True
) )
unprocessed_encrypted_content.meta['upload_notify_msg_id'] = await bot.send_content(session, unprocessed_encrypted_content)
session.commit()
# Upload optimized version
optimized_url = await storage_manager.upload_file(
str(optimized_path),
f"optimized/{upload.id}/image.jpg"
)
async def main_fn(memory): # Generate thumbnails
make_log("ConvertProcess", "Service started", level="info") thumbnails = {}
seqno = 0 for size in self.thumbnail_sizes:
while True: thumbnail_path = await self._create_thumbnail(original_path, size)
if thumbnail_path:
thumb_url = await storage_manager.upload_file(
str(thumbnail_path),
f"thumbnails/{upload.id}/{size[0]}x{size[1]}.jpg"
)
thumbnails[f"{size[0]}x{size[1]}"] = thumb_url
thumbnail_path.unlink() # Cleanup
# Update upload record
upload.metadata = {
**metadata,
"thumbnails": thumbnails,
"optimized_url": optimized_url
}
upload.status = "completed"
upload.processed = True
upload.processing_completed_at = datetime.utcnow()
# Cleanup temp files
original_path.unlink()
optimized_path.unlink()
except Exception as e:
logger.error(f"Error processing image {upload.filename}: {e}")
raise
async def _process_video(self, session: AsyncSession, upload: FileUpload) -> None:
"""Process a video file."""
try: try:
make_log("ConvertProcess", "Service running", level="debug") # For video processing, we would typically use ffmpeg
await convert_loop(memory) # This is a simplified version that just extracts basic info
await asyncio.sleep(5)
await send_status("convert_service", f"working (seqno={seqno})") original_path = await self._download_file(upload)
seqno += 1 if not original_path:
except BaseException as e: raise Exception("Failed to download original file")
make_log("ConvertProcess", f"Error: {e}", level="error")
await asyncio.sleep(3) # Basic video metadata (would use ffprobe in real implementation)
metadata = {
"type": "video",
"file_size": original_path.stat().st_size,
"processing_note": "Video processing requires ffmpeg implementation"
}
# Generate video thumbnail (simplified)
thumbnail_path = await self._create_video_thumbnail(original_path)
if thumbnail_path:
thumb_url = await storage_manager.upload_file(
str(thumbnail_path),
f"thumbnails/{upload.id}/video_thumb.jpg"
)
metadata["thumbnail"] = thumb_url
thumbnail_path.unlink()
# Update upload record
upload.metadata = metadata
upload.status = "completed"
upload.processed = True
upload.processing_completed_at = datetime.utcnow()
# Cleanup
original_path.unlink()
except Exception as e:
logger.error(f"Error processing video {upload.filename}: {e}")
raise
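# --- Illustrative sketch, not part of the original module -------------------
# The comment above notes that real video handling "would use ffprobe". A
# minimal standalone helper could look roughly like this, assuming an ffprobe
# binary is available on PATH (the helper name is hypothetical):
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, Optional


async def probe_media_sketch(path: Path) -> Optional[Dict[str, Any]]:
    """Run ffprobe and return its parsed JSON output, or None on failure."""
    proc = await asyncio.create_subprocess_exec(
        "ffprobe", "-v", "quiet", "-print_format", "json",
        "-show_format", "-show_streams", str(path),
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    stdout, _ = await proc.communicate()
    if proc.returncode != 0:
        return None
    return json.loads(stdout.decode())
# --- End of sketch -----------------------------------------------------------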
async def _process_audio(self, session: AsyncSession, upload: FileUpload) -> None:
"""Process an audio file."""
try:
original_path = await self._download_file(upload)
if not original_path:
raise Exception("Failed to download original file")
# Basic audio metadata
metadata = {
"type": "audio",
"file_size": original_path.stat().st_size,
"processing_note": "Audio processing requires additional libraries"
}
# Update upload record
upload.metadata = metadata
upload.status = "completed"
upload.processed = True
upload.processing_completed_at = datetime.utcnow()
# Cleanup
original_path.unlink()
except Exception as e:
logger.error(f"Error processing audio {upload.filename}: {e}")
raise
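# --- Illustrative sketch, not part of the original module -------------------
# The placeholder above says audio handling "requires additional libraries".
# One hedged option is mutagen (an assumption, not a confirmed project
# dependency); its synchronous parser can run in a worker thread so the event
# loop is not blocked:
import asyncio
from pathlib import Path
from typing import Any, Dict


async def probe_audio_sketch(path: Path) -> Dict[str, Any]:
    """Extract basic audio metadata with mutagen, off the event loop."""
    from mutagen import File as MutagenFile  # assumed installed

    def _read() -> Dict[str, Any]:
        audio = MutagenFile(str(path))
        if audio is None or audio.info is None:
            return {}
        return {
            "duration_seconds": getattr(audio.info, "length", None),
            "bitrate": getattr(audio.info, "bitrate", None),
        }

    return await asyncio.to_thread(_read)
# --- End of sketch -----------------------------------------------------------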
async def _process_document(self, session: AsyncSession, upload: FileUpload) -> None:
"""Process a document file."""
try:
original_path = await self._download_file(upload)
if not original_path:
raise Exception("Failed to download original file")
# Basic document metadata
metadata = {
"type": "document",
"file_size": original_path.stat().st_size,
"pages": 1, # Would extract actual page count for PDFs
"processing_note": "Document processing requires additional libraries"
}
# Update upload record
upload.metadata = metadata
upload.status = "completed"
upload.processed = True
upload.processing_completed_at = datetime.utcnow()
# Cleanup
original_path.unlink()
except Exception as e:
logger.error(f"Error processing document {upload.filename}: {e}")
raise
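# --- Illustrative sketch, not part of the original module -------------------
# The metadata above hard-codes "pages": 1 and notes that document handling
# needs extra libraries. For PDFs, pypdf could supply the real page count
# (pypdf is an assumption here, not a confirmed project dependency):
import asyncio
from pathlib import Path
from typing import Optional


async def count_pdf_pages_sketch(path: Path) -> Optional[int]:
    """Return the page count of a PDF, or None if it cannot be parsed."""
    def _read() -> Optional[int]:
        try:
            from pypdf import PdfReader  # assumed installed
            return len(PdfReader(str(path)).pages)
        except Exception:
            return None

    return await asyncio.to_thread(_read)
# --- End of sketch -----------------------------------------------------------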
async def _download_file(self, upload: FileUpload) -> Optional[Path]:
"""Download a file for processing."""
try:
if not upload.file_path:
return None
# Create temp file path
temp_path = self.temp_dir / f"original_{upload.id}_{upload.filename}"
# Download file from storage
file_data = await storage_manager.get_file(upload.file_path)
if not file_data:
return None
# Write to temp file
async with aiofiles.open(temp_path, 'wb') as f:
await f.write(file_data)
return temp_path
except Exception as e:
logger.error(f"Error downloading file {upload.filename}: {e}")
return None
async def _create_thumbnail(self, image_path: Path, size: Tuple[int, int]) -> Optional[Path]:
"""Create a thumbnail from an image."""
try:
thumbnail_path = self.temp_dir / f"thumb_{size[0]}x{size[1]}_{image_path.name}"
with Image.open(image_path) as img:
# Fix orientation
img = ImageOps.exif_transpose(img)
# Create thumbnail
img.thumbnail(size, Image.Resampling.LANCZOS)
# Convert to RGB if necessary
if img.mode in ('RGBA', 'LA'):
background = Image.new('RGB', img.size, (255, 255, 255))
if img.mode == 'LA':
img = img.convert('RGBA')
background.paste(img, mask=img.split()[-1])
img = background
elif img.mode != 'RGB':
img = img.convert('RGB')
# Save thumbnail
img.save(
thumbnail_path,
'JPEG',
quality=self.image_quality,
optimize=True
)
return thumbnail_path
except Exception as e:
logger.error(f"Error creating thumbnail: {e}")
return None
async def _create_video_thumbnail(self, video_path: Path) -> Optional[Path]:
"""Create a thumbnail from a video file."""
try:
# This would require ffmpeg to extract a frame from the video
# For now, return a placeholder
return None
except Exception as e:
logger.error(f"Error creating video thumbnail: {e}")
return None
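# --- Illustrative sketch, not part of the original module -------------------
# _create_video_thumbnail above is a stub that returns None. With an ffmpeg
# binary on PATH (an assumption), a single frame could be grabbed roughly like
# this; the function name and the 1-second seek offset are arbitrary choices.
import asyncio
from pathlib import Path
from typing import Optional


async def grab_video_frame_sketch(video_path: Path, out_path: Path) -> Optional[Path]:
    """Extract one frame at ~1s into the video and save it as a JPEG."""
    proc = await asyncio.create_subprocess_exec(
        "ffmpeg", "-y", "-ss", "1", "-i", str(video_path),
        "-frames:v", "1", "-q:v", "2", str(out_path),
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    await proc.communicate()
    return out_path if proc.returncode == 0 and out_path.exists() else None
# --- End of sketch -----------------------------------------------------------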
async def _cleanup_temp_files_loop(self) -> None:
"""Loop for cleaning up temporary files."""
logger.info("Starting temp file cleanup loop")
while self.is_running:
try:
await self._cleanup_old_temp_files()
await asyncio.sleep(3600) # Run every hour
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in temp cleanup loop: {e}")
await asyncio.sleep(3600)
async def _cleanup_old_temp_files(self) -> None:
"""Clean up old temporary files."""
try:
current_time = datetime.now().timestamp()
for file_path in self.temp_dir.glob("*"):
if file_path.is_file():
# Remove files older than 1 hour
if current_time - file_path.stat().st_mtime > 3600:
file_path.unlink()
logger.debug(f"Removed old temp file: {file_path}")
except Exception as e:
logger.error(f"Error cleaning up temp files: {e}")
async def _cleanup_temp_directory(self) -> None:
"""Clean up the entire temp directory."""
try:
for file_path in self.temp_dir.glob("*"):
if file_path.is_file():
file_path.unlink()
except Exception as e:
logger.error(f"Error cleaning up temp directory: {e}")
async def _retry_failed_conversions_loop(self) -> None:
"""Loop for retrying failed conversions."""
logger.info("Starting retry loop for failed conversions")
while self.is_running:
try:
await self._retry_failed_conversions()
await asyncio.sleep(1800) # Run every 30 minutes
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in retry loop: {e}")
await asyncio.sleep(1800)
async def _retry_failed_conversions(self) -> None:
"""Retry failed conversions that haven't exceeded max retries."""
async with get_async_session() as session:
try:
# Get failed uploads that can be retried
result = await session.execute(
select(FileUpload)
.where(
FileUpload.status == "failed",
FileUpload.processed == False,
(FileUpload.retry_count < self.max_retries) | (FileUpload.retry_count.is_(None))
)
.limit(5) # Smaller batch for retries
)
uploads = result.scalars().all()
for upload in uploads:
logger.info(f"Retrying failed conversion for: {upload.filename}")
# Reset status
upload.status = "uploaded"
upload.error_message = None
# Process the file
await self._process_single_file(session, upload)
await session.commit()
except Exception as e:
logger.error(f"Error retrying failed conversions: {e}")
await session.rollback()
async def queue_file_for_processing(self, upload_id: str) -> bool:
"""Queue a file for processing."""
try:
# Add to processing queue
queue_key = "conversion_queue"
await self.redis_client.lpush(queue_key, upload_id)
logger.info(f"Queued file {upload_id} for processing")
return True
except Exception as e:
logger.error(f"Error queuing file for processing: {e}")
return False
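# --- Illustrative sketch, not part of the original module -------------------
# queue_file_for_processing() only LPUSHes the upload id onto "conversion_queue";
# nothing in this file pops it. A hypothetical worker on the consumer side could
# drain the queue with a blocking BRPOP, assuming the same Redis configuration:
import redis.asyncio as redis


async def drain_conversion_queue_sketch(redis_url: str) -> None:
    """Block on the queue and report each upload id that was enqueued."""
    client = redis.from_url(redis_url, decode_responses=True)
    try:
        while True:
            item = await client.brpop("conversion_queue", timeout=5)
            if item is None:
                continue  # timed out, poll again
            _key, upload_id = item
            print(f"would trigger processing for upload {upload_id}")
    finally:
        await client.close()
# --- End of sketch -----------------------------------------------------------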
async def get_processing_stats(self) -> Dict[str, Any]:
"""Get processing statistics."""
try:
async with get_async_session() as session:
# Get upload stats by status
status_result = await session.execute(
                    select(FileUpload.status, func.count())
.group_by(FileUpload.status)
)
status_stats = dict(status_result.fetchall())
# Get processing stats
processed_result = await session.execute(
                    select(func.count())
.select_from(FileUpload)
.where(FileUpload.processed == True)
)
processed_count = processed_result.scalar()
# Get failed stats
failed_result = await session.execute(
                    select(func.count())
.select_from(FileUpload)
.where(FileUpload.status == "failed")
)
failed_count = failed_result.scalar()
return {
"status_stats": status_stats,
"processed_count": processed_count,
"failed_count": failed_count,
"is_running": self.is_running,
"active_tasks": len([t for t in self.tasks if not t.done()]),
"temp_files": len(list(self.temp_dir.glob("*"))),
"last_update": datetime.utcnow().isoformat()
}
except Exception as e:
logger.error(f"Error getting processing stats: {e}")
return {"error": str(e)}
# Global converter instance
convert_service = ConvertService()
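# --- Illustrative usage sketch, not part of the original module -------------
# How the module-level convert_service instance might be wired into an
# application lifecycle (framework-agnostic; this entrypoint is an assumption,
# not the project's actual startup code):
import asyncio


async def run_convert_service_sketch() -> None:
    runner = asyncio.create_task(convert_service.start())
    try:
        await runner                  # runs until cancelled or it fails
    finally:
        await convert_service.stop()  # cancels loops, closes Redis, cleans temp files


if __name__ == "__main__":
    asyncio.run(run_convert_service_sketch())
# --- End of sketch -----------------------------------------------------------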

View File

@ -1,313 +1,500 @@
"""Blockchain indexer service for monitoring transactions and events."""
import asyncio import asyncio
from base64 import b64decode import json
from datetime import datetime import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Set, Any
from base58 import b58encode import redis.asyncio as redis
from sqlalchemy import String, and_, desc, cast from sqlalchemy import select, update, func
from tonsdk.boc import Cell from sqlalchemy.ext.asyncio import AsyncSession
from tonsdk.utils import Address
from app.core._config import CLIENT_TELEGRAM_BOT_USERNAME from app.core.config import get_settings
from app.core._blockchain.ton.platform import platform from app.core.database import get_async_session
from app.core._blockchain.ton.toncenter import toncenter from app.core.models.blockchain import Transaction, Wallet, BlockchainNFT, BlockchainTokenBalance
from app.core._utils.send_status import send_status from app.core.background.ton_service import TONService
from app.core.logger import make_log
from app.core.models import UserContent, KnownTelegramMessage, ServiceConfig logger = logging.getLogger(__name__)
from app.core.models.node_storage import StoredContent
from app.core._utils.resolve_content import resolve_content
from app.core.models.wallet_connection import WalletConnection
from app.core._keyboards import get_inline_keyboard
from app.core.models._telegram import Wrapped_CBotChat
from app.core.storage import db_session
import os
import traceback
async def indexer_loop(memory, platform_found: bool, seqno: int) -> [bool, int]: class IndexerService:
if not platform_found: """Service for indexing blockchain transactions and events."""
platform_state = await toncenter.get_account(platform.address.to_string(1, 1, 1))
if not platform_state.get('code'):
make_log("TON", "Platform contract is not deployed, skipping loop", level="info")
await send_status("indexer", "not working: platform is not deployed")
return False, seqno
else:
platform_found = True
make_log("Indexer", "Service running", level="debug") def __init__(self):
with db_session() as session: self.settings = get_settings()
self.ton_service = TONService()
self.redis_client: Optional[redis.Redis] = None
self.is_running = False
self.tasks: Set[asyncio.Task] = set()
# Indexing configuration
self.batch_size = 100
self.index_interval = 30 # seconds
self.confirmation_blocks = 12
self.max_retries = 3
async def start(self) -> None:
"""Start the indexer service."""
try: try:
result = await toncenter.run_get_method('EQD8TJ8xEWB1SpnRE4d89YO3jl0W0EiBnNS4IBaHaUmdfizE', 'get_pool_data') logger.info("Starting blockchain indexer service")
assert result['exit_code'] == 0, f"Error in get-method: {result}"
assert result['stack'][0][0] == 'num', f"get first element is not num"
assert result['stack'][1][0] == 'num', f"get second element is not num"
usdt_per_ton = (int(result['stack'][0][1], 16) * 1e3) / int(result['stack'][1][1], 16)
ton_per_star = 0.014 / usdt_per_ton
ServiceConfig(session).set('live_tonPerStar', [ton_per_star, datetime.utcnow().timestamp()])
make_log("TON_Daemon", f"TON per STAR price: {ton_per_star}", level="DEBUG")
except BaseException as e:
make_log("TON_Daemon", f"Error while saving TON per STAR price: {e}" + '\n' + traceback.format_exc(), level="ERROR")
new_licenses = session.query(UserContent).filter( # Initialize Redis connection
and_( self.redis_client = redis.from_url(
~UserContent.meta.contains({'notification_sent': True}), self.settings.redis_url,
UserContent.type == 'nft/listen' encoding="utf-8",
decode_responses=True,
socket_keepalive=True,
socket_keepalive_options={},
health_check_interval=30,
) )
).all()
for new_license in new_licenses:
licensed_content = session.query(StoredContent).filter(
StoredContent.id == new_license.content_id
).first()
if not licensed_content:
make_log("Indexer", f"Licensed content not found: {new_license.content_id}", level="error")
content_metadata = licensed_content.metadata_json(session) # Test Redis connection
assert content_metadata, "No content metadata found" await self.redis_client.ping()
logger.info("Redis connection established for indexer")
if not (licensed_content.owner_address == new_license.owner_address): # Start indexing tasks
self.is_running = True
# Create indexing tasks
tasks = [
asyncio.create_task(self._index_transactions_loop()),
asyncio.create_task(self._index_wallets_loop()),
asyncio.create_task(self._index_nfts_loop()),
asyncio.create_task(self._index_token_balances_loop()),
asyncio.create_task(self._cleanup_cache_loop()),
]
self.tasks.update(tasks)
# Wait for all tasks
await asyncio.gather(*tasks, return_exceptions=True)
except Exception as e:
logger.error(f"Error starting indexer service: {e}")
await self.stop()
raise
async def stop(self) -> None:
"""Stop the indexer service."""
logger.info("Stopping blockchain indexer service")
self.is_running = False
# Cancel all tasks
for task in self.tasks:
if not task.done():
task.cancel()
# Wait for tasks to complete
if self.tasks:
await asyncio.gather(*self.tasks, return_exceptions=True)
# Close Redis connection
if self.redis_client:
await self.redis_client.close()
logger.info("Indexer service stopped")
async def _index_transactions_loop(self) -> None:
"""Main loop for indexing transactions."""
logger.info("Starting transaction indexing loop")
while self.is_running:
try: try:
user = new_license.user await self._index_pending_transactions()
if user.telegram_id and licensed_content: await self._update_transaction_confirmations()
await (Wrapped_CBotChat(memory._client_telegram_bot, chat_id=user.telegram_id, user=user, db_session=session)).send_content( await asyncio.sleep(self.index_interval)
session, licensed_content
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in transaction indexing loop: {e}")
await asyncio.sleep(self.index_interval)
async def _index_wallets_loop(self) -> None:
"""Loop for updating wallet information."""
logger.info("Starting wallet indexing loop")
while self.is_running:
try:
await self._update_wallet_balances()
await asyncio.sleep(self.index_interval * 2) # Less frequent
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in wallet indexing loop: {e}")
await asyncio.sleep(self.index_interval * 2)
async def _index_nfts_loop(self) -> None:
"""Loop for indexing NFT collections and transfers."""
logger.info("Starting NFT indexing loop")
while self.is_running:
try:
await self._index_nft_collections()
await self._index_nft_transfers()
await asyncio.sleep(self.index_interval * 4) # Even less frequent
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in NFT indexing loop: {e}")
await asyncio.sleep(self.index_interval * 4)
async def _index_token_balances_loop(self) -> None:
"""Loop for updating token balances."""
logger.info("Starting token balance indexing loop")
while self.is_running:
try:
await self._update_token_balances()
await asyncio.sleep(self.index_interval * 3)
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in token balance indexing loop: {e}")
await asyncio.sleep(self.index_interval * 3)
async def _cleanup_cache_loop(self) -> None:
"""Loop for cleaning up old cache entries."""
logger.info("Starting cache cleanup loop")
while self.is_running:
try:
await self._cleanup_old_cache_entries()
await asyncio.sleep(3600) # Run every hour
except asyncio.CancelledError:
break
except Exception as e:
logger.error(f"Error in cache cleanup loop: {e}")
await asyncio.sleep(3600)
async def _index_pending_transactions(self) -> None:
"""Index pending transactions from the database."""
async with get_async_session() as session:
try:
# Get pending transactions
result = await session.execute(
select(Transaction)
.where(Transaction.status == "pending")
.limit(self.batch_size)
)
transactions = result.scalars().all()
if not transactions:
return
logger.info(f"Indexing {len(transactions)} pending transactions")
# Process each transaction
for transaction in transactions:
await self._process_transaction(session, transaction)
await session.commit()
except Exception as e:
logger.error(f"Error indexing pending transactions: {e}")
await session.rollback()
async def _process_transaction(self, session: AsyncSession, transaction: Transaction) -> None:
"""Process a single transaction."""
try:
# Check transaction status on blockchain
if transaction.tx_hash:
tx_info = await self.ton_service.get_transaction_info(transaction.tx_hash)
if tx_info:
# Update transaction with blockchain data
transaction.status = tx_info.get("status", "pending")
transaction.block_number = tx_info.get("block_number")
transaction.gas_used = tx_info.get("gas_used")
transaction.gas_price = tx_info.get("gas_price")
transaction.confirmations = tx_info.get("confirmations", 0)
transaction.updated_at = datetime.utcnow()
# Cache transaction info
cache_key = f"tx:{transaction.tx_hash}"
await self.redis_client.setex(
cache_key,
3600, # 1 hour
json.dumps(tx_info)
) )
wallet_owner_connection = session.query(WalletConnection).filter_by( logger.debug(f"Updated transaction {transaction.tx_hash}")
wallet_address=licensed_content.owner_address,
invalidated=False except Exception as e:
).order_by(desc(WalletConnection.id)).first() logger.error(f"Error processing transaction {transaction.id}: {e}")
wallet_owner_user = wallet_owner_connection.user
if wallet_owner_user.telegram_id: async def _update_transaction_confirmations(self) -> None:
wallet_owner_bot = Wrapped_CBotChat(memory._telegram_bot, chat_id=wallet_owner_user.telegram_id, user=wallet_owner_user, db_session=session) """Update confirmation counts for recent transactions."""
await wallet_owner_bot.send_message( async with get_async_session() as session:
user.translated('p_licenseWasBought').format( try:
username=user.front_format(), # Get recent confirmed transactions
nft_address=f'"https://tonviewer.com/{new_license.onchain_address}"', cutoff_time = datetime.utcnow() - timedelta(hours=24)
content_title=content_metadata.get('name', 'Unknown'), result = await session.execute(
), select(Transaction)
message_type='notification', .where(
Transaction.status == "confirmed",
Transaction.confirmations < self.confirmation_blocks,
Transaction.updated_at > cutoff_time
) )
except BaseException as e: .limit(self.batch_size)
make_log("IndexerSendNewLicense", f"Error: {e}" + '\n' + traceback.format_exc(), level="error")
new_license.meta = {**new_license.meta, 'notification_sent': True}
session.commit()
content_without_cid = session.query(StoredContent).filter(
StoredContent.content_id == None
) )
for target_content in content_without_cid: transactions = result.scalars().all()
target_cid = target_content.cid.serialize_v2()
make_log("Indexer", f"Content without CID: {target_content.hash}, setting CID: {target_cid}", level="debug")
target_content.content_id = target_cid
session.commit() for transaction in transactions:
if transaction.tx_hash:
try:
confirmations = await self.ton_service.get_transaction_confirmations(
transaction.tx_hash
)
last_known_index_ = session.query(StoredContent).filter( if confirmations != transaction.confirmations:
StoredContent.onchain_index != None transaction.confirmations = confirmations
).order_by(StoredContent.onchain_index.desc()).first() transaction.updated_at = datetime.utcnow()
last_known_index = last_known_index_.onchain_index if last_known_index_ else 0
last_known_index = max(last_known_index, 0) except Exception as e:
make_log("Indexer", f"Last known index: {last_known_index}", level="debug") logger.error(f"Error updating confirmations for {transaction.tx_hash}: {e}")
if last_known_index_:
next_item_index = last_known_index + 1 await session.commit()
except Exception as e:
logger.error(f"Error updating transaction confirmations: {e}")
await session.rollback()
async def _update_wallet_balances(self) -> None:
"""Update wallet balances from the blockchain."""
async with get_async_session() as session:
try:
# Get active wallets
result = await session.execute(
select(Wallet)
.where(Wallet.is_active == True)
.limit(self.batch_size)
)
wallets = result.scalars().all()
for wallet in wallets:
try:
# Get current balance
balance = await self.ton_service.get_wallet_balance(wallet.address)
if balance != wallet.balance:
wallet.balance = balance
wallet.updated_at = datetime.utcnow()
# Cache balance
cache_key = f"balance:{wallet.address}"
await self.redis_client.setex(cache_key, 300, str(balance)) # 5 minutes
except Exception as e:
logger.error(f"Error updating balance for wallet {wallet.address}: {e}")
await session.commit()
except Exception as e:
logger.error(f"Error updating wallet balances: {e}")
await session.rollback()
async def _index_nft_collections(self) -> None:
"""Index NFT collections and metadata."""
async with get_async_session() as session:
try:
# Get wallets to check for NFTs
result = await session.execute(
select(Wallet)
.where(Wallet.is_active == True)
.limit(self.batch_size // 4) # Smaller batch for NFTs
)
wallets = result.scalars().all()
for wallet in wallets:
try:
# Get NFTs for this wallet
nfts = await self.ton_service.get_wallet_nfts(wallet.address)
for nft_data in nfts:
await self._process_nft(session, wallet, nft_data)
except Exception as e:
logger.error(f"Error indexing NFTs for wallet {wallet.address}: {e}")
await session.commit()
except Exception as e:
logger.error(f"Error indexing NFT collections: {e}")
await session.rollback()
async def _process_nft(self, session: AsyncSession, wallet: Wallet, nft_data: Dict[str, Any]) -> None:
"""Process a single NFT."""
try:
# Check if NFT exists
result = await session.execute(
select(BlockchainNFT)
.where(
BlockchainNFT.token_id == nft_data["token_id"],
BlockchainNFT.collection_address == nft_data["collection_address"]
)
)
existing_nft = result.scalar_one_or_none()
if existing_nft:
# Update existing NFT
existing_nft.owner_address = wallet.address
existing_nft.metadata = nft_data.get("metadata", {})
existing_nft.updated_at = datetime.utcnow()
else: else:
next_item_index = 0 # Create new NFT
new_nft = BlockchainNFT(
wallet_id=wallet.id,
token_id=nft_data["token_id"],
collection_address=nft_data["collection_address"],
owner_address=wallet.address,
token_uri=nft_data.get("token_uri"),
metadata=nft_data.get("metadata", {}),
created_at=datetime.utcnow()
)
session.add(new_nft)
resolve_item_result = await toncenter.run_get_method(platform.address.to_string(1, 1, 1), 'get_nft_address_by_index', [['num', next_item_index]]) except Exception as e:
make_log("Indexer", f"Resolve item result: {resolve_item_result}", level="debug") logger.error(f"Error processing NFT {nft_data.get('token_id')}: {e}")
if resolve_item_result.get('exit_code', -1) != 0:
make_log("Indexer", f"Resolve item error: {resolve_item_result}", level="error")
return platform_found, seqno
item_address_cell_b64 = resolve_item_result['stack'][0][1]["bytes"] async def _index_nft_transfers(self) -> None:
item_address_slice = Cell.one_from_boc(b64decode(item_address_cell_b64)).begin_parse() """Index NFT transfers."""
item_address = item_address_slice.read_msg_addr() # This would involve checking recent blocks for NFT transfer events
make_log("Indexer", f"Item address: {item_address.to_string(1, 1, 1)}", level="debug") # Implementation depends on the specific blockchain's event system
pass
item_get_data_result = await toncenter.run_get_method(item_address.to_string(1, 1, 1), 'indexator_data') async def _update_token_balances(self) -> None:
if item_get_data_result.get('exit_code', -1) != 0: """Update token balances for wallets."""
make_log("Indexer", f"Get item data error (maybe not deployed): {item_get_data_result}", level="debug") async with get_async_session() as session:
return platform_found, seqno try:
# Get wallets with token balances to update
result = await session.execute(
select(Wallet)
.where(Wallet.is_active == True)
.limit(self.batch_size // 2)
)
wallets = result.scalars().all()
assert item_get_data_result['stack'][0][0] == 'num', "Item type is not a number" for wallet in wallets:
assert int(item_get_data_result['stack'][0][1], 16) == 1, "Item is not COP NFT" try:
item_returned_address = Cell.one_from_boc(b64decode(item_get_data_result['stack'][1][1]['bytes'])).begin_parse().read_msg_addr() # Get token balances
assert ( token_balances = await self.ton_service.get_wallet_token_balances(wallet.address)
item_returned_address.to_string(1, 1, 1) == item_address.to_string(1, 1, 1)
), "Item address mismatch"
assert item_get_data_result['stack'][2][0] == 'num', "Item index is not a number" for token_data in token_balances:
item_index = int(item_get_data_result['stack'][2][1], 16) await self._update_token_balance(session, wallet, token_data)
assert item_index == next_item_index, "Item index mismatch"
item_platform_address = Cell.one_from_boc(b64decode(item_get_data_result['stack'][3][1]['bytes'])).begin_parse().read_msg_addr() except Exception as e:
assert item_platform_address.to_string(1, 1, 1) == Address(platform.address.to_string(1, 1, 1)).to_string(1, 1, 1), "Item platform address mismatch" logger.error(f"Error updating token balances for {wallet.address}: {e}")
assert item_get_data_result['stack'][4][0] == 'num', "Item license type is not a number" await session.commit()
item_license_type = int(item_get_data_result['stack'][4][1], 16)
assert item_license_type == 0, "Item license type is not 0"
item_owner_address = Cell.one_from_boc(b64decode(item_get_data_result['stack'][5][1]["bytes"])).begin_parse().read_msg_addr() except Exception as e:
item_values = Cell.one_from_boc(b64decode(item_get_data_result['stack'][6][1]['bytes'])) logger.error(f"Error updating token balances: {e}")
item_derivates = Cell.one_from_boc(b64decode(item_get_data_result['stack'][7][1]['bytes'])) await session.rollback()
item_platform_variables = Cell.one_from_boc(b64decode(item_get_data_result['stack'][8][1]['bytes']))
item_distribution = Cell.one_from_boc(b64decode(item_get_data_result['stack'][9][1]['bytes']))
item_distribution_slice = item_distribution.begin_parse() async def _update_token_balance(
item_prices_slice = item_distribution_slice.refs[0].begin_parse() self,
item_listen_license_price = item_prices_slice.read_coins() session: AsyncSession,
item_use_license_price = item_prices_slice.read_coins() wallet: Wallet,
item_resale_license_price = item_prices_slice.read_coins() token_data: Dict[str, Any]
) -> None:
"""Update a single token balance."""
try:
# Check if balance record exists
result = await session.execute(
select(BlockchainTokenBalance)
.where(
BlockchainTokenBalance.wallet_id == wallet.id,
BlockchainTokenBalance.token_address == token_data["token_address"]
)
)
existing_balance = result.scalar_one_or_none()
item_values_slice = item_values.begin_parse() if existing_balance:
item_content_hash_int = item_values_slice.read_uint(256) # Update existing balance
item_content_hash = item_content_hash_int.to_bytes(32, 'big') existing_balance.balance = token_data["balance"]
# item_content_hash_str = b58encode(item_content_hash).decode() existing_balance.decimals = token_data.get("decimals", 18)
item_metadata = item_values_slice.refs[0] existing_balance.updated_at = datetime.utcnow()
item_content = item_values_slice.refs[1] else:
item_metadata_str = item_metadata.bits.array.decode() # Create new balance record
item_content_cid_str = item_content.refs[0].bits.array.decode() new_balance = BlockchainTokenBalance(
item_content_cover_cid_str = item_content.refs[1].bits.array.decode() wallet_id=wallet.id,
item_content_metadata_cid_str = item_content.refs[2].bits.array.decode() token_address=token_data["token_address"],
token_name=token_data.get("name"),
token_symbol=token_data.get("symbol"),
balance=token_data["balance"],
decimals=token_data.get("decimals", 18),
created_at=datetime.utcnow()
)
session.add(new_balance)
item_content_cid, err = resolve_content(item_content_cid_str) except Exception as e:
item_content_hash = item_content_cid.content_hash logger.error(f"Error updating token balance: {e}")
item_content_hash_str = item_content_cid.content_hash_b58
item_metadata_packed = { async def _cleanup_old_cache_entries(self) -> None:
'license_type': item_license_type, """Clean up old cache entries."""
'item_address': item_address.to_string(1, 1, 1), try:
'content_cid': item_content_cid_str, # Get all keys with our prefixes
'cover_cid': item_content_cover_cid_str, patterns = ["tx:*", "balance:*", "nft:*", "token:*"]
'metadata_cid': item_content_metadata_cid_str,
'derivates': b58encode(item_derivates.to_boc(False)).decode(), for pattern in patterns:
'platform_variables': b58encode(item_platform_variables.to_boc(False)).decode(), keys = await self.redis_client.keys(pattern)
'license': {
'listen': { # Check TTL and remove expired keys
'price': str(item_listen_license_price) for key in keys:
}, ttl = await self.redis_client.ttl(key)
'use': { if ttl == -1: # No expiration set
'price': str(item_use_license_price) await self.redis_client.expire(key, 3600) # Set 1 hour expiration
},
'resale': { logger.debug("Cache cleanup completed")
'price': str(item_resale_license_price)
} except Exception as e:
} logger.error(f"Error during cache cleanup: {e}")
async def get_indexing_stats(self) -> Dict[str, Any]:
"""Get indexing statistics."""
try:
async with get_async_session() as session:
# Get transaction stats
tx_result = await session.execute(
                    select(Transaction.status, func.count())
.group_by(Transaction.status)
)
tx_stats = dict(tx_result.fetchall())
# Get wallet stats
wallet_result = await session.execute(
                    select(func.count())
.select_from(Wallet)
.where(Wallet.is_active == True)
)
active_wallets = wallet_result.scalar()
# Get NFT stats
nft_result = await session.execute(
                    select(func.count())
.select_from(BlockchainNFT)
)
total_nfts = nft_result.scalar()
return {
"transaction_stats": tx_stats,
"active_wallets": active_wallets,
"total_nfts": total_nfts,
"is_running": self.is_running,
"active_tasks": len([t for t in self.tasks if not t.done()]),
"last_update": datetime.utcnow().isoformat()
} }
user_wallet_connection = None except Exception as e:
if item_owner_address: logger.error(f"Error getting indexing stats: {e}")
user_wallet_connection = session.query(WalletConnection).filter( return {"error": str(e)}
WalletConnection.wallet_address == item_owner_address.to_string(1, 1, 1)
).first()
encrypted_stored_content = session.query(StoredContent).filter(
StoredContent.hash == item_content_hash_str,
# StoredContent.type.like("local%")
).first()
if encrypted_stored_content:
is_duplicate = encrypted_stored_content.type.startswith("onchain") \
and encrypted_stored_content.onchain_index != item_index
if not is_duplicate:
if encrypted_stored_content.type.startswith('local'):
encrypted_stored_content.type = "onchain/content" + ("_unknown" if (encrypted_stored_content.key_id is None) else "")
encrypted_stored_content.onchain_index = item_index
encrypted_stored_content.owner_address = item_owner_address.to_string(1, 1, 1)
user = None
if user_wallet_connection:
encrypted_stored_content.user_id = user_wallet_connection.user_id
user = user_wallet_connection.user
if user:
user_uploader_wrapper = Wrapped_CBotChat(memory._telegram_bot, chat_id=user.telegram_id, user=user, db_session=session)
await user_uploader_wrapper.send_message(
user.translated('p_contentWasIndexed').format(
item_address=item_address.to_string(1, 1, 1),
item_index=item_index,
),
message_type='notification',
reply_markup=get_inline_keyboard([
[{
'text': user.translated('viewTrackAsClient_button'),
'url': f"https://t.me/{CLIENT_TELEGRAM_BOT_USERNAME}?start=C{encrypted_stored_content.cid.serialize_v2()}"
}],
])
)
try:
for hint_message in session.query(KnownTelegramMessage).filter(
and_(
KnownTelegramMessage.chat_id == user.telegram_id,
KnownTelegramMessage.type == 'hint',
cast(KnownTelegramMessage.meta['encrypted_content_hash'], String) == encrypted_stored_content.hash,
KnownTelegramMessage.deleted == False
)
).all():
await user_uploader_wrapper.delete_message(hint_message.message_id)
except BaseException as e:
make_log("Indexer", f"Error while deleting hint messages: {e}" + '\n' + traceback.format_exc(), level="error")
elif encrypted_stored_content.type.startswith('onchain') and encrypted_stored_content.onchain_index == item_index:
encrypted_stored_content.type = "onchain/content" + ("_unknown" if (encrypted_stored_content.key_id is None) else "")
encrypted_stored_content.owner_address = item_owner_address.to_string(1, 1, 1)
if user_wallet_connection:
encrypted_stored_content.user_id = user_wallet_connection.user_id
else:
make_log("Indexer", f"[CRITICAL] Item already indexed and ERRORED!: {item_content_hash_str}", level="error")
return platform_found, seqno
encrypted_stored_content.updated = datetime.now()
encrypted_stored_content.meta = {
**encrypted_stored_content.meta,
**item_metadata_packed
}
session.commit()
return platform_found, seqno
else:
item_metadata_packed['copied_from'] = encrypted_stored_content.id
item_metadata_packed['copied_from_cid'] = encrypted_stored_content.cid.serialize_v2()
item_content_hash_str = f"{b58encode(bytes(16) + os.urandom(30)).decode()}" # check this for vulnerability
onchain_stored_content = StoredContent(
type="onchain/content_unknown",
hash=item_content_hash_str,
onchain_index=item_index,
owner_address=item_owner_address.to_string(1, 1, 1) if item_owner_address else None,
meta=item_metadata_packed,
filename="UNKNOWN_ENCRYPTED_CONTENT",
user_id=user_wallet_connection.user_id if user_wallet_connection else None,
created=datetime.now(),
encrypted=True,
decrypted_content_id=None,
key_id=None,
updated=datetime.now()
)
session.add(onchain_stored_content)
session.commit()
make_log("Indexer", f"Item indexed: {item_content_hash_str}", level="info")
last_known_index += 1
return platform_found, seqno
async def main_fn(memory, ): # Global indexer instance
make_log("Indexer", "Service started", level="info") indexer_service = IndexerService()
platform_found = False
seqno = 0
while True:
try:
platform_found, seqno = await indexer_loop(memory, platform_found, seqno)
except BaseException as e:
make_log("Indexer", f"Error: {e}" + '\n' + traceback.format_exc(), level="error")
if platform_found:
await send_status("indexer", f"working (seqno={seqno})")
await asyncio.sleep(5)
seqno += 1
# if __name__ == '__main__':
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# loop.close()

View File

@ -1,290 +1,658 @@
"""
TON Blockchain service for wallet operations, transaction management, and smart contract interactions.
Provides async operations with connection pooling, caching, and comprehensive error handling.
"""
import asyncio import asyncio
from base64 import b64decode import json
import os
import traceback
import httpx
from sqlalchemy import and_, func
from tonsdk.boc import begin_cell, Cell
from tonsdk.contract.wallet import Wallets
from tonsdk.utils import HighloadQueryId
from datetime import datetime, timedelta from datetime import datetime, timedelta
from decimal import Decimal
from typing import Dict, List, Optional, Any, Tuple
from uuid import UUID
from app.core._blockchain.ton.platform import platform import httpx
from app.core._blockchain.ton.toncenter import toncenter from sqlalchemy import select, update, and_
from app.core.models.tasks import BlockchainTask
from app.core._config import MY_FUND_ADDRESS
from app.core._secrets import service_wallet
from app.core._utils.send_status import send_status
from app.core.storage import db_session
from app.core.logger import make_log
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.security import decrypt_data, encrypt_data
async def get_sw_seqno(): logger = get_logger(__name__)
sw_seqno_result = await toncenter.run_get_method(service_wallet.address.to_string(1, 1, 1), 'seqno') settings = get_settings()
if sw_seqno_result.get('exit_code', -1) != 0:
sw_seqno_value = 0
else:
sw_seqno_value = int(sw_seqno_result.get('stack', [['num', '0x0']])[0][1], 16)
return sw_seqno_value class TONService:
"""
Comprehensive TON blockchain service with async operations.
Handles wallet management, transactions, and smart contract interactions.
"""
def __init__(self):
self.api_endpoint = settings.TON_API_ENDPOINT
self.testnet = settings.TON_TESTNET
self.api_key = settings.TON_API_KEY
self.timeout = 30
async def main_fn(memory): # HTTP client for API requests
make_log("TON", f"Service started, SW = {service_wallet.address.to_string(1, 1, 1)}", level="info") self.client = httpx.AsyncClient(
sw_seqno_value = await get_sw_seqno() timeout=self.timeout,
make_log("TON", f"Service wallet run seqno method: {sw_seqno_value}", level="info") headers={
if sw_seqno_value == 0: "Authorization": f"Bearer {self.api_key}" if self.api_key else None,
make_log("TON", "Service wallet is not deployed, deploying...", level="info") "Content-Type": "application/json"
await toncenter.send_boc( }
service_wallet.create_transfer_message(
[{
'address': service_wallet.address.to_string(1, 1, 1),
'amount': 1,
'send_mode': 1,
'payload': begin_cell().store_uint(0, 32).store_bytes(b"Init MY Node").end_cell()
}], 0
)['message'].to_boc(False)
)
await asyncio.sleep(5)
return await main_fn(memory)
if os.getenv("TON_BEGIN_COMMAND_WITHDRAW"):
await toncenter.send_boc(
service_wallet.create_transfer_message(
[{
'address': MY_FUND_ADDRESS,
'amount': 1,
'send_mode': 128,
'payload': begin_cell().end_cell()
}], sw_seqno_value
)['message'].to_boc(False)
)
make_log("TON", "Withdraw command sent", level="info")
await asyncio.sleep(10)
return await main_fn(memory)
    # TODO: do not deploy if master_address is set and we have verified that the account exists. Right now each node ends up with its own platform
platform_state = await toncenter.get_account(platform.address.to_string(1, 1, 1))
if not platform_state.get('code'):
make_log("TON", "Platform contract is not deployed, send deploy transaction..", level="info")
await toncenter.send_boc(
service_wallet.create_transfer_message(
[{
'address': platform.address.to_string(1, 1, 1),
'amount': int(0.08 * 10 ** 9),
'send_mode': 1,
'payload': begin_cell().store_uint(0, 32).store_uint(0, 64).end_cell(),
'state_init': platform.create_state_init()['state_init']
}], sw_seqno_value
)['message'].to_boc(False)
) )
await send_status("ton_daemon", "working: deploying platform") self.cache_manager = get_cache_manager()
await asyncio.sleep(15)
return await main_fn(memory)
highload_wallet = Wallets.ALL['hv3']( async def close(self):
private_key=service_wallet.options['private_key'], """Close HTTP client and cleanup resources."""
public_key=service_wallet.options['public_key'], if self.client:
wc=0 await self.client.aclose()
)
make_log("TON", f"Highload wallet address: {highload_wallet.address.to_string(1, 1, 1)}", level="info")
highload_state = await toncenter.get_account(highload_wallet.address.to_string(1, 1, 1))
if int(highload_state.get('balance', '0')) / 1e9 < 0.05:
make_log("TON", "Highload wallet balance is less than 0.05, send topup transaction..", level="info")
await toncenter.send_boc(
service_wallet.create_transfer_message(
[{
'address': highload_wallet.address.to_string(1, 1, 0),
'amount': int(0.08 * 10 ** 9),
'send_mode': 1,
'payload': begin_cell().store_uint(0, 32).end_cell()
}], sw_seqno_value
)['message'].to_boc(False)
)
await send_status("ton_daemon", "working: topup highload wallet")
await asyncio.sleep(15)
return await main_fn(memory)
if not highload_state.get('code'): async def create_wallet(self) -> Dict[str, Any]:
make_log("TON", "Highload wallet contract is not deployed, send deploy transaction..", level="info") """
created_at_ts = int(datetime.utcnow().timestamp()) - 60 Create new TON wallet with mnemonic generation.
await toncenter.send_boc(
highload_wallet.create_transfer_message(
service_wallet.address.to_string(1, 1, 1),
1, HighloadQueryId.from_seqno(0), created_at_ts, send_mode=1, payload="hello world", need_deploy=True
)['message'].to_boc(False)
)
await send_status("ton_daemon", "working: deploying highload wallet")
await asyncio.sleep(15)
return await main_fn(memory)
while True: Returns:
Dict: Wallet creation result with address and private key
"""
try: try:
sw_seqno_value = await get_sw_seqno() # Generate mnemonic phrase
make_log("TON", f"Service running ({sw_seqno_value})", level="debug") mnemonic_response = await self.client.post(
f"{self.api_endpoint}/wallet/generate",
with db_session() as session: json={"testnet": self.testnet}
            # Check previously sent messages
await send_status("ton_daemon", f"working: processing in-txs (seqno={sw_seqno_value})")
async def process_incoming_transaction(transaction: dict):
transaction_hash = transaction['transaction_id']['hash']
transaction_lt = str(transaction['transaction_id']['lt'])
# transaction_success = bool(transaction['success'])
async def process_incoming_message(blockchain_message: dict):
in_msg_cell = Cell.one_from_boc(b64decode(blockchain_message['msg_data']['body']))
in_msg_slice = in_msg_cell.refs[0].begin_parse()
in_msg_slice.read_uint(32)
in_msg_slice.read_uint(8)
in_msg_query_id = in_msg_slice.read_uint(23)
in_msg_created_at = in_msg_slice.read_uint(64)
in_msg_epoch = int(in_msg_created_at // (60 * 60))
in_msg_seqno = HighloadQueryId.from_query_id(in_msg_query_id).to_seqno()
in_msg_blockchain_task = (
session.query(BlockchainTask).filter(
and_(
BlockchainTask.seqno == in_msg_seqno,
BlockchainTask.epoch == in_msg_epoch,
) )
)
).first()
if not in_msg_blockchain_task:
return
if not (in_msg_blockchain_task.status in ['done']) or in_msg_blockchain_task.transaction_hash != transaction_hash: if mnemonic_response.status_code != 200:
in_msg_blockchain_task.status = 'done' error_msg = f"Failed to generate wallet: {mnemonic_response.text}"
in_msg_blockchain_task.transaction_hash = transaction_hash await logger.aerror("Wallet generation failed", error=error_msg)
in_msg_blockchain_task.transaction_lt = transaction_lt return {"error": error_msg}
session.commit()
for blockchain_message in [transaction['in_msg']]: mnemonic_data = mnemonic_response.json()
try:
await process_incoming_message(blockchain_message)
except BaseException as e:
pass # make_log("TON_Daemon", f"Error while processing incoming message: {e}" + '\n' + traceback.format_exc(), level='debug')
try: # Create wallet from mnemonic
sw_transactions = await toncenter.get_transactions(highload_wallet.address.to_string(1, 1, 1), limit=100) wallet_response = await self.client.post(
for sw_transaction in sw_transactions: f"{self.api_endpoint}/wallet/create",
try:
await process_incoming_transaction(sw_transaction)
except BaseException as e:
make_log("TON_Daemon", f"Error while processing incoming transaction: {e}", level="debug")
except BaseException as e:
make_log("TON_Daemon", f"Error while getting service wallet transactions: {e}", level="ERROR")
await send_status("ton_daemon", f"working: processing out-txs (seqno={sw_seqno_value})")
# Отправка подписанных сообщений
for blockchain_task in (
session.query(BlockchainTask).filter(
BlockchainTask.status == 'processing',
).order_by(BlockchainTask.updated.asc()).all()
):
make_log("TON_Daemon", f"Processing task (processing) {blockchain_task.id}")
query_boc = bytes.fromhex(blockchain_task.meta['signed_message'])
errors_list = []
try:
await toncenter.send_boc(query_boc)
except BaseException as e:
errors_list.append(f"{e}")
try:
make_log("TON_Daemon", str(
httpx.post(
'https://tonapi.io/v2/blockchain/message',
json={ json={
'boc': query_boc.hex() "mnemonic": mnemonic_data["mnemonic"],
"testnet": self.testnet
} }
).text
))
except BaseException as e:
make_log("TON_Daemon", f"Error while pushing task to tonkeeper ({blockchain_task.id}): {e}", level="ERROR")
errors_list.append(f"{e}")
blockchain_task.updated = datetime.utcnow()
if blockchain_task.meta['sign_created'] + 10 * 60 < datetime.utcnow().timestamp():
# or sum([int("terminating vm with exit code 36" in e) for e in errors_list]) > 0:
make_log("TON_Daemon", f"Task {blockchain_task.id} done", level="DEBUG")
blockchain_task.status = 'done'
session.commit()
continue
await asyncio.sleep(0.5)
await send_status("ton_daemon", f"working: creating new messages (seqno={sw_seqno_value})")
# Создание новых подписей
for blockchain_task in (
session.query(BlockchainTask).filter(BlockchainTask.status == 'wait').all()
):
try:
# Check processing tasks in current epoch < 3_000_000
if (
session.query(BlockchainTask).filter(
BlockchainTask.epoch == blockchain_task.epoch,
).count() > 3_000_000
):
make_log("TON", f"Too many processing tasks in epoch {blockchain_task.epoch}", level="error")
await send_status("ton_daemon", f"working: too many tasks in epoch {blockchain_task.epoch}")
await asyncio.sleep(5)
continue
sign_created = int(datetime.utcnow().timestamp()) - 60
try:
current_epoch = int(datetime.utcnow().timestamp() // (60 * 60))
max_epoch_seqno = (
session.query(func.max(BlockchainTask.seqno)).filter(
BlockchainTask.epoch == current_epoch
).scalar() or 0
) )
current_epoch_shift = 3_000_000 if current_epoch % 2 == 0 else 0
current_seqno = max_epoch_seqno + 1 + (current_epoch_shift if max_epoch_seqno == 0 else 0)
except BaseException as e:
make_log("CRITICAL", f"Error calculating epoch,seqno: {e}", level="error")
current_epoch = 0
current_seqno = 0
blockchain_task.seqno = current_seqno if wallet_response.status_code != 200:
blockchain_task.epoch = current_epoch error_msg = f"Failed to create wallet: {wallet_response.text}"
blockchain_task.status = 'processing' await logger.aerror("Wallet creation failed", error=error_msg)
try: return {"error": error_msg}
query = highload_wallet.create_transfer_message(
blockchain_task.destination, int(blockchain_task.amount), HighloadQueryId.from_seqno(current_seqno), wallet_data = wallet_response.json()
sign_created, send_mode=1,
payload=Cell.one_from_boc(b64decode(blockchain_task.payload)) await logger.ainfo(
"Wallet created successfully",
address=wallet_data.get("address"),
testnet=self.testnet
) )
query_boc = query['message'].to_boc(False)
except BaseException as e:
make_log("TON", f"Error creating transfer message: {e}", level="error")
query_boc = begin_cell().end_cell().to_boc(False)
blockchain_task.meta = { return {
**blockchain_task.meta, "address": wallet_data["address"],
'sign_created': sign_created, "private_key": wallet_data["private_key"],
'signed_message': query_boc.hex(), "mnemonic": mnemonic_data["mnemonic"],
"testnet": self.testnet
} }
session.commit()
make_log("TON", f"Created signed message for task {blockchain_task.id}" + '\n' + traceback.format_exc(), level="info")
except BaseException as e:
make_log("TON", f"Error processing task {blockchain_task.id}: {e}" + '\n' + traceback.format_exc(), level="error")
continue
await asyncio.sleep(1) except httpx.TimeoutException:
error_msg = "Wallet creation timeout"
await logger.aerror(error_msg)
return {"error": error_msg}
except Exception as e:
error_msg = f"Wallet creation error: {str(e)}"
await logger.aerror("Wallet creation exception", error=str(e))
return {"error": error_msg}
await asyncio.sleep(1) async def get_wallet_balance(self, address: str) -> Dict[str, Any]:
await send_status("ton_daemon", f"working (seqno={sw_seqno_value})") """
except BaseException as e: Get wallet balance with caching for performance.
make_log("TON", f"Error: {e}", level="error")
await asyncio.sleep(3)
# if __name__ == '__main__': Args:
# loop = asyncio.get_event_loop() address: TON wallet address
# loop.run_until_complete(main())
# loop.close()
Returns:
Dict: Balance information
"""
try:
# Check cache first
cache_key = f"ton_balance:{address}"
cached_balance = await self.cache_manager.get(cache_key)
if cached_balance:
return cached_balance
# Fetch from blockchain
balance_response = await self.client.get(
f"{self.api_endpoint}/wallet/{address}/balance"
)
if balance_response.status_code != 200:
error_msg = f"Failed to get balance: {balance_response.text}"
return {"error": error_msg}
balance_data = balance_response.json()
result = {
"balance": int(balance_data.get("balance", 0)), # nanotons
"last_transaction_lt": balance_data.get("last_transaction_lt"),
"account_state": balance_data.get("account_state", "unknown"),
"updated_at": datetime.utcnow().isoformat()
}
# Cache for 30 seconds
await self.cache_manager.set(cache_key, result, ttl=30)
return result
except httpx.TimeoutException:
return {"error": "Balance fetch timeout"}
except Exception as e:
await logger.aerror("Balance fetch error", address=address, error=str(e))
return {"error": f"Balance fetch error: {str(e)}"}
async def get_wallet_transactions(
self,
address: str,
limit: int = 20,
offset: int = 0
) -> Dict[str, Any]:
"""
Get wallet transaction history with pagination.
Args:
address: TON wallet address
limit: Number of transactions to fetch
offset: Pagination offset
Returns:
Dict: Transaction history
"""
try:
# Check cache
cache_key = f"ton_transactions:{address}:{limit}:{offset}"
cached_transactions = await self.cache_manager.get(cache_key)
if cached_transactions:
return cached_transactions
transactions_response = await self.client.get(
f"{self.api_endpoint}/wallet/{address}/transactions",
params={"limit": limit, "offset": offset}
)
if transactions_response.status_code != 200:
error_msg = f"Failed to get transactions: {transactions_response.text}"
return {"error": error_msg}
transactions_data = transactions_response.json()
result = {
"transactions": transactions_data.get("transactions", []),
"total": transactions_data.get("total", 0),
"limit": limit,
"offset": offset,
"updated_at": datetime.utcnow().isoformat()
}
# Cache for 1 minute
await self.cache_manager.set(cache_key, result, ttl=60)
return result
except httpx.TimeoutException:
return {"error": "Transaction fetch timeout"}
except Exception as e:
await logger.aerror(
"Transaction fetch error",
address=address,
error=str(e)
)
return {"error": f"Transaction fetch error: {str(e)}"}
async def send_transaction(
self,
private_key: str,
recipient_address: str,
amount: int,
message: str = "",
**kwargs
) -> Dict[str, Any]:
"""
Send TON transaction with validation and monitoring.
Args:
private_key: Encrypted private key
recipient_address: Recipient wallet address
amount: Amount in nanotons
message: Optional message
**kwargs: Additional transaction parameters
Returns:
Dict: Transaction result
"""
try:
# Validate inputs
if amount <= 0:
return {"error": "Amount must be positive"}
if len(recipient_address) != 48:
return {"error": "Invalid recipient address format"}
# Decrypt private key
try:
decrypted_key = decrypt_data(private_key, context="wallet")
if isinstance(decrypted_key, bytes):
decrypted_key = decrypted_key.decode('utf-8')
except Exception as e:
await logger.aerror("Private key decryption failed", error=str(e))
return {"error": "Invalid private key"}
# Prepare transaction
transaction_data = {
"private_key": decrypted_key,
"recipient": recipient_address,
"amount": str(amount),
"message": message,
"testnet": self.testnet
}
# Send transaction
tx_response = await self.client.post(
f"{self.api_endpoint}/transaction/send",
json=transaction_data
)
if tx_response.status_code != 200:
error_msg = f"Transaction failed: {tx_response.text}"
await logger.aerror(
"Transaction submission failed",
recipient=recipient_address,
amount=amount,
error=error_msg
)
return {"error": error_msg}
tx_data = tx_response.json()
result = {
"hash": tx_data["hash"],
"lt": tx_data.get("lt"),
"fee": tx_data.get("fee", 0),
"block_hash": tx_data.get("block_hash"),
"timestamp": datetime.utcnow().isoformat()
}
await logger.ainfo(
"Transaction sent successfully",
hash=result["hash"],
recipient=recipient_address,
amount=amount
)
return result
except httpx.TimeoutException:
return {"error": "Transaction timeout"}
except Exception as e:
await logger.aerror(
"Transaction send error",
recipient=recipient_address,
amount=amount,
error=str(e)
)
return {"error": f"Transaction error: {str(e)}"}
async def get_transaction_status(self, tx_hash: str) -> Dict[str, Any]:
"""
Get transaction status and confirmation details.
Args:
tx_hash: Transaction hash
Returns:
Dict: Transaction status information
"""
try:
# Check cache
cache_key = f"ton_tx_status:{tx_hash}"
cached_status = await self.cache_manager.get(cache_key)
if cached_status and cached_status.get("confirmed"):
return cached_status
status_response = await self.client.get(
f"{self.api_endpoint}/transaction/{tx_hash}/status"
)
if status_response.status_code != 200:
return {"error": f"Failed to get status: {status_response.text}"}
status_data = status_response.json()
result = {
"hash": tx_hash,
"confirmed": status_data.get("confirmed", False),
"failed": status_data.get("failed", False),
"confirmations": status_data.get("confirmations", 0),
"block_hash": status_data.get("block_hash"),
"block_time": status_data.get("block_time"),
"fee": status_data.get("fee"),
"confirmed_at": status_data.get("confirmed_at"),
"updated_at": datetime.utcnow().isoformat()
}
# Cache confirmed/failed transactions longer
cache_ttl = 3600 if result["confirmed"] or result["failed"] else 30
await self.cache_manager.set(cache_key, result, ttl=cache_ttl)
return result
except httpx.TimeoutException:
return {"error": "Status check timeout"}
except Exception as e:
await logger.aerror("Status check error", tx_hash=tx_hash, error=str(e))
return {"error": f"Status check error: {str(e)}"}
async def validate_address(self, address: str) -> Dict[str, Any]:
"""
Validate TON address format and existence.
Args:
address: TON address to validate
Returns:
Dict: Validation result
"""
try:
# Basic format validation
if len(address) != 48:
return {"valid": False, "error": "Invalid address length"}
# Check against blockchain
validation_response = await self.client.post(
f"{self.api_endpoint}/address/validate",
json={"address": address}
)
if validation_response.status_code != 200:
return {"valid": False, "error": "Validation service error"}
validation_data = validation_response.json()
return {
"valid": validation_data.get("valid", False),
"exists": validation_data.get("exists", False),
"account_type": validation_data.get("account_type"),
"error": validation_data.get("error")
}
except Exception as e:
await logger.aerror("Address validation error", address=address, error=str(e))
return {"valid": False, "error": f"Validation error: {str(e)}"}
async def get_network_info(self) -> Dict[str, Any]:
"""
Get TON network information and statistics.
Returns:
Dict: Network information
"""
try:
cache_key = "ton_network_info"
cached_info = await self.cache_manager.get(cache_key)
if cached_info:
return cached_info
network_response = await self.client.get(
f"{self.api_endpoint}/network/info"
)
if network_response.status_code != 200:
return {"error": f"Failed to get network info: {network_response.text}"}
network_data = network_response.json()
result = {
"network": "testnet" if self.testnet else "mainnet",
"last_block": network_data.get("last_block"),
"last_block_time": network_data.get("last_block_time"),
"total_accounts": network_data.get("total_accounts"),
"total_transactions": network_data.get("total_transactions"),
"tps": network_data.get("tps"), # Transactions per second
"updated_at": datetime.utcnow().isoformat()
}
# Cache for 5 minutes
await self.cache_manager.set(cache_key, result, ttl=300)
return result
except Exception as e:
await logger.aerror("Network info error", error=str(e))
return {"error": f"Network info error: {str(e)}"}
async def estimate_transaction_fee(
self,
sender_address: str,
recipient_address: str,
amount: int,
message: str = ""
) -> Dict[str, Any]:
"""
Estimate transaction fee before sending.
Args:
sender_address: Sender wallet address
recipient_address: Recipient wallet address
amount: Amount in nanotons
message: Optional message
Returns:
Dict: Fee estimation
"""
try:
fee_response = await self.client.post(
f"{self.api_endpoint}/transaction/estimate-fee",
json={
"sender": sender_address,
"recipient": recipient_address,
"amount": str(amount),
"message": message
}
)
if fee_response.status_code != 200:
return {"error": f"Fee estimation failed: {fee_response.text}"}
fee_data = fee_response.json()
return {
"estimated_fee": fee_data.get("fee", 0),
"estimated_fee_tons": str(Decimal(fee_data.get("fee", 0)) / Decimal("1000000000")),
"gas_used": fee_data.get("gas_used"),
"message_size": len(message.encode('utf-8')),
"updated_at": datetime.utcnow().isoformat()
}
except Exception as e:
await logger.aerror("Fee estimation error", error=str(e))
return {"error": f"Fee estimation error: {str(e)}"}
async def monitor_transaction(self, tx_hash: str, max_wait_time: int = 300) -> Dict[str, Any]:
"""
Monitor transaction until confirmation or timeout.
Args:
tx_hash: Transaction hash to monitor
max_wait_time: Maximum wait time in seconds
Returns:
Dict: Final transaction status
"""
start_time = datetime.utcnow()
check_interval = 5 # Check every 5 seconds
while (datetime.utcnow() - start_time).total_seconds() < max_wait_time:
status = await self.get_transaction_status(tx_hash)
if status.get("error"):
return status
if status.get("confirmed") or status.get("failed"):
await logger.ainfo(
"Transaction monitoring completed",
tx_hash=tx_hash,
confirmed=status.get("confirmed"),
failed=status.get("failed"),
duration=(datetime.utcnow() - start_time).total_seconds()
)
return status
await asyncio.sleep(check_interval)
# Timeout reached
await logger.awarning(
"Transaction monitoring timeout",
tx_hash=tx_hash,
max_wait_time=max_wait_time
)
return {
"hash": tx_hash,
"confirmed": False,
"timeout": True,
"error": "Monitoring timeout reached"
}
async def get_smart_contract_info(self, address: str) -> Dict[str, Any]:
"""
Get smart contract information and ABI.
Args:
address: Smart contract address
Returns:
Dict: Contract information
"""
try:
cache_key = f"ton_contract:{address}"
cached_info = await self.cache_manager.get(cache_key)
if cached_info:
return cached_info
contract_response = await self.client.get(
f"{self.api_endpoint}/contract/{address}/info"
)
if contract_response.status_code != 200:
return {"error": f"Failed to get contract info: {contract_response.text}"}
contract_data = contract_response.json()
result = {
"address": address,
"contract_type": contract_data.get("contract_type"),
"is_verified": contract_data.get("is_verified", False),
"abi": contract_data.get("abi"),
"source_code": contract_data.get("source_code"),
"compiler_version": contract_data.get("compiler_version"),
"deployment_block": contract_data.get("deployment_block"),
"updated_at": datetime.utcnow().isoformat()
}
# Cache for 1 hour
await self.cache_manager.set(cache_key, result, ttl=3600)
return result
except Exception as e:
await logger.aerror("Contract info error", address=address, error=str(e))
return {"error": f"Contract info error: {str(e)}"}
async def call_smart_contract(
self,
contract_address: str,
method: str,
params: Dict[str, Any],
private_key: Optional[str] = None
) -> Dict[str, Any]:
"""
Call smart contract method.
Args:
contract_address: Contract address
method: Method name to call
params: Method parameters
private_key: Private key for write operations
Returns:
Dict: Contract call result
"""
try:
call_data = {
"contract": contract_address,
"method": method,
"params": params
}
# Add private key for write operations
if private_key:
try:
decrypted_key = decrypt_data(private_key, context="wallet")
if isinstance(decrypted_key, bytes):
decrypted_key = decrypted_key.decode('utf-8')
call_data["private_key"] = decrypted_key
except Exception as e:
return {"error": "Invalid private key"}
contract_response = await self.client.post(
f"{self.api_endpoint}/contract/call",
json=call_data
)
if contract_response.status_code != 200:
return {"error": f"Contract call failed: {contract_response.text}"}
call_result = contract_response.json()
await logger.ainfo(
"Smart contract called",
contract=contract_address,
method=method,
success=call_result.get("success", False)
)
return call_result
except Exception as e:
await logger.aerror(
"Contract call error",
contract=contract_address,
method=method,
error=str(e)
)
return {"error": f"Contract call error: {str(e)}"}
# Global TON service instance
_ton_service = None
async def get_ton_service() -> TONService:
"""Get or create global TON service instance."""
global _ton_service
if _ton_service is None:
_ton_service = TONService()
return _ton_service
async def cleanup_ton_service():
"""Cleanup global TON service instance."""
global _ton_service
if _ton_service:
await _ton_service.close()
_ton_service = None
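# Illustrative usage sketch: `sender`, `encrypted_key`, `recipient` and `amount_nanotons` are caller-supplied
# placeholders; every function and method referenced here is defined above in this module.
#
#     async def example_transfer(encrypted_key, sender, recipient, amount_nanotons):
#         ton = await get_ton_service()
#         try:
#             fee = await ton.estimate_transaction_fee(sender, recipient, amount_nanotons)
#             result = await ton.send_transaction(encrypted_key, recipient, amount_nanotons, message="demo")
#             if "hash" in result:
#                 return await ton.monitor_transaction(result["hash"], max_wait_time=120)
#             return result
#         finally:
#             await cleanup_ton_service()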

385
app/core/cache.py Normal file
View File

@ -0,0 +1,385 @@
"""Redis caching system with fallback support."""
import json
import logging
import pickle
from typing import Any, Optional, Union, Dict, List
from contextlib import asynccontextmanager
from functools import wraps
import redis.asyncio as redis
from redis.asyncio import ConnectionPool
from app.core.config_compatible import get_settings
logger = logging.getLogger(__name__)
# Global Redis connection pool
_redis_pool: Optional[ConnectionPool] = None
_redis_client: Optional[redis.Redis] = None
class CacheError(Exception):
"""Custom cache error."""
pass
async def init_cache() -> None:
"""Initialize Redis cache connection."""
global _redis_pool, _redis_client
settings = get_settings()
if not settings.redis_enabled or not settings.cache_enabled:
logger.info("Redis caching is disabled")
return
try:
# Create connection pool
_redis_pool = ConnectionPool(
host=settings.redis_host,
port=settings.redis_port,
password=settings.redis_password,
db=settings.redis_db,
max_connections=settings.redis_max_connections,
socket_timeout=settings.redis_socket_timeout,
socket_connect_timeout=settings.redis_socket_connect_timeout,
decode_responses=False, # We'll handle encoding manually for flexibility
retry_on_timeout=True,
health_check_interval=30,
)
# Create Redis client
_redis_client = redis.Redis(connection_pool=_redis_pool)
# Test connection
await _redis_client.ping()
logger.info(f"Redis cache initialized successfully at {settings.redis_host}:{settings.redis_port}")
except Exception as e:
logger.warning(f"Failed to initialize Redis cache: {e}. Caching will be disabled.")
_redis_pool = None
_redis_client = None
async def close_cache() -> None:
"""Close Redis cache connection."""
global _redis_pool, _redis_client
if _redis_client:
try:
await _redis_client.close()
logger.info("Redis cache connection closed")
except Exception as e:
logger.error(f"Error closing Redis cache: {e}")
finally:
_redis_client = None
_redis_pool = None
def get_redis_client() -> Optional[redis.Redis]:
"""Get Redis client instance."""
return _redis_client
def is_cache_available() -> bool:
"""Check if cache is available."""
return _redis_client is not None
class Cache:
"""Redis cache manager with fallback support."""
def __init__(self):
self.settings = get_settings()
def _serialize(self, value: Any) -> bytes:
"""Serialize value for storage."""
try:
if isinstance(value, (str, int, float, bool)):
return json.dumps(value).encode('utf-8')
else:
return pickle.dumps(value)
except Exception as e:
logger.error(f"Failed to serialize cache value: {e}")
raise CacheError(f"Serialization error: {e}")
def _deserialize(self, data: bytes) -> Any:
"""Deserialize value from storage."""
try:
# Try JSON first (for simple types)
try:
return json.loads(data.decode('utf-8'))
except (json.JSONDecodeError, UnicodeDecodeError):
# Fallback to pickle for complex objects
return pickle.loads(data)
except Exception as e:
logger.error(f"Failed to deserialize cache value: {e}")
raise CacheError(f"Deserialization error: {e}")
def _make_key(self, key: str, prefix: str = "myuploader") -> str:
"""Create cache key with prefix."""
return f"{prefix}:{key}"
async def get(self, key: str, default: Any = None) -> Any:
"""Get value from cache."""
if not is_cache_available():
return default
try:
redis_key = self._make_key(key)
data = await _redis_client.get(redis_key)
if data is None:
return default
return self._deserialize(data)
except Exception as e:
logger.warning(f"Cache get error for key '{key}': {e}")
return default
async def set(self, key: str, value: Any, ttl: Optional[int] = None) -> bool:
"""Set value in cache."""
if not is_cache_available():
return False
try:
redis_key = self._make_key(key)
data = self._serialize(value)
if ttl is None:
ttl = self.settings.cache_default_ttl
await _redis_client.setex(redis_key, ttl, data)
return True
except Exception as e:
logger.warning(f"Cache set error for key '{key}': {e}")
return False
async def delete(self, key: str) -> bool:
"""Delete value from cache."""
if not is_cache_available():
return False
try:
redis_key = self._make_key(key)
result = await _redis_client.delete(redis_key)
return bool(result)
except Exception as e:
logger.warning(f"Cache delete error for key '{key}': {e}")
return False
async def exists(self, key: str) -> bool:
"""Check if key exists in cache."""
if not is_cache_available():
return False
try:
redis_key = self._make_key(key)
result = await _redis_client.exists(redis_key)
return bool(result)
except Exception as e:
logger.warning(f"Cache exists error for key '{key}': {e}")
return False
async def expire(self, key: str, ttl: int) -> bool:
"""Set expiration time for key."""
if not is_cache_available():
return False
try:
redis_key = self._make_key(key)
result = await _redis_client.expire(redis_key, ttl)
return bool(result)
except Exception as e:
logger.warning(f"Cache expire error for key '{key}': {e}")
return False
async def clear_pattern(self, pattern: str) -> int:
"""Clear all keys matching pattern."""
if not is_cache_available():
return 0
try:
redis_pattern = self._make_key(pattern)
keys = await _redis_client.keys(redis_pattern)
if keys:
result = await _redis_client.delete(*keys)
return result
return 0
except Exception as e:
logger.warning(f"Cache clear pattern error for pattern '{pattern}': {e}")
return 0
async def increment(self, key: str, amount: int = 1, ttl: Optional[int] = None) -> Optional[int]:
"""Increment counter in cache."""
if not is_cache_available():
return None
try:
redis_key = self._make_key(key)
result = await _redis_client.incrby(redis_key, amount)
if ttl is not None:
await _redis_client.expire(redis_key, ttl)
return result
except Exception as e:
logger.warning(f"Cache increment error for key '{key}': {e}")
return None
async def get_multiple(self, keys: List[str]) -> Dict[str, Any]:
"""Get multiple values from cache."""
if not is_cache_available():
return {}
try:
redis_keys = [self._make_key(key) for key in keys]
values = await _redis_client.mget(redis_keys)
result = {}
for i, (key, data) in enumerate(zip(keys, values)):
if data is not None:
try:
result[key] = self._deserialize(data)
except Exception as e:
logger.warning(f"Failed to deserialize cached value for key '{key}': {e}")
return result
except Exception as e:
logger.warning(f"Cache get_multiple error: {e}")
return {}
async def set_multiple(self, mapping: Dict[str, Any], ttl: Optional[int] = None) -> bool:
"""Set multiple values in cache."""
if not is_cache_available():
return False
try:
pipeline = _redis_client.pipeline()
for key, value in mapping.items():
redis_key = self._make_key(key)
data = self._serialize(value)
if ttl is None:
ttl = self.settings.cache_default_ttl
pipeline.setex(redis_key, ttl, data)
await pipeline.execute()
return True
except Exception as e:
logger.warning(f"Cache set_multiple error: {e}")
return False
# Global cache instance
cache = Cache()
# Caching decorators
def cached(ttl: Optional[int] = None, key_prefix: str = "func"):
"""Decorator for caching function results."""
def decorator(func):
@wraps(func)
async def wrapper(*args, **kwargs):
if not is_cache_available():
return await func(*args, **kwargs)
# Create cache key from function name and arguments
key_parts = [key_prefix, func.__name__]
if args:
key_parts.extend([str(arg) for arg in args])
if kwargs:
key_parts.extend([f"{k}={v}" for k, v in sorted(kwargs.items())])
cache_key = ":".join(key_parts)
# Try to get from cache
result = await cache.get(cache_key)
if result is not None:
return result
# Call function and cache result
result = await func(*args, **kwargs)
await cache.set(cache_key, result, ttl)
return result
return wrapper
return decorator
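# Example usage of the caching decorator (illustrative; `get_user_profile` and `load_profile_from_db` are
# hypothetical callables):
#
#     @cached(ttl=120, key_prefix="profiles")
#     async def get_user_profile(user_id: int) -> dict:
#         return await load_profile_from_db(user_id)
#
# A call such as get_user_profile(42) is cached under "profiles:get_user_profile:42", which Cache._make_key
# prefixes to "myuploader:profiles:get_user_profile:42".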
def cache_user_data(ttl: Optional[int] = None):
"""Decorator for caching user-specific data."""
if ttl is None:
ttl = get_settings().cache_user_ttl
return cached(ttl=ttl, key_prefix="user")
def cache_content_data(ttl: Optional[int] = None):
"""Decorator for caching content data."""
if ttl is None:
ttl = get_settings().cache_content_ttl
return cached(ttl=ttl, key_prefix="content")
# Cache health check
async def check_cache_health() -> Dict[str, Any]:
"""Check cache health and return status."""
if not is_cache_available():
return {
"status": "disabled",
"available": False,
"error": "Redis not initialized"
}
try:
# Test basic operations
test_key = "health_check"
test_value = {"timestamp": "test"}
await cache.set(test_key, test_value, 10)
retrieved = await cache.get(test_key)
await cache.delete(test_key)
# Get Redis info
info = await _redis_client.info()
return {
"status": "healthy",
"available": True,
"test_passed": retrieved == test_value,
"connected_clients": info.get("connected_clients", 0),
"used_memory": info.get("used_memory_human", "unknown"),
"total_commands_processed": info.get("total_commands_processed", 0),
}
except Exception as e:
return {
"status": "error",
"available": False,
"error": str(e)
}
# Context manager for cache operations
@asynccontextmanager
async def cache_context():
"""Context manager for cache operations."""
try:
yield cache
except Exception as e:
logger.error(f"Cache context error: {e}")
raise
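# Typical lifecycle (illustrative sketch using only the helpers defined above): call init_cache() once at
# application startup, check_cache_health() from a health endpoint, and close_cache() on shutdown; every cache
# operation degrades to a no-op/default when Redis is unavailable.
#
#     await init_cache()
#     status = await check_cache_health()
#     await cache.set("greeting", {"text": "hello"}, ttl=60)
#     await close_cache()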

253
app/core/config.py Normal file
View File

@ -0,0 +1,253 @@
"""
Application configuration with security improvements and validation
"""
import os
import secrets
from datetime import datetime
from typing import List, Optional, Dict, Any
from pathlib import Path
from pydantic import BaseSettings, validator, Field
from pydantic.networks import AnyHttpUrl, PostgresDsn, RedisDsn
import structlog
logger = structlog.get_logger(__name__)
class Settings(BaseSettings):
"""Application settings with validation"""
# Application
PROJECT_NAME: str = "My Uploader Bot"
PROJECT_VERSION: str = "2.0.0"
PROJECT_HOST: AnyHttpUrl = Field(default="http://127.0.0.1:15100")
SANIC_PORT: int = Field(default=15100, ge=1000, le=65535)
DEBUG: bool = Field(default=False)
# Security
SECRET_KEY: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
JWT_SECRET_KEY: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
JWT_EXPIRE_MINUTES: int = Field(default=60 * 24 * 7) # 7 days
ENCRYPTION_KEY: Optional[str] = None
# Rate Limiting
RATE_LIMIT_REQUESTS: int = Field(default=100)
RATE_LIMIT_WINDOW: int = Field(default=60) # seconds
RATE_LIMIT_ENABLED: bool = Field(default=True)
# Database
DATABASE_URL: PostgresDsn = Field(
default="postgresql+asyncpg://user:password@localhost:5432/uploader_bot"
)
DATABASE_POOL_SIZE: int = Field(default=10, ge=1, le=100)
DATABASE_MAX_OVERFLOW: int = Field(default=20, ge=0, le=100)
DATABASE_ECHO: bool = Field(default=False)
# Redis
REDIS_URL: RedisDsn = Field(default="redis://localhost:6379/0")
REDIS_POOL_SIZE: int = Field(default=10, ge=1, le=100)
REDIS_TTL_DEFAULT: int = Field(default=3600) # 1 hour
REDIS_TTL_SHORT: int = Field(default=300) # 5 minutes
REDIS_TTL_LONG: int = Field(default=86400) # 24 hours
# File Storage
UPLOADS_DIR: Path = Field(default=Path("/app/data"))
MAX_FILE_SIZE: int = Field(default=100 * 1024 * 1024) # 100MB
ALLOWED_CONTENT_TYPES: List[str] = Field(default=[
'image/jpeg', 'image/png', 'image/gif', 'image/webp',
'video/mp4', 'video/webm', 'video/ogg', 'video/quicktime',
'audio/mpeg', 'audio/ogg', 'audio/wav', 'audio/mp4',
'text/plain', 'application/json'
])
# Telegram
TELEGRAM_API_KEY: str = Field(..., min_length=40)
CLIENT_TELEGRAM_API_KEY: str = Field(..., min_length=40)
TELEGRAM_WEBHOOK_ENABLED: bool = Field(default=False)
TELEGRAM_WEBHOOK_URL: Optional[AnyHttpUrl] = None
TELEGRAM_WEBHOOK_SECRET: str = Field(default_factory=lambda: secrets.token_urlsafe(32))
# TON Blockchain
TESTNET: bool = Field(default=False)
TONCENTER_HOST: AnyHttpUrl = Field(default="https://toncenter.com/api/v2/")
TONCENTER_API_KEY: Optional[str] = None
TONCENTER_V3_HOST: AnyHttpUrl = Field(default="https://toncenter.com/api/v3/")
MY_PLATFORM_CONTRACT: str = Field(default="EQDmWp6hbJlYUrXZKb9N88sOrTit630ZuRijfYdXEHLtheMY")
MY_FUND_ADDRESS: str = Field(default="UQDarChHFMOI2On9IdHJNeEKttqepgo0AY4bG1trw8OAAwMY")
# Logging
LOG_LEVEL: str = Field(default="INFO", regex="^(DEBUG|INFO|WARNING|ERROR|CRITICAL)$")
LOG_DIR: Path = Field(default=Path("logs"))
LOG_FORMAT: str = Field(default="json")
LOG_ROTATION: str = Field(default="1 day")
LOG_RETENTION: str = Field(default="30 days")
# Monitoring
METRICS_ENABLED: bool = Field(default=True)
METRICS_PORT: int = Field(default=9090, ge=1000, le=65535)
HEALTH_CHECK_ENABLED: bool = Field(default=True)
# Background Services
INDEXER_ENABLED: bool = Field(default=True)
INDEXER_INTERVAL: int = Field(default=5, ge=1, le=3600)
TON_DAEMON_ENABLED: bool = Field(default=True)
TON_DAEMON_INTERVAL: int = Field(default=3, ge=1, le=3600)
LICENSE_SERVICE_ENABLED: bool = Field(default=True)
LICENSE_SERVICE_INTERVAL: int = Field(default=10, ge=1, le=3600)
CONVERT_SERVICE_ENABLED: bool = Field(default=True)
CONVERT_SERVICE_INTERVAL: int = Field(default=30, ge=1, le=3600)
# Web App URLs
WEB_APP_URLS: Dict[str, str] = Field(default={
'uploadContent': "https://web2-client.vercel.app/uploadContent"
})
# Maintenance
MAINTENANCE_MODE: bool = Field(default=False)
MAINTENANCE_MESSAGE: str = Field(default="System is under maintenance")
# Development
MOCK_EXTERNAL_SERVICES: bool = Field(default=False)
DISABLE_WEBHOOKS: bool = Field(default=False)
@validator('UPLOADS_DIR')
def create_uploads_dir(cls, v):
"""Create uploads directory if it doesn't exist"""
if not v.exists():
v.mkdir(parents=True, exist_ok=True)
return v
@validator('LOG_DIR')
def create_log_dir(cls, v):
"""Create log directory if it doesn't exist"""
if not v.exists():
v.mkdir(parents=True, exist_ok=True)
return v
@validator('DATABASE_URL')
def validate_database_url(cls, v):
"""Validate database URL format"""
if not str(v).startswith('postgresql+asyncpg://'):
raise ValueError('Database URL must use asyncpg driver')
return v
@validator('TELEGRAM_API_KEY', 'CLIENT_TELEGRAM_API_KEY')
def validate_telegram_keys(cls, v):
"""Validate Telegram bot tokens format"""
parts = v.split(':')
if len(parts) != 2 or not parts[0].isdigit() or len(parts[1]) != 35:
raise ValueError('Invalid Telegram bot token format')
return v
@validator('SECRET_KEY', 'JWT_SECRET_KEY')
def validate_secret_keys(cls, v):
"""Validate secret keys length"""
if len(v) < 32:
raise ValueError('Secret keys must be at least 32 characters long')
return v
class Config:
env_file = ".env"
case_sensitive = True
validate_assignment = True
class SecurityConfig:
"""Security-related configurations"""
# CORS settings
CORS_ORIGINS = [
"https://web2-client.vercel.app",
"https://t.me",
"https://web.telegram.org"
]
# Content Security Policy
CSP_DIRECTIVES = {
'default-src': ["'self'"],
'script-src': ["'self'", "'unsafe-inline'", "https://cdn.jsdelivr.net"],
'style-src': ["'self'", "'unsafe-inline'", "https://cdn.jsdelivr.net"],
'img-src': ["'self'", "data:", "https:"],
'connect-src': ["'self'", "https://api.telegram.org"],
'frame-ancestors': ["'none'"],
'form-action': ["'self'"],
'base-uri': ["'self'"]
}
# Request size limits
MAX_REQUEST_SIZE = 100 * 1024 * 1024 # 100MB
MAX_JSON_SIZE = 10 * 1024 * 1024 # 10MB
# Session settings
SESSION_COOKIE_SECURE = True
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SAMESITE = "Strict"
# Rate limiting patterns
RATE_LIMIT_PATTERNS = {
"auth": {"requests": 5, "window": 300}, # 5 requests per 5 minutes
"upload": {"requests": 10, "window": 3600}, # 10 uploads per hour
"api": {"requests": 100, "window": 60}, # 100 API calls per minute
"heavy": {"requests": 1, "window": 60} # 1 heavy operation per minute
}
# Create settings instance
settings = Settings()
# Expose commonly used settings
DATABASE_URL = str(settings.DATABASE_URL)
REDIS_URL = str(settings.REDIS_URL)
DATABASE_POOL_SIZE = settings.DATABASE_POOL_SIZE
DATABASE_MAX_OVERFLOW = settings.DATABASE_MAX_OVERFLOW
REDIS_POOL_SIZE = settings.REDIS_POOL_SIZE
TELEGRAM_API_KEY = settings.TELEGRAM_API_KEY
CLIENT_TELEGRAM_API_KEY = settings.CLIENT_TELEGRAM_API_KEY
PROJECT_HOST = str(settings.PROJECT_HOST)
SANIC_PORT = settings.SANIC_PORT
UPLOADS_DIR = settings.UPLOADS_DIR
ALLOWED_CONTENT_TYPES = settings.ALLOWED_CONTENT_TYPES
TESTNET = settings.TESTNET
TONCENTER_HOST = str(settings.TONCENTER_HOST)
TONCENTER_API_KEY = settings.TONCENTER_API_KEY
TONCENTER_V3_HOST = str(settings.TONCENTER_V3_HOST)
MY_PLATFORM_CONTRACT = settings.MY_PLATFORM_CONTRACT
MY_FUND_ADDRESS = settings.MY_FUND_ADDRESS
LOG_LEVEL = settings.LOG_LEVEL
LOG_DIR = settings.LOG_DIR
MAINTENANCE_MODE = settings.MAINTENANCE_MODE
# Cache keys patterns
CACHE_KEYS = {
"user_session": "user:session:{user_id}",
"user_data": "user:data:{user_id}",
"content_metadata": "content:meta:{content_id}",
"rate_limit": "rate_limit:{pattern}:{identifier}",
"blockchain_task": "blockchain:task:{task_id}",
"temp_upload": "upload:temp:{upload_id}",
"wallet_connection": "wallet:conn:{wallet_address}",
"ton_price": "ton:price:usd",
"system_status": "system:status:{service}",
}
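# Example: CACHE_KEYS["user_session"].format(user_id=42) -> "user:session:42"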
# Log current configuration (without secrets)
def log_config():
"""Log current configuration without sensitive data"""
safe_config = {
"project_name": settings.PROJECT_NAME,
"project_version": settings.PROJECT_VERSION,
"debug": settings.DEBUG,
"sanic_port": settings.SANIC_PORT,
"testnet": settings.TESTNET,
"maintenance_mode": settings.MAINTENANCE_MODE,
"metrics_enabled": settings.METRICS_ENABLED,
"uploads_dir": str(settings.UPLOADS_DIR),
"log_level": settings.LOG_LEVEL,
}
logger.info("Configuration loaded", **safe_config)
# Initialize logging configuration
log_config()

257
app/core/config_compatible.py Normal file
View File

@ -0,0 +1,257 @@
"""Compatible configuration management with MariaDB and Redis support."""
import os
from functools import lru_cache
from typing import Optional, Dict, Any
from pydantic import BaseSettings, Field, validator
class Settings(BaseSettings):
"""Application settings with backward compatibility."""
# Application settings
app_name: str = Field(default="My Uploader Bot", env="APP_NAME")
debug: bool = Field(default=False, env="DEBUG")
environment: str = Field(default="production", env="ENVIRONMENT")
host: str = Field(default="0.0.0.0", env="HOST")
port: int = Field(default=15100, env="PORT")
# Security settings
secret_key: str = Field(env="SECRET_KEY", default="your-secret-key-change-this")
jwt_secret_key: str = Field(env="JWT_SECRET_KEY", default="jwt-secret-change-this")
jwt_algorithm: str = Field(default="HS256", env="JWT_ALGORITHM")
jwt_expire_minutes: int = Field(default=30, env="JWT_EXPIRE_MINUTES")
# MariaDB/MySQL settings (preserving existing configuration)
mysql_host: str = Field(default="maria_db", env="MYSQL_HOST")
mysql_port: int = Field(default=3306, env="MYSQL_PORT")
mysql_user: str = Field(default="myuploader", env="MYSQL_USER")
mysql_password: str = Field(default="password", env="MYSQL_PASSWORD")
mysql_database: str = Field(default="myuploader", env="MYSQL_DATABASE")
mysql_root_password: str = Field(default="password", env="MYSQL_ROOT_PASSWORD")
# Database pool settings
database_pool_size: int = Field(default=20, env="DATABASE_POOL_SIZE")
database_max_overflow: int = Field(default=30, env="DATABASE_MAX_OVERFLOW")
database_pool_timeout: int = Field(default=30, env="DATABASE_POOL_TIMEOUT")
database_pool_recycle: int = Field(default=3600, env="DATABASE_POOL_RECYCLE")
# Optional new database URL (for future migration)
database_url: Optional[str] = Field(default=None, env="DATABASE_URL")
# Redis settings (new addition)
redis_enabled: bool = Field(default=True, env="REDIS_ENABLED")
redis_host: str = Field(default="redis", env="REDIS_HOST")
redis_port: int = Field(default=6379, env="REDIS_PORT")
redis_password: Optional[str] = Field(default=None, env="REDIS_PASSWORD")
redis_db: int = Field(default=0, env="REDIS_DB")
redis_max_connections: int = Field(default=50, env="REDIS_MAX_CONNECTIONS")
redis_socket_timeout: int = Field(default=30, env="REDIS_SOCKET_TIMEOUT")
redis_socket_connect_timeout: int = Field(default=30, env="REDIS_SOCKET_CONNECT_TIMEOUT")
# Cache settings
cache_enabled: bool = Field(default=True, env="CACHE_ENABLED")
cache_default_ttl: int = Field(default=300, env="CACHE_DEFAULT_TTL") # 5 minutes
cache_user_ttl: int = Field(default=600, env="CACHE_USER_TTL") # 10 minutes
cache_content_ttl: int = Field(default=1800, env="CACHE_CONTENT_TTL") # 30 minutes
# Storage settings (preserving existing paths)
storage_path: str = Field(default="/Storage/storedContent", env="STORAGE_PATH")
logs_path: str = Field(default="/Storage/logs", env="LOGS_PATH")
sql_storage_path: str = Field(default="/Storage/sqlStorage", env="SQL_STORAGE_PATH")
# File upload settings
max_file_size: int = Field(default=100 * 1024 * 1024, env="MAX_FILE_SIZE") # 100MB
allowed_extensions: str = Field(default=".jpg,.jpeg,.png,.gif,.pdf,.doc,.docx,.txt", env="ALLOWED_EXTENSIONS")
# Rate limiting
rate_limit_enabled: bool = Field(default=True, env="RATE_LIMIT_ENABLED")
rate_limit_requests: int = Field(default=100, env="RATE_LIMIT_REQUESTS")
rate_limit_window: int = Field(default=3600, env="RATE_LIMIT_WINDOW") # 1 hour
# TON Blockchain settings (preserving existing)
ton_network: str = Field(default="mainnet", env="TON_NETWORK")
ton_api_key: Optional[str] = Field(default=None, env="TON_API_KEY")
ton_wallet_address: Optional[str] = Field(default=None, env="TON_WALLET_ADDRESS")
# License settings
license_check_enabled: bool = Field(default=True, env="LICENSE_CHECK_ENABLED")
license_server_url: Optional[str] = Field(default=None, env="LICENSE_SERVER_URL")
# Indexer settings
indexer_enabled: bool = Field(default=True, env="INDEXER_ENABLED")
indexer_interval: int = Field(default=300, env="INDEXER_INTERVAL") # 5 minutes
# Convert process settings
convert_enabled: bool = Field(default=True, env="CONVERT_ENABLED")
convert_queue_size: int = Field(default=10, env="CONVERT_QUEUE_SIZE")
# Logging settings
log_level: str = Field(default="INFO", env="LOG_LEVEL")
log_format: str = Field(default="json", env="LOG_FORMAT")
log_file_enabled: bool = Field(default=True, env="LOG_FILE_ENABLED")
log_file_max_size: int = Field(default=10 * 1024 * 1024, env="LOG_FILE_MAX_SIZE") # 10MB
log_file_backup_count: int = Field(default=5, env="LOG_FILE_BACKUP_COUNT")
# API settings
api_title: str = Field(default="My Uploader Bot API", env="API_TITLE")
api_version: str = Field(default="1.0.0", env="API_VERSION")
api_description: str = Field(default="File upload and management API", env="API_DESCRIPTION")
cors_enabled: bool = Field(default=True, env="CORS_ENABLED")
cors_origins: str = Field(default="*", env="CORS_ORIGINS")
# Health check settings
health_check_enabled: bool = Field(default=True, env="HEALTH_CHECK_ENABLED")
health_check_interval: int = Field(default=60, env="HEALTH_CHECK_INTERVAL")
# Metrics settings
metrics_enabled: bool = Field(default=True, env="METRICS_ENABLED")
metrics_endpoint: str = Field(default="/metrics", env="METRICS_ENDPOINT")
@validator("allowed_extensions")
def validate_extensions(cls, v):
"""Validate and normalize file extensions."""
if isinstance(v, str):
return [ext.strip().lower() for ext in v.split(",") if ext.strip()]
return v
@validator("cors_origins")
def validate_cors_origins(cls, v):
"""Validate and normalize CORS origins."""
if isinstance(v, str) and v != "*":
return [origin.strip() for origin in v.split(",") if origin.strip()]
return v
@validator("log_level")
def validate_log_level(cls, v):
"""Validate log level."""
valid_levels = ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]
if v.upper() not in valid_levels:
raise ValueError(f"Log level must be one of: {valid_levels}")
return v.upper()
def get_database_url(self) -> str:
"""Get complete database URL."""
if self.database_url:
return self.database_url
return f"mysql+aiomysql://{self.mysql_user}:{self.mysql_password}@{self.mysql_host}:{self.mysql_port}/{self.mysql_database}"
def get_redis_url(self) -> str:
"""Get complete Redis URL."""
if self.redis_password:
return f"redis://:{self.redis_password}@{self.redis_host}:{self.redis_port}/{self.redis_db}"
return f"redis://{self.redis_host}:{self.redis_port}/{self.redis_db}"
def get_allowed_extensions_set(self) -> set:
"""Get allowed extensions as a set."""
if isinstance(self.allowed_extensions, list):
return set(self.allowed_extensions)
return set(ext.strip().lower() for ext in self.allowed_extensions.split(",") if ext.strip())
def get_cors_origins_list(self) -> list:
"""Get CORS origins as a list."""
if self.cors_origins == "*":
return ["*"]
if isinstance(self.cors_origins, list):
return self.cors_origins
return [origin.strip() for origin in self.cors_origins.split(",") if origin.strip()]
def is_development(self) -> bool:
"""Check if running in development mode."""
return self.environment.lower() in ["development", "dev", "local"]
def is_production(self) -> bool:
"""Check if running in production mode."""
return self.environment.lower() in ["production", "prod"]
def get_cache_config(self) -> Dict[str, Any]:
"""Get cache configuration dictionary."""
return {
"enabled": self.cache_enabled and self.redis_enabled,
"default_ttl": self.cache_default_ttl,
"user_ttl": self.cache_user_ttl,
"content_ttl": self.cache_content_ttl,
"redis_url": self.get_redis_url(),
"max_connections": self.redis_max_connections,
}
def get_database_config(self) -> Dict[str, Any]:
"""Get database configuration dictionary."""
return {
"url": self.get_database_url(),
"pool_size": self.database_pool_size,
"max_overflow": self.database_max_overflow,
"pool_timeout": self.database_pool_timeout,
"pool_recycle": self.database_pool_recycle,
}
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
case_sensitive = False
@lru_cache()
def get_settings() -> Settings:
"""Get cached settings instance."""
return Settings()
# Backward compatibility functions
def get_mysql_config() -> Dict[str, Any]:
"""Get MySQL configuration for backward compatibility."""
settings = get_settings()
return {
"host": settings.mysql_host,
"port": settings.mysql_port,
"user": settings.mysql_user,
"password": settings.mysql_password,
"database": settings.mysql_database,
}
def get_storage_config() -> Dict[str, str]:
"""Get storage configuration for backward compatibility."""
settings = get_settings()
return {
"storage_path": settings.storage_path,
"logs_path": settings.logs_path,
"sql_storage_path": settings.sql_storage_path,
}
def get_redis_config() -> Dict[str, Any]:
"""Get Redis configuration."""
settings = get_settings()
return {
"enabled": settings.redis_enabled,
"host": settings.redis_host,
"port": settings.redis_port,
"password": settings.redis_password,
"db": settings.redis_db,
"max_connections": settings.redis_max_connections,
"socket_timeout": settings.redis_socket_timeout,
"socket_connect_timeout": settings.redis_socket_connect_timeout,
}
# Environment variables validation
def validate_environment():
"""Validate required environment variables."""
settings = get_settings()
required_vars = [
"SECRET_KEY",
"JWT_SECRET_KEY",
"MYSQL_PASSWORD",
]
missing_vars = []
for var in required_vars:
if not os.getenv(var):
missing_vars.append(var)
if missing_vars:
raise ValueError(f"Missing required environment variables: {', '.join(missing_vars)}")
return True
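# Example (illustrative, using the defaults declared above): get_settings().get_database_url() yields
# "mysql+aiomysql://myuploader:password@maria_db:3306/myuploader" and get_settings().get_redis_url() yields
# "redis://redis:6379/0".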

262
app/core/database.py Normal file
View File

@ -0,0 +1,262 @@
"""
Async SQLAlchemy configuration with connection pooling and Redis integration
"""
import asyncio
import logging
from contextlib import asynccontextmanager
from typing import AsyncGenerator, Optional
from datetime import timedelta
from sqlalchemy.ext.asyncio import (
create_async_engine,
AsyncSession,
async_sessionmaker,
AsyncEngine
)
from sqlalchemy.pool import NullPool, QueuePool
from sqlalchemy.sql import text
import redis.asyncio as redis
from redis.asyncio.connection import ConnectionPool
import structlog
from app.core.config import (
DATABASE_URL,
REDIS_URL,
DATABASE_POOL_SIZE,
DATABASE_MAX_OVERFLOW,
REDIS_POOL_SIZE
)
logger = structlog.get_logger(__name__)
class DatabaseManager:
"""Async database manager with connection pooling"""
def __init__(self):
self._engine: Optional[AsyncEngine] = None
self._session_factory: Optional[async_sessionmaker[AsyncSession]] = None
self._redis_pool: Optional[ConnectionPool] = None
self._redis: Optional[redis.Redis] = None
self._initialized = False
async def initialize(self) -> None:
"""Initialize database connections and Redis"""
if self._initialized:
return
# Initialize async SQLAlchemy engine
self._engine = create_async_engine(
DATABASE_URL,
poolclass=QueuePool,
pool_size=DATABASE_POOL_SIZE,
max_overflow=DATABASE_MAX_OVERFLOW,
pool_pre_ping=True,
pool_recycle=3600, # 1 hour
echo=False, # Set to True for SQL debugging
future=True,
json_serializer=lambda obj: obj,
json_deserializer=lambda obj: obj,
)
# Create session factory
self._session_factory = async_sessionmaker(
self._engine,
class_=AsyncSession,
expire_on_commit=False,
autoflush=False,
autocommit=False
)
# Initialize Redis connection pool
self._redis_pool = ConnectionPool.from_url(
REDIS_URL,
max_connections=REDIS_POOL_SIZE,
retry_on_timeout=True,
health_check_interval=30
)
self._redis = redis.Redis(
connection_pool=self._redis_pool,
decode_responses=True
)
# Test connections
await self._test_connections()
self._initialized = True
logger.info("Database and Redis connections initialized")
async def _test_connections(self) -> None:
"""Test database and Redis connections"""
# Test database
async with self._engine.begin() as conn:
result = await conn.execute(text("SELECT 1"))
assert result.scalar() == 1
# Test Redis
await self._redis.ping()
logger.info("Database and Redis connections tested successfully")
async def close(self) -> None:
"""Close all connections gracefully"""
if self._engine:
await self._engine.dispose()
if self._redis_pool:
await self._redis_pool.disconnect()
self._initialized = False
logger.info("Database and Redis connections closed")
@asynccontextmanager
async def get_session(self) -> AsyncGenerator[AsyncSession, None]:
"""Get async database session with automatic cleanup"""
if not self._initialized:
await self.initialize()
async with self._session_factory() as session:
try:
yield session
except Exception as e:
await session.rollback()
logger.error("Database session error", error=str(e))
raise
finally:
await session.close()
@asynccontextmanager
async def get_transaction(self) -> AsyncGenerator[AsyncSession, None]:
"""Get async database session with automatic transaction management"""
async with self.get_session() as session:
async with session.begin():
yield session
async def get_redis(self) -> redis.Redis:
"""Get Redis client"""
if not self._initialized:
await self.initialize()
return self._redis
@property
def engine(self) -> AsyncEngine:
"""Get SQLAlchemy engine"""
if not self._engine:
raise RuntimeError("Database not initialized")
return self._engine
class CacheManager:
"""Redis-based cache manager with TTL and serialization"""
def __init__(self, redis_client: redis.Redis):
self.redis = redis_client
async def get(self, key: str, default=None):
"""Get value from cache"""
try:
value = await self.redis.get(key)
return value if value is not None else default
except Exception as e:
logger.error("Cache get error", key=key, error=str(e))
return default
async def set(
self,
key: str,
value: str,
ttl: Optional[int] = None,
nx: bool = False
) -> bool:
"""Set value in cache with optional TTL"""
try:
return await self.redis.set(key, value, ex=ttl, nx=nx)
except Exception as e:
logger.error("Cache set error", key=key, error=str(e))
return False
async def delete(self, key: str) -> bool:
"""Delete key from cache"""
try:
return bool(await self.redis.delete(key))
except Exception as e:
logger.error("Cache delete error", key=key, error=str(e))
return False
async def exists(self, key: str) -> bool:
"""Check if key exists in cache"""
try:
return bool(await self.redis.exists(key))
except Exception as e:
logger.error("Cache exists error", key=key, error=str(e))
return False
async def incr(self, key: str, amount: int = 1) -> int:
"""Increment counter in cache"""
try:
return await self.redis.incr(key, amount)
except Exception as e:
logger.error("Cache incr error", key=key, error=str(e))
return 0
async def expire(self, key: str, ttl: int) -> bool:
"""Set TTL for existing key"""
try:
return await self.redis.expire(key, ttl)
except Exception as e:
logger.error("Cache expire error", key=key, error=str(e))
return False
async def hget(self, name: str, key: str):
"""Get hash field value"""
try:
return await self.redis.hget(name, key)
except Exception as e:
logger.error("Cache hget error", name=name, key=key, error=str(e))
return None
async def hset(self, name: str, key: str, value: str) -> bool:
"""Set hash field value"""
try:
return bool(await self.redis.hset(name, key, value))
except Exception as e:
logger.error("Cache hset error", name=name, key=key, error=str(e))
return False
async def hdel(self, name: str, key: str) -> bool:
"""Delete hash field"""
try:
return bool(await self.redis.hdel(name, key))
except Exception as e:
logger.error("Cache hdel error", name=name, key=key, error=str(e))
return False
# Global instances
db_manager = DatabaseManager()
cache_manager: Optional[CacheManager] = None
async def get_db_session() -> AsyncGenerator[AsyncSession, None]:
"""Dependency for getting database session"""
async with db_manager.get_session() as session:
yield session
async def get_cache() -> CacheManager:
"""Dependency for getting cache manager"""
global cache_manager
if not cache_manager:
redis_client = await db_manager.get_redis()
cache_manager = CacheManager(redis_client)
return cache_manager
async def init_database():
"""Initialize database connections"""
await db_manager.initialize()
async def close_database():
"""Close database connections"""
await db_manager.close()
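# Illustrative startup/shutdown flow for this module (all names are defined above):
#
#     await init_database()
#     async with db_manager.get_transaction() as session:
#         ...  # issue queries inside a transaction
#     cache = await get_cache()
#     await cache.set("key", "value", ttl=60)
#     await close_database()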

View File

@ -0,0 +1,221 @@
"""Compatible database configuration with MariaDB support."""
import logging
from contextlib import asynccontextmanager
from typing import AsyncGenerator, Optional
from sqlalchemy import MetaData, text
from sqlalchemy.ext.asyncio import (
AsyncEngine,
AsyncSession,
async_sessionmaker,
create_async_engine
)
from sqlalchemy.pool import NullPool
from app.core.config import get_settings
logger = logging.getLogger(__name__)
# Global variables for database engine and session
_engine: Optional[AsyncEngine] = None
_async_session: Optional[async_sessionmaker[AsyncSession]] = None
# Naming convention for consistent constraint names
naming_convention = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
metadata = MetaData(naming_convention=naming_convention)
def get_database_url() -> str:
"""Get database URL from settings."""
settings = get_settings()
# Support both new DATABASE_URL and legacy MariaDB settings
if hasattr(settings, 'database_url') and settings.database_url:
return settings.database_url
# Fallback to MariaDB configuration
mysql_host = getattr(settings, 'mysql_host', 'maria_db')
mysql_port = getattr(settings, 'mysql_port', 3306)
mysql_user = getattr(settings, 'mysql_user', 'myuploader')
mysql_password = getattr(settings, 'mysql_password', 'password')
mysql_database = getattr(settings, 'mysql_database', 'myuploader')
return f"mysql+aiomysql://{mysql_user}:{mysql_password}@{mysql_host}:{mysql_port}/{mysql_database}"
async def init_database() -> None:
"""Initialize database connection."""
global _engine, _async_session
if _engine is not None:
logger.warning("Database already initialized")
return
try:
settings = get_settings()
database_url = get_database_url()
logger.info(f"Connecting to database: {database_url.split('@')[1] if '@' in database_url else 'unknown'}")
# Create async engine with MariaDB/MySQL optimizations
_engine = create_async_engine(
database_url,
echo=settings.debug if hasattr(settings, 'debug') else False,
pool_size=getattr(settings, 'database_pool_size', 20),
max_overflow=getattr(settings, 'database_max_overflow', 30),
pool_timeout=getattr(settings, 'database_pool_timeout', 30),
pool_recycle=getattr(settings, 'database_pool_recycle', 3600),
pool_pre_ping=True, # Verify connections before use
# MariaDB specific settings
connect_args={
"charset": "utf8mb4",
"use_unicode": True,
"autocommit": False,
}
)
# Create async session factory
_async_session = async_sessionmaker(
bind=_engine,
class_=AsyncSession,
expire_on_commit=False,
autoflush=True,
autocommit=False
)
# Test the connection
async with _engine.begin() as conn:
await conn.execute("SELECT 1")
logger.info("Database connection established successfully")
except Exception as e:
logger.error(f"Failed to initialize database: {e}")
raise
async def close_database() -> None:
"""Close database connection."""
global _engine, _async_session
if _engine is not None:
logger.info("Closing database connection")
await _engine.dispose()
_engine = None
_async_session = None
logger.info("Database connection closed")
def get_engine() -> AsyncEngine:
"""Get database engine."""
if _engine is None:
raise RuntimeError("Database not initialized. Call init_database() first.")
return _engine
def get_session_factory() -> async_sessionmaker[AsyncSession]:
"""Get session factory."""
if _async_session is None:
raise RuntimeError("Database not initialized. Call init_database() first.")
return _async_session
@asynccontextmanager
async def get_async_session() -> AsyncGenerator[AsyncSession, None]:
"""Get async database session with automatic cleanup."""
if _async_session is None:
raise RuntimeError("Database not initialized. Call init_database() first.")
async with _async_session() as session:
try:
yield session
except Exception as e:
logger.error(f"Database session error: {e}")
await session.rollback()
raise
finally:
await session.close()
async def check_database_health() -> bool:
"""Check database connection health."""
try:
async with get_async_session() as session:
await session.execute("SELECT 1")
return True
except Exception as e:
logger.error(f"Database health check failed: {e}")
return False
async def get_database_info() -> dict:
"""Get database information."""
try:
async with get_async_session() as session:
# Get database version
result = await session.execute("SELECT VERSION() as version")
version_row = result.fetchone()
version = version_row[0] if version_row else "Unknown"
# Get connection count (MariaDB specific)
try:
result = await session.execute("SHOW STATUS LIKE 'Threads_connected'")
conn_row = result.fetchone()
connections = int(conn_row[1]) if conn_row else 0
except Exception:
connections = 0
# Get database size
try:
result = await session.execute("""
SELECT
ROUND(SUM(data_length + index_length) / 1024 / 1024, 2) as size_mb
FROM information_schema.tables
WHERE table_schema = DATABASE()
""")
size_row = result.fetchone()
size_mb = float(size_row[0]) if size_row and size_row[0] else 0
except:
size_mb = 0
return {
"version": version,
"connections": connections,
"size_mb": size_mb,
"engine_pool_size": _engine.pool.size() if _engine else 0,
"engine_checked_out": _engine.pool.checkedout() if _engine else 0,
}
except Exception as e:
logger.error(f"Failed to get database info: {e}")
return {"error": str(e)}
# Database session dependency for dependency injection
async def get_db_session() -> AsyncGenerator[AsyncSession, None]:
"""Database session dependency for API routes."""
async with get_async_session() as session:
yield session
# Backward compatibility functions
async def get_db() -> AsyncGenerator[AsyncSession, None]:
"""Legacy function name for backward compatibility."""
async with get_async_session() as session:
yield session
# Transaction context manager
@asynccontextmanager
async def transaction():
"""Transaction context manager."""
async with get_async_session() as session:
async with session.begin():
yield session
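
The helpers above are illustrated by the following minimal startup/shutdown sketch. It is not part of the commit; it only shows how init_database(), the health check, and the session context manager could be wired together by a caller.

import asyncio

from app.core.database import (
    check_database_health,
    close_database,
    get_async_session,
    init_database,
)


async def main() -> None:
    # Create the engine and session factory once at startup.
    await init_database()
    try:
        if not await check_database_health():
            raise RuntimeError("database is not reachable")
        async with get_async_session() as session:
            # Application queries go here; rollback and close are handled
            # by the context manager on error.
            ...
    finally:
        await close_database()


if __name__ == "__main__":
    asyncio.run(main())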

363
app/core/logging.py Normal file

@ -0,0 +1,363 @@
"""
Structured logging configuration with monitoring and observability
"""
import asyncio
import logging
import sys
import time
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional, Union
from contextvars import ContextVar
import json
import structlog
from structlog.stdlib import LoggerFactory
from structlog.typing import EventDict, Processor
import structlog.dev
from app.core.config import settings, LOG_DIR, LOG_LEVEL
# Context variables for request tracking
request_id_var: ContextVar[Optional[str]] = ContextVar('request_id', default=None)
user_id_var: ContextVar[Optional[int]] = ContextVar('user_id', default=None)
operation_var: ContextVar[Optional[str]] = ContextVar('operation', default=None)
class RequestContextProcessor:
"""Add request context to log records"""
def __call__(self, logger, method_name, event_dict: EventDict) -> EventDict:
"""Add context variables to event dict"""
if request_id := request_id_var.get(None):
event_dict['request_id'] = request_id
if user_id := user_id_var.get(None):
event_dict['user_id'] = user_id
if operation := operation_var.get(None):
event_dict['operation'] = operation
return event_dict
class TimestampProcessor:
"""Add consistent timestamp to log records"""
def __call__(self, logger, method_name, event_dict: EventDict) -> EventDict:
"""Add timestamp to event dict"""
event_dict['timestamp'] = datetime.utcnow().isoformat() + 'Z'
return event_dict
class SecurityProcessor:
"""Filter sensitive data from logs"""
SENSITIVE_KEYS = {
'password', 'token', 'key', 'secret', 'auth', 'credential',
'private_key', 'seed', 'mnemonic', 'api_key', 'authorization'
}
def __call__(self, logger, method_name, event_dict: EventDict) -> EventDict:
"""Remove or mask sensitive data"""
return self._filter_dict(event_dict)
def _filter_dict(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Recursively filter sensitive data"""
if not isinstance(data, dict):
return data
filtered = {}
for key, value in data.items():
if any(sensitive in key.lower() for sensitive in self.SENSITIVE_KEYS):
filtered[key] = '***REDACTED***'
elif isinstance(value, dict):
filtered[key] = self._filter_dict(value)
elif isinstance(value, list):
filtered[key] = [
self._filter_dict(item) if isinstance(item, dict) else item
for item in value
]
else:
filtered[key] = value
return filtered
class PerformanceProcessor:
"""Add performance metrics to log records"""
def __call__(self, logger, method_name, event_dict: EventDict) -> EventDict:
"""Add performance data to event dict"""
# Add memory usage if available
try:
import psutil
process = psutil.Process()
event_dict['memory_mb'] = round(process.memory_info().rss / 1024 / 1024, 2)
event_dict['cpu_percent'] = process.cpu_percent()
except ImportError:
pass
return event_dict
class MetricsCollector:
"""Collect metrics from log events"""
def __init__(self):
self.counters: Dict[str, int] = {}
self.timers: Dict[str, float] = {}
self.errors: Dict[str, int] = {}
def increment_counter(self, metric: str, value: int = 1):
"""Increment counter metric"""
self.counters[metric] = self.counters.get(metric, 0) + value
def record_timer(self, metric: str, duration: float):
"""Record timer metric"""
self.timers[metric] = duration
def record_error(self, error_type: str):
"""Record error metric"""
self.errors[error_type] = self.errors.get(error_type, 0) + 1
def get_metrics(self) -> Dict[str, Any]:
"""Get all collected metrics"""
return {
'counters': self.counters,
'timers': self.timers,
'errors': self.errors
}
# Global metrics collector
metrics_collector = MetricsCollector()
class DatabaseLogHandler(logging.Handler):
"""Log handler that stores critical logs in database"""
def __init__(self):
super().__init__()
self.setLevel(logging.ERROR)
self._queue = asyncio.Queue(maxsize=1000)
self._task = None
def emit(self, record: logging.LogRecord):
"""Add log record to queue"""
try:
log_entry = {
'timestamp': datetime.utcnow(),
'level': record.levelname,
'logger': record.name,
'message': record.getMessage(),
'module': record.module,
'function': record.funcName,
'line': record.lineno,
'request_id': getattr(record, 'request_id', None),
'user_id': getattr(record, 'user_id', None),
'extra': getattr(record, '__dict__', {})
}
if not self._queue.full():
self._queue.put_nowait(log_entry)
except Exception:
# Don't let logging errors break the application
pass
async def process_logs(self):
"""Process logs from queue and store in database"""
from app.core.database import get_db_session
while True:
try:
log_entry = await self._queue.get()
# Store in database (implement based on your log model)
# async with get_db_session() as session:
# log_record = LogRecord(**log_entry)
# session.add(log_record)
# await session.commit()
except Exception as e:
# Log to stderr to avoid infinite recursion
print(f"Database log handler error: {e}", file=sys.stderr)
await asyncio.sleep(0.1)
def configure_logging():
"""Configure structured logging"""
# Configure standard library logging
logging.basicConfig(
format="%(message)s",
stream=sys.stdout,
level=getattr(logging, LOG_LEVEL.upper())
)
# Silence noisy loggers
logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING)
logging.getLogger("aioredis").setLevel(logging.WARNING)
logging.getLogger("aiogram").setLevel(logging.WARNING)
# Configure processors based on environment
processors: list[Processor] = [
structlog.contextvars.merge_contextvars,
RequestContextProcessor(),
TimestampProcessor(),
SecurityProcessor(),
structlog.processors.add_log_level,
structlog.processors.StackInfoRenderer(),
]
if settings.DEBUG:
processors.extend([
PerformanceProcessor(),
structlog.dev.ConsoleRenderer(colors=True)
])
else:
processors.append(structlog.processors.JSONRenderer())
# Configure structlog
structlog.configure(
processors=processors,
wrapper_class=structlog.make_filtering_bound_logger(
getattr(logging, LOG_LEVEL.upper())
),
logger_factory=LoggerFactory(),
cache_logger_on_first_use=True,
)
# Add file handler for persistent logging
if not settings.DEBUG:
log_file = LOG_DIR / f"app_{datetime.now().strftime('%Y%m%d')}.log"
file_handler = logging.FileHandler(log_file, encoding='utf-8')
file_handler.setFormatter(
logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
)
logging.getLogger().addHandler(file_handler)
class LoggerMixin:
"""Mixin to add structured logging to classes"""
@property
def logger(self):
"""Get logger for this class"""
return structlog.get_logger(self.__class__.__name__)
class AsyncContextLogger:
"""Context manager for async operations with automatic logging"""
def __init__(
self,
operation: str,
logger: Optional[structlog.BoundLogger] = None,
log_args: bool = True,
log_result: bool = True
):
self.operation = operation
self.logger = logger or structlog.get_logger()
self.log_args = log_args
self.log_result = log_result
self.start_time = None
async def __aenter__(self):
"""Enter async context"""
self.start_time = time.time()
operation_var.set(self.operation)
self.logger.info(
"Operation started",
operation=self.operation,
)
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
"""Exit async context with performance logging"""
duration = time.time() - self.start_time
if exc_type:
self.logger.error(
"Operation failed",
operation=self.operation,
duration_ms=round(duration * 1000, 2),
error_type=exc_type.__name__,
error_message=str(exc_val)
)
metrics_collector.record_error(f"{self.operation}_error")
else:
self.logger.info(
"Operation completed",
operation=self.operation,
duration_ms=round(duration * 1000, 2)
)
metrics_collector.record_timer(f"{self.operation}_duration", duration)
operation_var.set(None)
def get_logger(name: str = None) -> structlog.BoundLogger:
"""Get configured structured logger"""
return structlog.get_logger(name)
# Compatibility wrapper for old logging
def make_log(
component: Optional[str],
message: str,
level: str = 'info',
**kwargs
):
"""Legacy logging function for backward compatibility"""
logger = get_logger(component or 'Legacy')
log_func = getattr(logger, level.lower(), logger.info)
log_func(message, **kwargs)
# Performance monitoring decorator
def log_performance(operation: str = None):
"""Decorator to log function performance"""
def decorator(func):
async def async_wrapper(*args, **kwargs):
op_name = operation or f"{func.__module__}.{func.__name__}"
async with AsyncContextLogger(op_name):
return await func(*args, **kwargs)
def sync_wrapper(*args, **kwargs):
op_name = operation or f"{func.__module__}.{func.__name__}"
start_time = time.time()
logger = get_logger(func.__module__)
try:
logger.info("Function started", function=op_name)
result = func(*args, **kwargs)
duration = time.time() - start_time
logger.info(
"Function completed",
function=op_name,
duration_ms=round(duration * 1000, 2)
)
return result
except Exception as e:
duration = time.time() - start_time
logger.error(
"Function failed",
function=op_name,
duration_ms=round(duration * 1000, 2),
error=str(e)
)
raise
return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
return decorator
# Initialize logging
configure_logging()
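
A short, illustrative usage sketch for the helpers above (not part of the commit); the operation and function names are invented for demonstration.

import asyncio

from app.core.logging import AsyncContextLogger, get_logger, log_performance

logger = get_logger("example")


@log_performance("example.fetch_profile")
async def fetch_profile(user_id: int) -> dict:
    # Simulated work; duration and success/failure are logged by the decorator.
    await asyncio.sleep(0.01)
    return {"id": user_id}


async def main() -> None:
    async with AsyncContextLogger("example.batch", logger=logger):
        profile = await fetch_profile(42)
        logger.info("profile loaded", user_id=profile["id"])


if __name__ == "__main__":
    asyncio.run(main())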

566
app/core/metrics.py Normal file

@ -0,0 +1,566 @@
"""Prometheus metrics collection for my-uploader-bot."""
import asyncio
import logging
import time
from datetime import datetime
from functools import wraps
from typing import Dict, Any, Optional, Callable
from prometheus_client import Counter, Histogram, Gauge, Info, generate_latest, CONTENT_TYPE_LATEST
from sanic import Request, Response
logger = logging.getLogger(__name__)
# Application info
APP_INFO = Info('myuploader_app_info', 'Application information')
APP_INFO.info({
'version': '2.0.0',
'name': 'my-uploader-bot',
'python_version': '3.11+'
})
# HTTP request metrics
HTTP_REQUESTS_TOTAL = Counter(
'http_requests_total',
'Total HTTP requests',
['method', 'endpoint', 'status_code']
)
HTTP_REQUEST_DURATION = Histogram(
'http_request_duration_seconds',
'HTTP request duration in seconds',
['method', 'endpoint']
)
HTTP_REQUEST_SIZE = Histogram(
'http_request_size_bytes',
'HTTP request size in bytes',
['method', 'endpoint']
)
HTTP_RESPONSE_SIZE = Histogram(
'http_response_size_bytes',
'HTTP response size in bytes',
['method', 'endpoint']
)
# Authentication metrics
AUTH_LOGIN_ATTEMPTS_TOTAL = Counter(
'auth_login_attempts_total',
'Total login attempts',
['status']
)
AUTH_LOGIN_FAILURES_TOTAL = Counter(
'auth_login_failures_total',
'Total login failures',
['reason']
)
AUTH_API_KEY_USAGE_TOTAL = Counter(
'auth_api_key_usage_total',
'Total API key usage',
['key_id', 'status']
)
# File upload metrics
UPLOAD_REQUESTS_TOTAL = Counter(
'upload_requests_total',
'Total upload requests',
['status', 'file_type']
)
UPLOAD_SIZE_BYTES = Histogram(
'upload_size_bytes',
'File upload size in bytes',
['file_type']
)
UPLOAD_DURATION_SECONDS = Histogram(
'upload_duration_seconds',
'File upload duration in seconds',
['file_type']
)
UPLOAD_QUEUE_SIZE = Gauge(
'upload_queue_size',
'Number of files in upload queue'
)
UPLOAD_FAILURES_TOTAL = Counter(
'upload_failures_total',
'Total upload failures',
['reason', 'file_type']
)
# File processing metrics
PROCESSING_QUEUE_SIZE = Gauge(
'processing_queue_size',
'Number of files in processing queue'
)
PROCESSING_DURATION_SECONDS = Histogram(
'processing_duration_seconds',
'File processing duration in seconds',
['file_type', 'operation']
)
PROCESSING_FAILURES_TOTAL = Counter(
'processing_failures_total',
'Total processing failures',
['file_type', 'operation']
)
# Database metrics
DB_CONNECTIONS_ACTIVE = Gauge(
'db_connections_active',
'Number of active database connections'
)
DB_CONNECTIONS_IDLE = Gauge(
'db_connections_idle',
'Number of idle database connections'
)
DB_QUERY_DURATION_SECONDS = Histogram(
'db_query_duration_seconds',
'Database query duration in seconds',
['operation']
)
DB_TRANSACTIONS_TOTAL = Counter(
'db_transactions_total',
'Total database transactions',
['status']
)
# Cache metrics
CACHE_OPERATIONS_TOTAL = Counter(
'cache_operations_total',
'Total cache operations',
['operation', 'status']
)
CACHE_HIT_RATIO = Gauge(
'cache_hit_ratio',
'Cache hit ratio'
)
CACHE_KEYS_TOTAL = Gauge(
'cache_keys_total',
'Total number of cache keys'
)
CACHE_MEMORY_USAGE_BYTES = Gauge(
'cache_memory_usage_bytes',
'Cache memory usage in bytes'
)
# Storage metrics
STORAGE_OPERATIONS_TOTAL = Counter(
'storage_operations_total',
'Total storage operations',
['operation', 'backend', 'status']
)
STORAGE_AVAILABLE_BYTES = Gauge(
'storage_available_bytes',
'Available storage space in bytes',
['backend']
)
STORAGE_TOTAL_BYTES = Gauge(
'storage_total_bytes',
'Total storage space in bytes',
['backend']
)
STORAGE_FILES_TOTAL = Gauge(
'storage_files_total',
'Total number of stored files',
['backend']
)
# Blockchain metrics
BLOCKCHAIN_TRANSACTIONS_TOTAL = Counter(
'blockchain_transactions_total',
'Total blockchain transactions',
['status', 'network']
)
BLOCKCHAIN_TRANSACTION_FEES = Histogram(
'blockchain_transaction_fees',
'Blockchain transaction fees',
['network']
)
BLOCKCHAIN_PENDING_TRANSACTIONS = Gauge(
'blockchain_pending_transactions',
'Number of pending blockchain transactions'
)
BLOCKCHAIN_WALLET_BALANCES = Gauge(
'blockchain_wallet_balances',
'Wallet balances',
['wallet_id', 'currency']
)
TON_SERVICE_UP = Gauge(
'ton_service_up',
'TON service availability (1 = up, 0 = down)'
)
# Security metrics
RATE_LIMIT_HITS_TOTAL = Counter(
'rate_limit_hits_total',
'Total rate limit hits',
['endpoint', 'user_id']
)
SECURITY_EVENTS_TOTAL = Counter(
'security_events_total',
'Total security events',
['event_type', 'severity']
)
SECURITY_SUSPICIOUS_EVENTS = Gauge(
'security_suspicious_events',
'Number of suspicious security events in the last hour'
)
FAILED_LOGIN_ATTEMPTS = Counter(
'failed_login_attempts_total',
'Total failed login attempts',
['ip_address', 'reason']
)
# System metrics
SYSTEM_UPTIME_SECONDS = Gauge(
'system_uptime_seconds',
'System uptime in seconds'
)
BACKGROUND_TASKS_ACTIVE = Gauge(
'background_tasks_active',
'Number of active background tasks',
['service']
)
BACKGROUND_TASKS_COMPLETED = Counter(
'background_tasks_completed_total',
'Total completed background tasks',
['service', 'status']
)
# Error metrics
ERROR_RATE = Gauge(
'error_rate',
'Application error rate'
)
EXCEPTIONS_TOTAL = Counter(
'exceptions_total',
'Total exceptions',
['exception_type', 'handler']
)
class MetricsCollector:
"""Centralized metrics collection and management."""
def __init__(self):
self.start_time = time.time()
self._cache_stats = {
'hits': 0,
'misses': 0,
'operations': 0
}
def record_http_request(
self,
method: str,
endpoint: str,
status_code: int,
duration: float,
request_size: int = 0,
response_size: int = 0
):
"""Record HTTP request metrics."""
HTTP_REQUESTS_TOTAL.labels(
method=method,
endpoint=endpoint,
status_code=status_code
).inc()
HTTP_REQUEST_DURATION.labels(
method=method,
endpoint=endpoint
).observe(duration)
if request_size > 0:
HTTP_REQUEST_SIZE.labels(
method=method,
endpoint=endpoint
).observe(request_size)
if response_size > 0:
HTTP_RESPONSE_SIZE.labels(
method=method,
endpoint=endpoint
).observe(response_size)
def record_auth_event(self, event_type: str, status: str, **labels):
"""Record authentication events."""
if event_type == 'login':
AUTH_LOGIN_ATTEMPTS_TOTAL.labels(status=status).inc()
if status == 'failed':
reason = labels.get('reason', 'unknown')
AUTH_LOGIN_FAILURES_TOTAL.labels(reason=reason).inc()
elif event_type == 'api_key':
key_id = labels.get('key_id', 'unknown')
AUTH_API_KEY_USAGE_TOTAL.labels(key_id=key_id, status=status).inc()
def record_upload_event(
self,
status: str,
file_type: str,
file_size: int = 0,
duration: float = 0,
**kwargs
):
"""Record file upload events."""
UPLOAD_REQUESTS_TOTAL.labels(status=status, file_type=file_type).inc()
if file_size > 0:
UPLOAD_SIZE_BYTES.labels(file_type=file_type).observe(file_size)
if duration > 0:
UPLOAD_DURATION_SECONDS.labels(file_type=file_type).observe(duration)
if status == 'failed':
reason = kwargs.get('reason', 'unknown')
UPLOAD_FAILURES_TOTAL.labels(reason=reason, file_type=file_type).inc()
def record_processing_event(
self,
file_type: str,
operation: str,
duration: float = 0,
status: str = 'success'
):
"""Record file processing events."""
if duration > 0:
PROCESSING_DURATION_SECONDS.labels(
file_type=file_type,
operation=operation
).observe(duration)
if status == 'failed':
PROCESSING_FAILURES_TOTAL.labels(
file_type=file_type,
operation=operation
).inc()
def record_db_event(self, operation: str, duration: float = 0, status: str = 'success'):
"""Record database events."""
if duration > 0:
DB_QUERY_DURATION_SECONDS.labels(operation=operation).observe(duration)
DB_TRANSACTIONS_TOTAL.labels(status=status).inc()
def record_cache_event(self, operation: str, status: str):
"""Record cache events."""
CACHE_OPERATIONS_TOTAL.labels(operation=operation, status=status).inc()
# Update cache stats
self._cache_stats['operations'] += 1
if status == 'hit':
self._cache_stats['hits'] += 1
elif status == 'miss':
self._cache_stats['misses'] += 1
# Update hit ratio
if self._cache_stats['operations'] > 0:
hit_ratio = self._cache_stats['hits'] / self._cache_stats['operations']
CACHE_HIT_RATIO.set(hit_ratio)
def record_blockchain_event(
self,
event_type: str,
status: str,
network: str = 'mainnet',
**kwargs
):
"""Record blockchain events."""
if event_type == 'transaction':
BLOCKCHAIN_TRANSACTIONS_TOTAL.labels(status=status, network=network).inc()
if 'fee' in kwargs:
BLOCKCHAIN_TRANSACTION_FEES.labels(network=network).observe(kwargs['fee'])
def record_security_event(self, event_type: str, severity: str = 'info', **kwargs):
"""Record security events."""
SECURITY_EVENTS_TOTAL.labels(event_type=event_type, severity=severity).inc()
if event_type == 'rate_limit':
endpoint = kwargs.get('endpoint', 'unknown')
user_id = kwargs.get('user_id', 'anonymous')
RATE_LIMIT_HITS_TOTAL.labels(endpoint=endpoint, user_id=user_id).inc()
elif event_type == 'failed_login':
ip_address = kwargs.get('ip_address', 'unknown')
reason = kwargs.get('reason', 'unknown')
FAILED_LOGIN_ATTEMPTS.labels(ip_address=ip_address, reason=reason).inc()
def update_system_metrics(self):
"""Update system-level metrics."""
uptime = time.time() - self.start_time
SYSTEM_UPTIME_SECONDS.set(uptime)
def update_gauge_metrics(self, metrics_data: Dict[str, Any]):
"""Update gauge metrics from external data."""
# Database metrics
if 'db_connections' in metrics_data:
db_conn = metrics_data['db_connections']
DB_CONNECTIONS_ACTIVE.set(db_conn.get('active', 0))
DB_CONNECTIONS_IDLE.set(db_conn.get('idle', 0))
# Cache metrics
if 'cache' in metrics_data:
cache_data = metrics_data['cache']
CACHE_KEYS_TOTAL.set(cache_data.get('keys', 0))
CACHE_MEMORY_USAGE_BYTES.set(cache_data.get('memory_usage', 0))
# Storage metrics
if 'storage' in metrics_data:
storage_data = metrics_data['storage']
for backend, data in storage_data.items():
STORAGE_AVAILABLE_BYTES.labels(backend=backend).set(data.get('available', 0))
STORAGE_TOTAL_BYTES.labels(backend=backend).set(data.get('total', 0))
STORAGE_FILES_TOTAL.labels(backend=backend).set(data.get('files', 0))
# Queue metrics
if 'queues' in metrics_data:
queues = metrics_data['queues']
UPLOAD_QUEUE_SIZE.set(queues.get('upload', 0))
PROCESSING_QUEUE_SIZE.set(queues.get('processing', 0))
# Blockchain metrics
if 'blockchain' in metrics_data:
blockchain_data = metrics_data['blockchain']
BLOCKCHAIN_PENDING_TRANSACTIONS.set(blockchain_data.get('pending_transactions', 0))
TON_SERVICE_UP.set(1 if blockchain_data.get('ton_service_up') else 0)
# Wallet balances
for wallet_id, balance_data in blockchain_data.get('wallet_balances', {}).items():
for currency, balance in balance_data.items():
BLOCKCHAIN_WALLET_BALANCES.labels(
wallet_id=wallet_id,
currency=currency
).set(balance)
# Background tasks
if 'background_tasks' in metrics_data:
tasks_data = metrics_data['background_tasks']
for service, count in tasks_data.items():
BACKGROUND_TASKS_ACTIVE.labels(service=service).set(count)
# Global metrics collector instance
metrics_collector = MetricsCollector()
def metrics_middleware(request: Request, response: Response):
    """Response middleware to collect HTTP metrics."""
    # Duration is measured from request.ctx.start_time, which a companion
    # request-phase middleware is expected to set; fall back to "now"
    # (duration ~0) if it is missing.
    start_time = getattr(request.ctx, "start_time", None) or time.time()
    duration = time.time() - start_time
# Get endpoint info
endpoint = request.path
method = request.method
status_code = response.status
# Get request/response sizes
request_size = len(request.body) if request.body else 0
response_size = len(response.body) if hasattr(response, 'body') and response.body else 0
# Record metrics
metrics_collector.record_http_request(
method=method,
endpoint=endpoint,
status_code=status_code,
duration=duration,
request_size=request_size,
response_size=response_size
)
def track_function_calls(func_name: str, labels: Optional[Dict[str, str]] = None):
"""Decorator to track function call metrics."""
def decorator(func: Callable) -> Callable:
@wraps(func)
async def async_wrapper(*args, **kwargs):
start_time = time.time()
status = 'success'
try:
result = await func(*args, **kwargs)
return result
except Exception as e:
status = 'error'
EXCEPTIONS_TOTAL.labels(
exception_type=type(e).__name__,
handler=func_name
).inc()
raise
finally:
duration = time.time() - start_time
# Record custom metrics based on function type
if func_name.startswith('db_'):
metrics_collector.record_db_event(func_name, duration, status)
elif func_name.startswith('cache_'):
metrics_collector.record_cache_event(func_name, status)
@wraps(func)
def sync_wrapper(*args, **kwargs):
start_time = time.time()
status = 'success'
try:
result = func(*args, **kwargs)
return result
except Exception as e:
status = 'error'
EXCEPTIONS_TOTAL.labels(
exception_type=type(e).__name__,
handler=func_name
).inc()
raise
finally:
duration = time.time() - start_time
# Record custom metrics based on function type
if func_name.startswith('db_'):
metrics_collector.record_db_event(func_name, duration, status)
elif func_name.startswith('cache_'):
metrics_collector.record_cache_event(func_name, status)
return async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
return decorator
async def get_metrics():
"""Get Prometheus metrics."""
# Update system metrics before generating output
metrics_collector.update_system_metrics()
# Generate metrics in Prometheus format
return generate_latest()
def get_metrics_content_type():
"""Get the content type for metrics."""
return CONTENT_TYPE_LATEST
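
As an illustration only (not part of the commit), the sketch below exposes the Prometheus endpoint from a Sanic app and records one HTTP sample by hand; the app name and routes are assumptions.

from sanic import Sanic
from sanic.response import raw

from app.core.metrics import get_metrics, get_metrics_content_type, metrics_collector

app = Sanic("metrics-example")


@app.get("/metrics")
async def metrics_endpoint(request):
    # Prometheus scrape endpoint.
    payload = await get_metrics()
    return raw(payload, content_type=get_metrics_content_type())


@app.get("/ping")
async def ping(request):
    # Record one request sample by hand; normally metrics_middleware does this.
    metrics_collector.record_http_request(
        method="GET", endpoint="/ping", status_code=200, duration=0.001
    )
    return raw(b"pong")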


@ -1,3 +1,277 @@
"""
Base model classes with async SQLAlchemy support
"""
import uuid
from datetime import datetime
from typing import Any, Dict, Optional, Type, TypeVar, Union
from sqlalchemy import Column, DateTime, String, Boolean, Integer, Text, JSON
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.future import select
from sqlalchemy.orm import sessionmaker
from pydantic import BaseModel
import structlog
logger = structlog.get_logger(__name__)
# Create declarative base
Base = declarative_base()
# Type variable for model classes
ModelType = TypeVar("ModelType", bound="BaseModel")
class TimestampMixin:
"""Mixin for automatic timestamp fields"""
created_at = Column(
DateTime,
nullable=False,
default=datetime.utcnow,
comment="Record creation timestamp"
)
updated_at = Column(
DateTime,
nullable=False,
default=datetime.utcnow,
onupdate=datetime.utcnow,
comment="Record last update timestamp"
)
class UUIDMixin:
"""Mixin for UUID primary key"""
id = Column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
comment="Unique identifier"
)
class SoftDeleteMixin:
"""Mixin for soft delete functionality"""
deleted_at = Column(
DateTime,
nullable=True,
comment="Soft delete timestamp"
)
@property
def is_deleted(self) -> bool:
"""Check if record is soft deleted"""
return self.deleted_at is not None
def soft_delete(self):
"""Mark record as soft deleted"""
self.deleted_at = datetime.utcnow()
def restore(self):
"""Restore soft deleted record"""
self.deleted_at = None
class MetadataMixin:
    """Mixin for flexible metadata storage"""
    # "metadata" is a reserved attribute name on SQLAlchemy declarative classes,
    # so the Python attribute is "meta" while the column keeps the name "metadata".
    meta = Column(
        "metadata",
        JSON,
        nullable=False,
        default=dict,
        comment="Flexible metadata storage"
    )
    def set_meta(self, key: str, value: Any) -> None:
        """Set metadata value"""
        if self.meta is None:
            self.meta = {}
        self.meta[key] = value
    def get_meta(self, key: str, default: Any = None) -> Any:
        """Get metadata value"""
        if self.meta is None:
            return default
        return self.meta.get(key, default)
    def update_meta(self, updates: Dict[str, Any]) -> None:
        """Update multiple metadata values"""
        if self.meta is None:
            self.meta = {}
        self.meta.update(updates)
class StatusMixin:
"""Mixin for status tracking"""
status = Column(
String(64),
nullable=False,
default="active",
index=True,
comment="Record status"
)
def set_status(self, status: str, reason: Optional[str] = None):
"""Set status with optional reason"""
self.status = status
if reason:
self.set_meta("status_reason", reason)
self.set_meta("status_changed_at", datetime.utcnow().isoformat())
class BaseModelMixin:
"""Base mixin with common functionality"""
    def to_dict(self) -> Dict[str, Any]:
        """Convert model to dictionary"""
        result = {}
        for column in self.__table__.columns:
            # Use column.key so columns whose attribute name differs from the
            # database column name (e.g. "metadata" mapped to .meta) resolve.
            value = getattr(self, column.key)
            if isinstance(value, datetime):
                value = value.isoformat()
            elif hasattr(value, '__dict__'):
                value = str(value)
            result[column.name] = value
        return result
def update_from_dict(self, data: Dict[str, Any]) -> None:
"""Update model from dictionary"""
for key, value in data.items():
if hasattr(self, key):
setattr(self, key, value)
@classmethod
async def get_by_id(
cls: Type[ModelType],
session: AsyncSession,
id_value: Union[int, str, uuid.UUID]
) -> Optional[ModelType]:
"""Get record by ID"""
try:
stmt = select(cls).where(cls.id == id_value)
result = await session.execute(stmt)
return result.scalar_one_or_none()
except Exception as e:
logger.error("Error getting record by ID", model=cls.__name__, id=id_value, error=str(e))
return None
@classmethod
async def get_all(
cls: Type[ModelType],
session: AsyncSession,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> list[ModelType]:
"""Get all records with optional pagination"""
try:
stmt = select(cls)
if offset:
stmt = stmt.offset(offset)
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting all records", model=cls.__name__, error=str(e))
return []
@classmethod
async def count(cls: Type[ModelType], session: AsyncSession) -> int:
"""Get total count of records"""
try:
from sqlalchemy import func
stmt = select(func.count(cls.id))
result = await session.execute(stmt)
return result.scalar() or 0
except Exception as e:
logger.error("Error counting records", model=cls.__name__, error=str(e))
return 0
async def save(self, session: AsyncSession) -> None:
"""Save model to database"""
try:
session.add(self)
await session.commit()
await session.refresh(self)
except Exception as e:
await session.rollback()
logger.error("Error saving model", model=self.__class__.__name__, error=str(e))
raise
async def delete(self, session: AsyncSession) -> None:
"""Delete model from database"""
try:
await session.delete(self)
await session.commit()
except Exception as e:
await session.rollback()
logger.error("Error deleting model", model=self.__class__.__name__, error=str(e))
raise
class AuditMixin:
"""Mixin for audit trail"""
created_by = Column(
UUID(as_uuid=True),
nullable=True,
comment="User who created the record"
)
updated_by = Column(
UUID(as_uuid=True),
nullable=True,
comment="User who last updated the record"
)
def set_audit_info(self, user_id: Optional[uuid.UUID] = None):
"""Set audit information"""
if user_id:
if not hasattr(self, 'created_at') or not self.created_at:
self.created_by = user_id
self.updated_by = user_id
class CacheableMixin:
"""Mixin for cacheable models"""
@property
def cache_key(self) -> str:
"""Generate cache key for this model"""
return f"{self.__class__.__name__.lower()}:{self.id}"
@property
def cache_ttl(self) -> int:
"""Default cache TTL in seconds"""
return 3600 # 1 hour
def get_cache_data(self) -> Dict[str, Any]:
"""Get data for caching"""
return self.to_dict()
# Combined base model class
class BaseModel(
Base,
BaseModelMixin,
TimestampMixin,
UUIDMixin,
SoftDeleteMixin,
MetadataMixin,
StatusMixin,
AuditMixin,
CacheableMixin
):
"""Base model with all mixins"""
__abstract__ = True
def __repr__(self) -> str:
"""String representation of model"""
return f"<{self.__class__.__name__}(id={self.id})>"
# Compatibility with old model base
AlchemyBase = Base
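
An illustrative model built on the combined BaseModel above (not part of the commit); the Project model, its table name, and the session argument are assumptions for demonstration.

from sqlalchemy import Column, String
from sqlalchemy.ext.asyncio import AsyncSession

from app.core.models.base import BaseModel


class Project(BaseModel):
    """Hypothetical model used only to illustrate the mixins."""
    __tablename__ = "projects"

    name = Column(String(128), nullable=False)


async def example(session: AsyncSession) -> None:
    project = Project(name="demo")
    project.set_meta("source", "example")   # MetadataMixin
    project.set_status("active")            # StatusMixin
    await project.save(session)             # commit + refresh

    loaded = await Project.get_by_id(session, project.id)
    if loaded is not None:
        print(loaded.to_dict())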


@ -0,0 +1,88 @@
"""Compatible SQLAlchemy base models for MariaDB."""
from datetime import datetime
from typing import Optional, Dict, Any
from sqlalchemy import Column, Integer, DateTime, text
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from sqlalchemy.orm import sessionmaker
# Create base class
Base = declarative_base()
class TimestampMixin:
"""Mixin for adding timestamp fields."""
@declared_attr
def created_at(cls):
return Column(
DateTime,
nullable=False,
default=datetime.utcnow,
server_default=text('CURRENT_TIMESTAMP')
)
@declared_attr
def updated_at(cls):
return Column(
DateTime,
nullable=False,
default=datetime.utcnow,
onupdate=datetime.utcnow,
server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')
)
class BaseModel(Base, TimestampMixin):
"""Base model with common fields for all entities."""
__abstract__ = True
id = Column(Integer, primary_key=True, autoincrement=True)
def to_dict(self, exclude: Optional[set] = None) -> Dict[str, Any]:
"""Convert model instance to dictionary."""
exclude = exclude or set()
result = {}
for column in self.__table__.columns:
if column.name not in exclude:
value = getattr(self, column.name)
# Handle datetime serialization
if isinstance(value, datetime):
result[column.name] = value.isoformat()
else:
result[column.name] = value
return result
def update_from_dict(self, data: Dict[str, Any], exclude: Optional[set] = None) -> None:
"""Update model instance from dictionary."""
exclude = exclude or {"id", "created_at", "updated_at"}
for key, value in data.items():
if key not in exclude and hasattr(self, key):
setattr(self, key, value)
@classmethod
def get_table_name(cls) -> str:
"""Get table name."""
return cls.__tablename__
@classmethod
def get_columns(cls) -> list:
"""Get list of column names."""
return [column.name for column in cls.__table__.columns]
def __repr__(self) -> str:
"""String representation of model."""
return f"<{self.__class__.__name__}(id={getattr(self, 'id', None)})>"
# Legacy session factory for backward compatibility.
# NOTE: it is created unbound; call SessionLocal.configure(bind=engine) at startup.
SessionLocal = sessionmaker()
def get_session():
    """Get database session (legacy function for compatibility)."""
    return SessionLocal()
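
A small illustrative sketch of the compatible base in use (not part of the commit); the Tag model and the import path are hypothetical, since this hunk does not show the file's final location.

from sqlalchemy import Column, String

from app.core.models.base_compat import BaseModel  # hypothetical module path


class Tag(BaseModel):
    """Hypothetical model used only to illustrate the helpers."""
    __tablename__ = "tags"

    name = Column(String(64), nullable=False, unique=True)


tag = Tag(name="music")
tag.update_from_dict({"name": "audio", "id": 999})   # "id" is excluded by default
print(Tag.get_columns())                             # includes id and the timestamp columns
print(tag.to_dict(exclude={"created_at", "updated_at"}))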


@ -0,0 +1,445 @@
"""
Blockchain-related models for TON network integration.
Handles transaction records, wallet management, and smart contract interactions.
"""
from datetime import datetime
from decimal import Decimal
from typing import Dict, List, Optional, Any
from uuid import UUID
import sqlalchemy as sa
from sqlalchemy import Column, String, Integer, DateTime, Boolean, Text, JSON, ForeignKey, Index
from sqlalchemy.orm import relationship, validates
from sqlalchemy.dialects.postgresql import UUID as PostgreSQLUUID
from app.core.models.base import Base, TimestampMixin, UUIDMixin
class BlockchainTransaction(Base, UUIDMixin, TimestampMixin):
"""Model for storing blockchain transaction records."""
__tablename__ = "blockchain_transactions"
# User relationship
user_id = Column(PostgreSQLUUID(as_uuid=True), ForeignKey("users.id"), nullable=False)
user = relationship("User", back_populates="blockchain_transactions")
# Transaction details
transaction_hash = Column(String(64), unique=True, nullable=False, index=True)
transaction_type = Column(String(20), nullable=False) # transfer, mint, burn, stake, etc.
status = Column(String(20), nullable=False, default="pending") # pending, confirmed, failed
# Amount and fees
amount = Column(sa.BIGINT, nullable=False, default=0) # Amount in nanotons
network_fee = Column(sa.BIGINT, nullable=False, default=0) # Network fee in nanotons
# Addresses
sender_address = Column(String(48), nullable=True, index=True)
recipient_address = Column(String(48), nullable=True, index=True)
    # Message and metadata
    message = Column(Text, nullable=True)
    # "metadata" is reserved on declarative classes, so the attribute is "meta"
    # while the underlying column keeps the name "metadata".
    meta = Column("metadata", JSON, nullable=True)
# Blockchain specific fields
block_hash = Column(String(64), nullable=True)
logical_time = Column(sa.BIGINT, nullable=True) # TON logical time
confirmations = Column(Integer, nullable=False, default=0)
# Timing
confirmed_at = Column(DateTime, nullable=True)
failed_at = Column(DateTime, nullable=True)
# Smart contract interaction
contract_address = Column(String(48), nullable=True)
contract_method = Column(String(100), nullable=True)
contract_data = Column(JSON, nullable=True)
# Internal tracking
retry_count = Column(Integer, nullable=False, default=0)
last_retry_at = Column(DateTime, nullable=True)
error_message = Column(Text, nullable=True)
# Indexes for performance
__table_args__ = (
Index("idx_blockchain_tx_user_status", "user_id", "status"),
Index("idx_blockchain_tx_hash", "transaction_hash"),
Index("idx_blockchain_tx_addresses", "sender_address", "recipient_address"),
Index("idx_blockchain_tx_created", "created_at"),
Index("idx_blockchain_tx_type_status", "transaction_type", "status"),
)
@validates('transaction_type')
def validate_transaction_type(self, key, transaction_type):
"""Validate transaction type."""
allowed_types = {
'transfer', 'mint', 'burn', 'stake', 'unstake',
'contract_call', 'deploy', 'withdraw', 'deposit'
}
if transaction_type not in allowed_types:
raise ValueError(f"Invalid transaction type: {transaction_type}")
return transaction_type
@validates('status')
def validate_status(self, key, status):
"""Validate transaction status."""
allowed_statuses = {'pending', 'confirmed', 'failed', 'cancelled'}
if status not in allowed_statuses:
raise ValueError(f"Invalid status: {status}")
return status
@property
def amount_tons(self) -> Decimal:
"""Convert nanotons to TON."""
return Decimal(self.amount) / Decimal("1000000000")
@property
def fee_tons(self) -> Decimal:
"""Convert fee nanotons to TON."""
return Decimal(self.network_fee) / Decimal("1000000000")
    @property
    def is_incoming(self) -> bool:
        """Check if transaction is incoming to user's wallet."""
        return self.transaction_type in {'transfer', 'mint', 'deposit'} and bool(self.recipient_address)
    @property
    def is_outgoing(self) -> bool:
        """Check if transaction is outgoing from user's wallet."""
        return self.transaction_type in {'transfer', 'burn', 'withdraw'} and bool(self.sender_address)
def to_dict(self) -> Dict[str, Any]:
"""Convert transaction to dictionary."""
return {
"id": str(self.id),
"hash": self.transaction_hash,
"type": self.transaction_type,
"status": self.status,
"amount": self.amount,
"amount_tons": str(self.amount_tons),
"fee": self.network_fee,
"fee_tons": str(self.fee_tons),
"sender": self.sender_address,
"recipient": self.recipient_address,
"message": self.message,
"block_hash": self.block_hash,
"confirmations": self.confirmations,
"created_at": self.created_at.isoformat() if self.created_at else None,
"confirmed_at": self.confirmed_at.isoformat() if self.confirmed_at else None,
"is_incoming": self.is_incoming,
"is_outgoing": self.is_outgoing
}
class SmartContract(Base, UUIDMixin, TimestampMixin):
"""Model for smart contract management."""
__tablename__ = "smart_contracts"
# Contract details
address = Column(String(48), unique=True, nullable=False, index=True)
name = Column(String(100), nullable=False)
description = Column(Text, nullable=True)
contract_type = Column(String(50), nullable=False) # nft, token, defi, etc.
# Contract metadata
abi = Column(JSON, nullable=True) # Contract ABI if available
source_code = Column(Text, nullable=True)
compiler_version = Column(String(20), nullable=True)
# Deployment info
deployer_address = Column(String(48), nullable=True)
deployment_tx_hash = Column(String(64), nullable=True)
deployment_block = Column(sa.BIGINT, nullable=True)
# Status and verification
is_verified = Column(Boolean, nullable=False, default=False)
is_active = Column(Boolean, nullable=False, default=True)
verification_date = Column(DateTime, nullable=True)
# Usage statistics
interaction_count = Column(Integer, nullable=False, default=0)
last_interaction_at = Column(DateTime, nullable=True)
# Relationships
transactions = relationship(
"BlockchainTransaction",
foreign_keys="BlockchainTransaction.contract_address",
primaryjoin="SmartContract.address == BlockchainTransaction.contract_address",
back_populates=None
)
__table_args__ = (
Index("idx_smart_contract_address", "address"),
Index("idx_smart_contract_type", "contract_type"),
Index("idx_smart_contract_active", "is_active"),
)
@validates('contract_type')
def validate_contract_type(self, key, contract_type):
"""Validate contract type."""
allowed_types = {
'nft', 'token', 'defi', 'game', 'dao', 'bridge',
'oracle', 'multisig', 'custom'
}
if contract_type not in allowed_types:
raise ValueError(f"Invalid contract type: {contract_type}")
return contract_type
class TokenBalance(Base, UUIDMixin, TimestampMixin):
"""Model for tracking user token balances."""
__tablename__ = "token_balances"
# User relationship
user_id = Column(PostgreSQLUUID(as_uuid=True), ForeignKey("users.id"), nullable=False)
user = relationship("User", back_populates="token_balances")
# Token details
token_address = Column(String(48), nullable=False, index=True)
token_name = Column(String(100), nullable=True)
token_symbol = Column(String(10), nullable=True)
token_decimals = Column(Integer, nullable=False, default=9)
# Balance information
balance = Column(sa.BIGINT, nullable=False, default=0) # Raw balance
locked_balance = Column(sa.BIGINT, nullable=False, default=0) # Locked in contracts
# Metadata
last_update_block = Column(sa.BIGINT, nullable=True)
last_update_tx = Column(String(64), nullable=True)
# Unique constraint
__table_args__ = (
sa.UniqueConstraint("user_id", "token_address", name="uq_user_token"),
Index("idx_token_balance_user", "user_id"),
Index("idx_token_balance_token", "token_address"),
Index("idx_token_balance_updated", "updated_at"),
)
@property
def available_balance(self) -> int:
"""Get available (unlocked) balance."""
return max(0, self.balance - self.locked_balance)
@property
def formatted_balance(self) -> Decimal:
"""Get balance formatted with decimals."""
return Decimal(self.balance) / Decimal(10 ** self.token_decimals)
@property
def formatted_available_balance(self) -> Decimal:
"""Get available balance formatted with decimals."""
return Decimal(self.available_balance) / Decimal(10 ** self.token_decimals)
class StakingPosition(Base, UUIDMixin, TimestampMixin):
"""Model for staking positions."""
__tablename__ = "staking_positions"
# User relationship
user_id = Column(PostgreSQLUUID(as_uuid=True), ForeignKey("users.id"), nullable=False)
user = relationship("User", back_populates="staking_positions")
# Staking details
validator_address = Column(String(48), nullable=False, index=True)
pool_address = Column(String(48), nullable=True)
# Amount and timing
staked_amount = Column(sa.BIGINT, nullable=False) # Amount in nanotons
stake_tx_hash = Column(String(64), nullable=False)
stake_block = Column(sa.BIGINT, nullable=True)
# Status
status = Column(String(20), nullable=False, default="active") # active, unstaking, withdrawn
unstake_tx_hash = Column(String(64), nullable=True)
unstake_requested_at = Column(DateTime, nullable=True)
withdrawn_at = Column(DateTime, nullable=True)
# Rewards
rewards_earned = Column(sa.BIGINT, nullable=False, default=0)
last_reward_claim = Column(DateTime, nullable=True)
last_reward_tx = Column(String(64), nullable=True)
# Lock period
lock_period_days = Column(Integer, nullable=False, default=0)
unlock_date = Column(DateTime, nullable=True)
__table_args__ = (
Index("idx_staking_user_status", "user_id", "status"),
Index("idx_staking_validator", "validator_address"),
Index("idx_staking_unlock", "unlock_date"),
)
@validates('status')
def validate_status(self, key, status):
"""Validate staking status."""
allowed_statuses = {'active', 'unstaking', 'withdrawn', 'slashed'}
if status not in allowed_statuses:
raise ValueError(f"Invalid staking status: {status}")
return status
@property
def staked_tons(self) -> Decimal:
"""Get staked amount in TON."""
return Decimal(self.staked_amount) / Decimal("1000000000")
@property
def rewards_tons(self) -> Decimal:
"""Get rewards amount in TON."""
return Decimal(self.rewards_earned) / Decimal("1000000000")
@property
def is_locked(self) -> bool:
"""Check if staking position is still locked."""
if not self.unlock_date:
return False
return datetime.utcnow() < self.unlock_date
class NFTCollection(Base, UUIDMixin, TimestampMixin):
"""Model for NFT collections."""
__tablename__ = "nft_collections"
# Collection details
contract_address = Column(String(48), unique=True, nullable=False, index=True)
name = Column(String(100), nullable=False)
description = Column(Text, nullable=True)
symbol = Column(String(10), nullable=True)
# Creator and metadata
creator_address = Column(String(48), nullable=False)
metadata_uri = Column(String(500), nullable=True)
base_uri = Column(String(500), nullable=True)
# Collection stats
total_supply = Column(Integer, nullable=False, default=0)
max_supply = Column(Integer, nullable=True)
floor_price = Column(sa.BIGINT, nullable=True) # In nanotons
# Status
is_verified = Column(Boolean, nullable=False, default=False)
is_active = Column(Boolean, nullable=False, default=True)
# Relationships
nfts = relationship("NFTToken", back_populates="collection")
__table_args__ = (
Index("idx_nft_collection_address", "contract_address"),
Index("idx_nft_collection_creator", "creator_address"),
Index("idx_nft_collection_verified", "is_verified"),
)
class NFTToken(Base, UUIDMixin, TimestampMixin):
"""Model for individual NFT tokens."""
__tablename__ = "nft_tokens"
# Token identification
collection_id = Column(PostgreSQLUUID(as_uuid=True), ForeignKey("nft_collections.id"), nullable=False)
collection = relationship("NFTCollection", back_populates="nfts")
token_id = Column(String(100), nullable=False) # Token ID within collection
token_address = Column(String(48), unique=True, nullable=False, index=True)
# Ownership
owner_address = Column(String(48), nullable=False, index=True)
# Metadata
name = Column(String(200), nullable=True)
description = Column(Text, nullable=True)
image_uri = Column(String(500), nullable=True)
metadata_uri = Column(String(500), nullable=True)
attributes = Column(JSON, nullable=True)
# Trading
last_sale_price = Column(sa.BIGINT, nullable=True) # In nanotons
last_sale_tx = Column(String(64), nullable=True)
last_sale_date = Column(DateTime, nullable=True)
# Status
is_burned = Column(Boolean, nullable=False, default=False)
burned_at = Column(DateTime, nullable=True)
__table_args__ = (
sa.UniqueConstraint("collection_id", "token_id", name="uq_collection_token"),
Index("idx_nft_token_address", "token_address"),
Index("idx_nft_token_owner", "owner_address"),
Index("idx_nft_token_collection", "collection_id"),
)
@property
def last_sale_tons(self) -> Optional[Decimal]:
"""Get last sale price in TON."""
if self.last_sale_price is None:
return None
return Decimal(self.last_sale_price) / Decimal("1000000000")
class DeFiPosition(Base, UUIDMixin, TimestampMixin):
"""Model for DeFi protocol positions."""
__tablename__ = "defi_positions"
# User relationship
user_id = Column(PostgreSQLUUID(as_uuid=True), ForeignKey("users.id"), nullable=False)
user = relationship("User", back_populates="defi_positions")
# Protocol details
protocol_name = Column(String(100), nullable=False)
protocol_address = Column(String(48), nullable=False)
position_type = Column(String(50), nullable=False) # liquidity, lending, borrowing, etc.
# Position details
token_a_address = Column(String(48), nullable=True)
token_a_amount = Column(sa.BIGINT, nullable=False, default=0)
token_b_address = Column(String(48), nullable=True)
token_b_amount = Column(sa.BIGINT, nullable=False, default=0)
# Value tracking
initial_value = Column(sa.BIGINT, nullable=False, default=0) # In nanotons
current_value = Column(sa.BIGINT, nullable=False, default=0)
last_value_update = Column(DateTime, nullable=True)
# Rewards and fees
rewards_earned = Column(sa.BIGINT, nullable=False, default=0)
fees_paid = Column(sa.BIGINT, nullable=False, default=0)
# Status
status = Column(String(20), nullable=False, default="active") # active, closed, liquidated
opened_tx = Column(String(64), nullable=False)
closed_tx = Column(String(64), nullable=True)
closed_at = Column(DateTime, nullable=True)
__table_args__ = (
Index("idx_defi_user_protocol", "user_id", "protocol_name"),
Index("idx_defi_position_type", "position_type"),
Index("idx_defi_status", "status"),
)
@validates('position_type')
def validate_position_type(self, key, position_type):
"""Validate position type."""
allowed_types = {
'liquidity', 'lending', 'borrowing', 'farming',
'staking', 'options', 'futures', 'insurance'
}
if position_type not in allowed_types:
raise ValueError(f"Invalid position type: {position_type}")
return position_type
@validates('status')
def validate_status(self, key, status):
"""Validate position status."""
allowed_statuses = {'active', 'closed', 'liquidated', 'expired'}
if status not in allowed_statuses:
raise ValueError(f"Invalid position status: {status}")
return status
@property
def current_value_tons(self) -> Decimal:
"""Get current value in TON."""
return Decimal(self.current_value) / Decimal("1000000000")
@property
def pnl_tons(self) -> Decimal:
"""Get profit/loss in TON."""
return Decimal(self.current_value - self.initial_value) / Decimal("1000000000")
@property
def pnl_percentage(self) -> Decimal:
"""Get profit/loss percentage."""
if self.initial_value == 0:
return Decimal("0")
return (Decimal(self.current_value - self.initial_value) / Decimal(self.initial_value)) * 100
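
For illustration only (not part of the commit), the snippet below builds an in-memory transaction record and exercises the validators and the nanoton/TON conversion helpers; the values and the import path are assumptions, nothing is persisted, and the related User model is assumed to be imported elsewhere so the ORM registry can be configured.

from app.core.models.blockchain import BlockchainTransaction  # assumed module path

tx = BlockchainTransaction(
    transaction_hash="0" * 64,
    transaction_type="transfer",     # checked by the @validates hook above
    status="pending",
    amount=1_500_000_000,            # 1.5 TON expressed in nanotons
    network_fee=5_000_000,
    sender_address="EQ" + "A" * 46,
    recipient_address="EQ" + "B" * 46,
)

print(tx.amount_tons)    # Decimal('1.5')
print(tx.fee_tons)       # Decimal('0.005')
print(tx.is_incoming)    # True: a 'transfer' with a recipient address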

731
app/core/models/content.py Normal file

@ -0,0 +1,731 @@
"""
Content models with async support and enhanced features
"""
import hashlib
import mimetypes
from datetime import datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Optional, List, Dict, Any, Union
from urllib.parse import urljoin
from sqlalchemy import Column, String, Integer, BigInteger, Boolean, DateTime, Text, ForeignKey, Index, text
from sqlalchemy.dialects.postgresql import JSONB, ARRAY
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import relationship
import structlog
from app.core.models.base import BaseModel
from app.core.config import settings, PROJECT_HOST
logger = structlog.get_logger(__name__)
class ContentType(str, Enum):
"""Content type enumeration"""
AUDIO = "audio"
VIDEO = "video"
IMAGE = "image"
TEXT = "text"
DOCUMENT = "document"
UNKNOWN = "unknown"
class ContentStatus(str, Enum):
"""Content status enumeration"""
UPLOADING = "uploading"
PROCESSING = "processing"
READY = "ready"
FAILED = "failed"
DISABLED = "disabled"
DELETED = "deleted"
class StorageType(str, Enum):
"""Storage type enumeration"""
LOCAL = "local"
ONCHAIN = "onchain"
IPFS = "ipfs"
HYBRID = "hybrid"
class LicenseType(str, Enum):
"""License type enumeration"""
LISTEN = "listen"
USE = "use"
RESALE = "resale"
EXCLUSIVE = "exclusive"
class StoredContent(BaseModel):
"""Enhanced content storage model"""
__tablename__ = 'stored_content'
# Content identification
hash = Column(
String(128),
nullable=False,
unique=True,
index=True,
comment="Content hash (SHA-256 or custom)"
)
content_id = Column(
String(256),
nullable=True,
index=True,
comment="Content identifier (CID for IPFS)"
)
# File information
filename = Column(
String(512),
nullable=False,
comment="Original filename"
)
file_size = Column(
BigInteger,
nullable=False,
default=0,
comment="File size in bytes"
)
mime_type = Column(
String(128),
nullable=True,
comment="MIME type of the content"
)
# Content type and storage
content_type = Column(
String(32),
nullable=False,
default=ContentType.UNKNOWN.value,
index=True,
comment="Content type category"
)
storage_type = Column(
String(32),
nullable=False,
default=StorageType.LOCAL.value,
index=True,
comment="Storage type"
)
# File path and URLs
file_path = Column(
String(1024),
nullable=True,
comment="Local file path"
)
external_url = Column(
String(2048),
nullable=True,
comment="External URL for remote content"
)
# Blockchain related
onchain_index = Column(
Integer,
nullable=True,
index=True,
comment="On-chain index number"
)
owner_address = Column(
String(256),
nullable=True,
index=True,
comment="Blockchain owner address"
)
# User and access
user_id = Column(
String(36), # UUID
ForeignKey('users.id'),
nullable=True,
index=True,
comment="User who uploaded the content"
)
# Encryption and security
encrypted = Column(
Boolean,
nullable=False,
default=False,
comment="Whether content is encrypted"
)
encryption_key_id = Column(
String(36), # UUID
ForeignKey('encryption_keys.id'),
nullable=True,
comment="Encryption key reference"
)
# Processing status
disabled = Column(
Boolean,
nullable=False,
default=False,
index=True,
comment="Whether content is disabled"
)
# Content metadata
title = Column(
String(512),
nullable=True,
comment="Content title"
)
description = Column(
Text,
nullable=True,
comment="Content description"
)
tags = Column(
ARRAY(String),
nullable=False,
default=list,
comment="Content tags"
)
# Media-specific metadata
duration = Column(
Integer,
nullable=True,
comment="Duration in seconds (for audio/video)"
)
width = Column(
Integer,
nullable=True,
comment="Width in pixels (for images/video)"
)
height = Column(
Integer,
nullable=True,
comment="Height in pixels (for images/video)"
)
bitrate = Column(
Integer,
nullable=True,
comment="Bitrate (for audio/video)"
)
# Conversion and processing
processing_status = Column(
String(32),
nullable=False,
default=ContentStatus.READY.value,
index=True,
comment="Processing status"
)
conversion_data = Column(
JSONB,
nullable=False,
default=dict,
comment="Conversion and processing data"
)
# Statistics
download_count = Column(
Integer,
nullable=False,
default=0,
comment="Number of downloads"
)
view_count = Column(
Integer,
nullable=False,
default=0,
comment="Number of views"
)
# Relationships
user = relationship('User', back_populates='content_items')
encryption_key = relationship('EncryptionKey', back_populates='content_items')
user_contents = relationship('UserContent', back_populates='content')
user_actions = relationship('UserAction', back_populates='content')
# Indexes for performance
__table_args__ = (
Index('idx_content_hash', 'hash'),
Index('idx_content_user_type', 'user_id', 'content_type'),
Index('idx_content_storage_status', 'storage_type', 'status'),
Index('idx_content_onchain', 'onchain_index'),
Index('idx_content_created', 'created_at'),
Index('idx_content_disabled', 'disabled'),
)
def __str__(self) -> str:
"""String representation"""
return f"StoredContent({self.id}, hash={self.hash[:8]}..., filename={self.filename})"
@property
def file_extension(self) -> str:
"""Get file extension"""
return Path(self.filename).suffix.lower()
@property
def web_url(self) -> str:
"""Get web accessible URL"""
if self.external_url:
return self.external_url
if self.hash:
return urljoin(str(PROJECT_HOST), f"/api/v1.5/storage/{self.hash}")
return ""
@property
def download_url(self) -> str:
"""Get download URL"""
if self.hash:
return urljoin(str(PROJECT_HOST), f"/api/v1/storage/{self.hash}")
return ""
@property
def is_media(self) -> bool:
"""Check if content is media (audio/video/image)"""
return self.content_type in [ContentType.AUDIO, ContentType.VIDEO, ContentType.IMAGE]
@property
def is_processed(self) -> bool:
"""Check if content is fully processed"""
return self.processing_status == ContentStatus.READY.value
@property
def cache_key(self) -> str:
"""Override cache key to use hash"""
return f"content:hash:{self.hash}"
def detect_content_type(self) -> ContentType:
"""Detect content type from MIME type"""
if not self.mime_type:
# Try to guess from extension
mime_type, _ = mimetypes.guess_type(self.filename)
self.mime_type = mime_type
if self.mime_type:
if self.mime_type.startswith('audio/'):
return ContentType.AUDIO
elif self.mime_type.startswith('video/'):
return ContentType.VIDEO
elif self.mime_type.startswith('image/'):
return ContentType.IMAGE
elif self.mime_type.startswith('text/'):
return ContentType.TEXT
elif 'application/' in self.mime_type:
return ContentType.DOCUMENT
return ContentType.UNKNOWN
def calculate_hash(self, file_data: bytes) -> str:
"""Calculate hash for file data"""
return hashlib.sha256(file_data).hexdigest()
def set_conversion_data(self, key: str, value: Any) -> None:
"""Set conversion data"""
if not self.conversion_data:
self.conversion_data = {}
self.conversion_data[key] = value
def get_conversion_data(self, key: str, default: Any = None) -> Any:
"""Get conversion data"""
if not self.conversion_data:
return default
return self.conversion_data.get(key, default)
def add_tag(self, tag: str) -> None:
"""Add tag to content"""
if not self.tags:
self.tags = []
tag = tag.strip().lower()
if tag and tag not in self.tags:
self.tags.append(tag)
def remove_tag(self, tag: str) -> None:
"""Remove tag from content"""
if self.tags:
tag = tag.strip().lower()
if tag in self.tags:
self.tags.remove(tag)
def increment_download_count(self) -> None:
"""Increment download counter"""
self.download_count += 1
def increment_view_count(self) -> None:
"""Increment view counter"""
self.view_count += 1
@classmethod
async def get_by_hash(
cls,
session: AsyncSession,
content_hash: str
) -> Optional['StoredContent']:
"""Get content by hash"""
try:
stmt = select(cls).where(cls.hash == content_hash)
result = await session.execute(stmt)
return result.scalar_one_or_none()
except Exception as e:
logger.error("Error getting content by hash", hash=content_hash, error=str(e))
return None
@classmethod
async def get_by_user(
cls,
session: AsyncSession,
user_id: str,
content_type: Optional[ContentType] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> List['StoredContent']:
"""Get content by user"""
try:
stmt = select(cls).where(cls.user_id == user_id)
if content_type:
stmt = stmt.where(cls.content_type == content_type.value)
stmt = stmt.order_by(cls.created_at.desc())
if offset:
stmt = stmt.offset(offset)
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting content by user", user_id=user_id, error=str(e))
return []
@classmethod
async def get_recent(
cls,
session: AsyncSession,
days: int = 7,
content_type: Optional[ContentType] = None,
limit: Optional[int] = None
) -> List['StoredContent']:
"""Get recent content"""
try:
cutoff_date = datetime.utcnow() - timedelta(days=days)
stmt = select(cls).where(
cls.created_at >= cutoff_date,
cls.disabled == False,
cls.processing_status == ContentStatus.READY.value
)
if content_type:
stmt = stmt.where(cls.content_type == content_type.value)
stmt = stmt.order_by(cls.created_at.desc())
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting recent content", days=days, error=str(e))
return []
@classmethod
async def search(
cls,
session: AsyncSession,
query: str,
content_type: Optional[ContentType] = None,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> List['StoredContent']:
"""Search content by title and description"""
try:
search_pattern = f"%{query.lower()}%"
stmt = select(cls).where(
(cls.title.ilike(search_pattern)) |
(cls.description.ilike(search_pattern)) |
(cls.filename.ilike(search_pattern)),
cls.disabled == False,
cls.processing_status == ContentStatus.READY.value
)
if content_type:
stmt = stmt.where(cls.content_type == content_type.value)
stmt = stmt.order_by(cls.created_at.desc())
if offset:
stmt = stmt.offset(offset)
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error searching content", query=query, error=str(e))
return []
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary with additional computed fields"""
data = super().to_dict()
data.update({
'web_url': self.web_url,
'download_url': self.download_url,
'file_extension': self.file_extension,
'is_media': self.is_media,
'is_processed': self.is_processed
})
return data
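# Illustrative usage sketch (added, not part of the original module): shows how the
# query helpers above are typically combined. Assumes an AsyncSession obtained from
# the application's session factory; deduplicating uploads by SHA-256 is an assumption.
async def example_find_or_search(session: AsyncSession, file_data: bytes, query: str) -> List['StoredContent']:
    # Reuse existing content if the exact same bytes were stored before
    content_hash = hashlib.sha256(file_data).hexdigest()
    existing = await StoredContent.get_by_hash(session, content_hash)
    if existing:
        return [existing]
    # Otherwise fall back to a title/description/filename search, newest first
    return await StoredContent.search(session, query, limit=20)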
class UserContent(BaseModel):
"""User content ownership and licensing"""
__tablename__ = 'user_content'
# Content relationship
content_id = Column(
String(36), # UUID
ForeignKey('stored_content.id'),
nullable=False,
index=True,
comment="Reference to stored content"
)
user_id = Column(
String(36), # UUID
ForeignKey('users.id'),
nullable=False,
index=True,
comment="User who owns this content"
)
# License information
license_type = Column(
String(32),
nullable=False,
default=LicenseType.LISTEN.value,
comment="Type of license"
)
# Blockchain data
onchain_address = Column(
String(256),
nullable=True,
index=True,
comment="On-chain contract address"
)
owner_address = Column(
String(256),
nullable=True,
index=True,
comment="Blockchain owner address"
)
# Transaction data
purchase_transaction = Column(
String(128),
nullable=True,
comment="Purchase transaction hash"
)
purchase_amount = Column(
BigInteger,
nullable=True,
comment="Purchase amount in minimal units"
)
# Wallet connection
wallet_connection_id = Column(
String(36), # UUID
ForeignKey('wallet_connections.id'),
nullable=True,
comment="Wallet connection used for purchase"
)
# Access control
access_granted = Column(
Boolean,
nullable=False,
default=False,
comment="Whether access is granted"
)
access_expires_at = Column(
DateTime,
nullable=True,
comment="When access expires (for temporary licenses)"
)
# Usage tracking
download_count = Column(
Integer,
nullable=False,
default=0,
comment="Number of downloads by this user"
)
last_accessed = Column(
DateTime,
nullable=True,
comment="Last access timestamp"
)
# Relationships
user = relationship('User', back_populates='content_items')
content = relationship('StoredContent', back_populates='user_contents')
wallet_connection = relationship('WalletConnection', back_populates='user_contents')
# Indexes
__table_args__ = (
Index('idx_user_content_user', 'user_id'),
Index('idx_user_content_content', 'content_id'),
Index('idx_user_content_onchain', 'onchain_address'),
Index('idx_user_content_owner', 'owner_address'),
Index('idx_user_content_status', 'status'),
)
def __str__(self) -> str:
"""String representation"""
return f"UserContent({self.id}, user={self.user_id}, content={self.content_id})"
@property
def is_expired(self) -> bool:
"""Check if access has expired"""
if not self.access_expires_at:
return False
return datetime.utcnow() > self.access_expires_at
@property
def is_accessible(self) -> bool:
"""Check if content is accessible"""
return self.access_granted and not self.is_expired and self.status == 'active'
def grant_access(self, expires_at: Optional[datetime] = None) -> None:
"""Grant access to content"""
self.access_granted = True
self.access_expires_at = expires_at
self.last_accessed = datetime.utcnow()
def revoke_access(self) -> None:
"""Revoke access to content"""
self.access_granted = False
def record_download(self) -> None:
"""Record a download"""
self.download_count += 1
self.last_accessed = datetime.utcnow()
@classmethod
async def get_user_access(
cls,
session: AsyncSession,
user_id: str,
content_id: str
) -> Optional['UserContent']:
"""Get user access to specific content"""
try:
stmt = select(cls).where(
cls.user_id == user_id,
cls.content_id == content_id,
cls.status == 'active'
)
result = await session.execute(stmt)
return result.scalar_one_or_none()
except Exception as e:
logger.error("Error getting user access", user_id=user_id, content_id=content_id, error=str(e))
return None
@classmethod
async def get_user_content(
cls,
session: AsyncSession,
user_id: str,
limit: Optional[int] = None,
offset: Optional[int] = None
) -> List['UserContent']:
"""Get all content accessible by user"""
try:
stmt = select(cls).where(
cls.user_id == user_id,
cls.status == 'active',
cls.access_granted == True
).order_by(cls.created_at.desc())
if offset:
stmt = stmt.offset(offset)
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting user content", user_id=user_id, error=str(e))
return []
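# Illustrative sketch (added): a typical access check built from the helpers above.
# Assumes the caller commits the session after mutating the record.
async def example_check_access(session: AsyncSession, user_id: str, content_id: str) -> bool:
    record = await UserContent.get_user_access(session, user_id, content_id)
    if record is None:
        return False
    if record.is_accessible:
        record.record_download()  # track usage on successful access
        return True
    if record.is_expired:
        record.revoke_access()    # temporary license ran out
    return False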
class EncryptionKey(BaseModel):
"""Encryption key management"""
__tablename__ = 'encryption_keys'
# Key identification
key_hash = Column(
String(128),
nullable=False,
unique=True,
index=True,
comment="Hash of the encryption key"
)
algorithm = Column(
String(32),
nullable=False,
default="AES-256-GCM",
comment="Encryption algorithm used"
)
# Key metadata
purpose = Column(
String(64),
nullable=False,
comment="Purpose of the key (content, user_data, etc.)"
)
# Access control
owner_id = Column(
String(36), # UUID
ForeignKey('users.id'),
nullable=True,
comment="Key owner (if user-specific)"
)
# Key lifecycle
expires_at = Column(
DateTime,
nullable=True,
comment="Key expiration timestamp"
)
revoked_at = Column(
DateTime,
nullable=True,
comment="Key revocation timestamp"
)
# Relationships
owner = relationship('User', back_populates='encryption_keys')
content_items = relationship('StoredContent', back_populates='encryption_key')
def __str__(self) -> str:
"""String representation"""
return f"EncryptionKey({self.id}, hash={self.key_hash[:8]}...)"
@property
def is_valid(self) -> bool:
"""Check if key is valid (not expired or revoked)"""
now = datetime.utcnow()
if self.revoked_at and self.revoked_at <= now:
return False
if self.expires_at and self.expires_at <= now:
return False
return True
def revoke(self) -> None:
"""Revoke the key"""
self.revoked_at = datetime.utcnow()
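# Illustrative sketch (added): how key validity is expected to be checked before a key
# is used for decryption; the selection policy here is an assumption.
def example_select_usable_key(keys: List['EncryptionKey']) -> Optional['EncryptionKey']:
    # Return the first key that is neither expired nor revoked
    for key in keys:
        if key.is_valid:
            return key
    return None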
@ -0,0 +1,388 @@
"""Compatible content models for MariaDB."""
from datetime import datetime
from typing import Optional, List, Dict, Any
from sqlalchemy import Column, String, Boolean, Text, Integer, DateTime, BigInteger, Index, ForeignKey
from sqlalchemy.orm import relationship
from app.core.models.base_compatible import BaseModel
class Content(BaseModel):
"""Content model compatible with existing MariaDB schema."""
__tablename__ = "content"
# Basic content information
user_id = Column(Integer, ForeignKey('users.id'), nullable=False, index=True)
filename = Column(String(255), nullable=False)
original_filename = Column(String(255), nullable=False)
file_path = Column(String(500), nullable=False)
# File metadata
file_size = Column(BigInteger, nullable=False) # bytes
file_type = Column(String(100), nullable=False)
mime_type = Column(String(100), nullable=False)
file_extension = Column(String(10), nullable=False)
# Content metadata
title = Column(String(255), nullable=True)
description = Column(Text, nullable=True)
tags = Column(Text, nullable=True) # JSON or comma-separated
# Status and visibility
is_public = Column(Boolean, default=False, nullable=False)
is_active = Column(Boolean, default=True, nullable=False)
is_indexed = Column(Boolean, default=False, nullable=False)
is_converted = Column(Boolean, default=False, nullable=False)
# Access and security
access_password = Column(String(255), nullable=True)
download_count = Column(Integer, default=0, nullable=False)
view_count = Column(Integer, default=0, nullable=False)
# Processing status
processing_status = Column(String(50), default="pending", nullable=False)
processing_error = Column(Text, nullable=True)
processing_started = Column(DateTime, nullable=True)
processing_completed = Column(DateTime, nullable=True)
# File hashes for integrity
md5_hash = Column(String(32), nullable=True, index=True)
sha256_hash = Column(String(64), nullable=True, index=True)
# Thumbnails and previews
thumbnail_path = Column(String(500), nullable=True)
preview_path = Column(String(500), nullable=True)
# TON Blockchain integration
ton_transaction_hash = Column(String(100), nullable=True, index=True)
ton_storage_proof = Column(Text, nullable=True)
ton_storage_fee = Column(BigInteger, default=0, nullable=False) # nanotons
# Expiration and cleanup
expires_at = Column(DateTime, nullable=True)
auto_delete = Column(Boolean, default=False, nullable=False)
# Relationships
user = relationship("User", back_populates="content")
# Table indexes for performance
__table_args__ = (
Index('idx_content_user_active', 'user_id', 'is_active'),
Index('idx_content_public_indexed', 'is_public', 'is_indexed'),
Index('idx_content_file_type', 'file_type', 'mime_type'),
Index('idx_content_created', 'created_at'),
Index('idx_content_size', 'file_size'),
Index('idx_content_processing', 'processing_status'),
Index('idx_content_ton_tx', 'ton_transaction_hash'),
Index('idx_content_expires', 'expires_at', 'auto_delete'),
)
def is_expired(self) -> bool:
"""Check if content is expired."""
if not self.expires_at:
return False
return datetime.utcnow() > self.expires_at
def is_image(self) -> bool:
"""Check if content is an image."""
return self.file_type.lower() in ['image', 'img'] or \
self.mime_type.startswith('image/')
def is_video(self) -> bool:
"""Check if content is a video."""
return self.file_type.lower() == 'video' or \
self.mime_type.startswith('video/')
def is_document(self) -> bool:
"""Check if content is a document."""
return self.file_type.lower() in ['document', 'doc', 'pdf'] or \
self.mime_type in ['application/pdf', 'application/msword', 'text/plain']
def get_file_size_human(self) -> str:
"""Get human-readable file size."""
size = self.file_size
for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
if size < 1024.0:
return f"{size:.1f} {unit}"
size /= 1024.0
return f"{size:.1f} PB"
def increment_download_count(self) -> None:
"""Increment download counter."""
self.download_count += 1
def increment_view_count(self) -> None:
"""Increment view counter."""
self.view_count += 1
def mark_as_indexed(self) -> None:
"""Mark content as indexed."""
self.is_indexed = True
def mark_as_converted(self) -> None:
"""Mark content as converted."""
self.is_converted = True
self.processing_status = "completed"
self.processing_completed = datetime.utcnow()
def set_processing_error(self, error: str) -> None:
"""Set processing error."""
self.processing_status = "error"
self.processing_error = error
self.processing_completed = datetime.utcnow()
def start_processing(self) -> None:
"""Mark processing as started."""
self.processing_status = "processing"
self.processing_started = datetime.utcnow()
self.processing_error = None
def get_tags_list(self) -> List[str]:
"""Get tags as list."""
if not self.tags:
return []
# Try to parse as JSON first, fallback to comma-separated
try:
import json
return json.loads(self.tags)
except (ValueError, TypeError):
return [tag.strip() for tag in self.tags.split(',') if tag.strip()]
def set_tags_list(self, tags: List[str]) -> None:
"""Set tags from list."""
import json
self.tags = json.dumps(tags) if tags else None
def to_dict(self, include_sensitive: bool = False) -> Dict[str, Any]:
"""Convert to dictionary with option to exclude sensitive data."""
exclude = set()
if not include_sensitive:
exclude.update({"access_password", "file_path", "processing_error"})
data = super().to_dict(exclude=exclude)
# Add computed fields
data.update({
"file_size_human": self.get_file_size_human(),
"is_image": self.is_image(),
"is_video": self.is_video(),
"is_document": self.is_document(),
"is_expired": self.is_expired(),
"tags_list": self.get_tags_list(),
})
return data
def to_public_dict(self) -> Dict[str, Any]:
"""Convert to public dictionary (minimal content info)."""
return {
"id": self.id,
"filename": self.filename,
"title": self.title,
"description": self.description,
"file_type": self.file_type,
"file_size": self.file_size,
"file_size_human": self.get_file_size_human(),
"is_image": self.is_image(),
"is_video": self.is_video(),
"is_document": self.is_document(),
"download_count": self.download_count,
"view_count": self.view_count,
"tags_list": self.get_tags_list(),
"created_at": self.created_at.isoformat() if self.created_at else None,
}
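# Illustrative sketch (added): tag round-trip and computed fields on an in-memory
# instance; all field values below are made up for the example.
def example_content_usage() -> Dict[str, Any]:
    item = Content(
        user_id=1,
        filename="track.mp3",
        original_filename="track.mp3",
        file_path="/app/data/track.mp3",
        file_size=7_340_032,
        file_type="audio",
        mime_type="audio/mpeg",
        file_extension="mp3",
    )
    item.set_tags_list(["music", "demo"])   # stored as JSON text in the tags column
    assert item.get_tags_list() == ["music", "demo"]
    assert item.get_file_size_human() == "7.0 MB"
    return item.to_public_dict()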
class ContentShare(BaseModel):
"""Content sharing model for tracking shared content."""
__tablename__ = "content_shares"
content_id = Column(Integer, ForeignKey('content.id'), nullable=False, index=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=True, index=True) # Can be null for anonymous shares
# Share metadata
share_token = Column(String(100), unique=True, nullable=False, index=True)
share_url = Column(String(500), nullable=False)
# Share settings
is_active = Column(Boolean, default=True, nullable=False)
is_password_protected = Column(Boolean, default=False, nullable=False)
share_password = Column(String(255), nullable=True)
# Access control
max_downloads = Column(Integer, nullable=True) # Null = unlimited
download_count = Column(Integer, default=0, nullable=False)
view_count = Column(Integer, default=0, nullable=False)
# Time limits
expires_at = Column(DateTime, nullable=True)
# Tracking
ip_address = Column(String(45), nullable=True)
user_agent = Column(Text, nullable=True)
# Relationships
content = relationship("Content")
user = relationship("User")
__table_args__ = (
Index('idx_shares_content_active', 'content_id', 'is_active'),
Index('idx_shares_token', 'share_token'),
Index('idx_shares_expires', 'expires_at'),
)
def is_expired(self) -> bool:
"""Check if share is expired."""
if not self.expires_at:
return False
return datetime.utcnow() > self.expires_at
def is_download_limit_reached(self) -> bool:
"""Check if download limit is reached."""
if not self.max_downloads:
return False
return self.download_count >= self.max_downloads
def is_valid(self) -> bool:
"""Check if share is valid."""
return (self.is_active and
not self.is_expired() and
not self.is_download_limit_reached())
def increment_download(self) -> bool:
"""Increment download count and return if still valid."""
if not self.is_valid():
return False
self.download_count += 1
return self.is_valid()
def increment_view(self) -> None:
"""Increment view count."""
self.view_count += 1
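# Illustrative sketch (added): the intended guard order when serving a shared link.
def example_serve_share(share: "ContentShare") -> bool:
    share.increment_view()             # views are counted even if the download is refused
    if not share.is_valid():           # checks is_active, expiry and the download cap
        return False
    return share.increment_download()  # counts the download, reports remaining validity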
class ContentMetadata(BaseModel):
"""Extended metadata for content files."""
__tablename__ = "content_metadata"
content_id = Column(Integer, ForeignKey('content.id'), unique=True, nullable=False, index=True)
# Image metadata
image_width = Column(Integer, nullable=True)
image_height = Column(Integer, nullable=True)
image_dpi = Column(Integer, nullable=True)
image_color_space = Column(String(50), nullable=True)
# Video metadata
video_duration = Column(Integer, nullable=True) # seconds
video_bitrate = Column(Integer, nullable=True)
video_fps = Column(Integer, nullable=True)
video_resolution = Column(String(20), nullable=True) # e.g., "1920x1080"
video_codec = Column(String(50), nullable=True)
# Audio metadata
audio_duration = Column(Integer, nullable=True) # seconds
audio_bitrate = Column(Integer, nullable=True)
audio_sample_rate = Column(Integer, nullable=True)
audio_channels = Column(Integer, nullable=True)
audio_codec = Column(String(50), nullable=True)
# Document metadata
document_pages = Column(Integer, nullable=True)
document_words = Column(Integer, nullable=True)
document_language = Column(String(10), nullable=True)
document_author = Column(String(255), nullable=True)
# EXIF data (JSON)
exif_data = Column(Text, nullable=True)
# GPS coordinates
gps_latitude = Column(String(50), nullable=True)
gps_longitude = Column(String(50), nullable=True)
gps_altitude = Column(String(50), nullable=True)
# Technical metadata
compression_ratio = Column(String(20), nullable=True)
quality_score = Column(Integer, nullable=True) # 0-100
# Relationships
content = relationship("Content")
def to_dict(self) -> Dict[str, Any]:
"""Convert metadata to dictionary."""
data = super().to_dict(exclude={"content_id"})
# Parse EXIF data if present
if self.exif_data:
try:
import json
data["exif_data"] = json.loads(self.exif_data)
except (ValueError, TypeError):
data["exif_data"] = None
return data
def set_exif_data(self, exif_dict: Dict[str, Any]) -> None:
"""Set EXIF data from dictionary."""
if exif_dict:
import json
self.exif_data = json.dumps(exif_dict)
else:
self.exif_data = None
def get_exif_data(self) -> Optional[Dict[str, Any]]:
"""Get EXIF data as dictionary."""
if not self.exif_data:
return None
try:
import json
return json.loads(self.exif_data)
except (ValueError, TypeError):
return None
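# Illustrative sketch (added): EXIF round-trip through the JSON text column;
# the EXIF values are placeholders.
def example_exif_roundtrip(meta: "ContentMetadata") -> Optional[Dict[str, Any]]:
    meta.set_exif_data({"Model": "Example Cam", "ISO": 200})  # serialized to JSON text
    return meta.get_exif_data()  # parsed back into a dict (None if unset or unparsable)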
class ContentVersion(BaseModel):
"""Content version history for tracking changes."""
__tablename__ = "content_versions"
content_id = Column(Integer, ForeignKey('content.id'), nullable=False, index=True)
user_id = Column(Integer, ForeignKey('users.id'), nullable=False, index=True)
# Version information
version_number = Column(Integer, nullable=False)
version_name = Column(String(100), nullable=True)
change_description = Column(Text, nullable=True)
# File information
file_path = Column(String(500), nullable=False)
file_size = Column(BigInteger, nullable=False)
file_hash = Column(String(64), nullable=False)
# Status
is_current = Column(Boolean, default=False, nullable=False)
# Relationships
content = relationship("Content")
user = relationship("User")
__table_args__ = (
Index('idx_versions_content_number', 'content_id', 'version_number'),
Index('idx_versions_current', 'content_id', 'is_current'),
)
def mark_as_current(self) -> None:
"""Mark this version as current."""
self.is_current = True
# Add relationship to User model
# This would be added to the User model:
# content = relationship("Content", back_populates="user")
420
app/core/models/user.py Normal file
@ -0,0 +1,420 @@
"""
User model with async support and enhanced security
"""
import hashlib
import secrets
from datetime import datetime, timedelta
from typing import Optional, List, Dict, Any
from enum import Enum
from sqlalchemy import Column, String, BigInteger, Boolean, Integer, DateTime, Index, text
from sqlalchemy.dialects.postgresql import ARRAY, JSONB
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import relationship
import structlog
from app.core.models.base import BaseModel
from app.core.config import settings
logger = structlog.get_logger(__name__)
class UserRole(str, Enum):
"""User role enumeration"""
USER = "user"
MODERATOR = "moderator"
ADMIN = "admin"
SUPER_ADMIN = "super_admin"
class UserStatus(str, Enum):
"""User status enumeration"""
ACTIVE = "active"
SUSPENDED = "suspended"
BANNED = "banned"
PENDING = "pending"
class User(BaseModel):
"""Enhanced User model with security and async support"""
__tablename__ = 'users'
# Telegram specific fields
telegram_id = Column(
BigInteger,
nullable=False,
unique=True,
index=True,
comment="Telegram user ID"
)
username = Column(
String(512),
nullable=True,
index=True,
comment="Telegram username"
)
first_name = Column(
String(256),
nullable=True,
comment="User first name"
)
last_name = Column(
String(256),
nullable=True,
comment="User last name"
)
# Localization
language_code = Column(
String(8),
nullable=False,
default="en",
comment="User language code"
)
# Security and access control
role = Column(
String(32),
nullable=False,
default=UserRole.USER.value,
index=True,
comment="User role"
)
permissions = Column(
ARRAY(String),
nullable=False,
default=list,
comment="User permissions list"
)
# Activity tracking
last_activity = Column(
"last_use", # Keep old column name for compatibility
DateTime,
nullable=False,
default=datetime.utcnow,
index=True,
comment="Last user activity timestamp"
)
login_count = Column(
Integer,
nullable=False,
default=0,
comment="Total login count"
)
# Account status
is_verified = Column(
Boolean,
nullable=False,
default=False,
comment="Whether user is verified"
)
is_premium = Column(
Boolean,
nullable=False,
default=False,
comment="Whether user has premium access"
)
# Security settings
two_factor_enabled = Column(
Boolean,
nullable=False,
default=False,
comment="Whether 2FA is enabled"
)
security_settings = Column(
JSONB,
nullable=False,
default=dict,
comment="User security settings"
)
# Preferences
preferences = Column(
JSONB,
nullable=False,
default=dict,
comment="User preferences and settings"
)
# Statistics
content_uploaded_count = Column(
Integer,
nullable=False,
default=0,
comment="Number of content items uploaded"
)
content_purchased_count = Column(
Integer,
nullable=False,
default=0,
comment="Number of content items purchased"
)
# Rate limiting
rate_limit_reset = Column(
DateTime,
nullable=True,
comment="Rate limit reset timestamp"
)
rate_limit_count = Column(
Integer,
nullable=False,
default=0,
comment="Current rate limit count"
)
# Relationships
balances = relationship('UserBalance', back_populates='user', cascade="all, delete-orphan")
transactions = relationship('InternalTransaction', back_populates='user', cascade="all, delete-orphan")
wallet_connections = relationship('WalletConnection', back_populates='user', cascade="all, delete-orphan")
content_items = relationship('UserContent', back_populates='user', cascade="all, delete-orphan")
actions = relationship('UserAction', back_populates='user', cascade="all, delete-orphan")
activities = relationship('UserActivity', back_populates='user', cascade="all, delete-orphan")
# Indexes for performance
__table_args__ = (
Index('idx_users_telegram_id', 'telegram_id'),
Index('idx_users_username', 'username'),
Index('idx_users_role_status', 'role', 'status'),
Index('idx_users_last_activity', 'last_activity'),
Index('idx_users_created_at', 'created_at'),
)
def __str__(self) -> str:
"""String representation"""
return f"User({self.id}, telegram_id={self.telegram_id}, username={self.username})"
@property
def full_name(self) -> str:
"""Get user's full name"""
parts = [self.first_name, self.last_name]
return " ".join(filter(None, parts)) or self.username or f"User_{self.telegram_id}"
@property
def display_name(self) -> str:
"""Get user's display name"""
return self.username or self.full_name
@property
def is_admin(self) -> bool:
"""Check if user is admin"""
return self.role in [UserRole.ADMIN.value, UserRole.SUPER_ADMIN.value]
@property
def is_moderator(self) -> bool:
"""Check if user is moderator or higher"""
return self.role in [UserRole.MODERATOR.value, UserRole.ADMIN.value, UserRole.SUPER_ADMIN.value]
@property
def cache_key(self) -> str:
"""Override cache key to include telegram_id"""
return f"user:telegram:{self.telegram_id}"
def has_permission(self, permission: str) -> bool:
"""Check if user has specific permission"""
if self.is_admin:
return True
return permission in (self.permissions or [])
def add_permission(self, permission: str) -> None:
"""Add permission to user"""
if not self.permissions:
self.permissions = []
if permission not in self.permissions:
self.permissions.append(permission)
def remove_permission(self, permission: str) -> None:
"""Remove permission from user"""
if self.permissions and permission in self.permissions:
self.permissions.remove(permission)
def update_activity(self) -> None:
"""Update user activity timestamp"""
self.last_activity = datetime.utcnow()
self.login_count += 1
def check_rate_limit(self, limit: int = None, window: int = None) -> bool:
"""Check if user is within rate limits"""
if self.is_admin:
return True
limit = limit or settings.RATE_LIMIT_REQUESTS
window = window or settings.RATE_LIMIT_WINDOW
now = datetime.utcnow()
# Reset counter if window has passed
if not self.rate_limit_reset or now > self.rate_limit_reset:
self.rate_limit_reset = now + timedelta(seconds=window)
self.rate_limit_count = 0
return self.rate_limit_count < limit
def increment_rate_limit(self) -> None:
"""Increment rate limit counter"""
if not self.is_admin:
self.rate_limit_count += 1
def set_preference(self, key: str, value: Any) -> None:
"""Set user preference"""
if not self.preferences:
self.preferences = {}
self.preferences[key] = value
def get_preference(self, key: str, default: Any = None) -> Any:
"""Get user preference"""
if not self.preferences:
return default
return self.preferences.get(key, default)
def set_security_setting(self, key: str, value: Any) -> None:
"""Set security setting"""
if not self.security_settings:
self.security_settings = {}
self.security_settings[key] = value
def get_security_setting(self, key: str, default: Any = None) -> Any:
"""Get security setting"""
if not self.security_settings:
return default
return self.security_settings.get(key, default)
def generate_api_token(self) -> str:
"""Generate secure API token for user"""
token_data = f"{self.id}:{self.telegram_id}:{datetime.utcnow().timestamp()}:{secrets.token_hex(16)}"
return hashlib.sha256(token_data.encode()).hexdigest()
def can_upload_content(self) -> bool:
"""Check if user can upload content"""
if self.status != UserStatus.ACTIVE.value:
return False
if not self.check_rate_limit(limit=10, window=3600): # 10 uploads per hour
return False
return True
def can_purchase_content(self) -> bool:
"""Check if user can purchase content"""
return self.status == UserStatus.ACTIVE.value
@classmethod
async def get_by_telegram_id(
cls,
session: AsyncSession,
telegram_id: int
) -> Optional['User']:
"""Get user by Telegram ID"""
try:
stmt = select(cls).where(cls.telegram_id == telegram_id)
result = await session.execute(stmt)
return result.scalar_one_or_none()
except Exception as e:
logger.error("Error getting user by telegram_id", telegram_id=telegram_id, error=str(e))
return None
@classmethod
async def get_by_username(
cls,
session: AsyncSession,
username: str
) -> Optional['User']:
"""Get user by username"""
try:
stmt = select(cls).where(cls.username == username)
result = await session.execute(stmt)
return result.scalar_one_or_none()
except Exception as e:
logger.error("Error getting user by username", username=username, error=str(e))
return None
@classmethod
async def get_active_users(
cls,
session: AsyncSession,
days: int = 30,
limit: Optional[int] = None
) -> List['User']:
"""Get active users within specified days"""
try:
cutoff_date = datetime.utcnow() - timedelta(days=days)
stmt = select(cls).where(
cls.last_activity >= cutoff_date,
cls.status == UserStatus.ACTIVE.value
).order_by(cls.last_activity.desc())
if limit:
stmt = stmt.limit(limit)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting active users", days=days, error=str(e))
return []
@classmethod
async def get_admins(cls, session: AsyncSession) -> List['User']:
"""Get all admin users"""
try:
stmt = select(cls).where(
cls.role.in_([UserRole.ADMIN.value, UserRole.SUPER_ADMIN.value])
)
result = await session.execute(stmt)
return result.scalars().all()
except Exception as e:
logger.error("Error getting admin users", error=str(e))
return []
@classmethod
async def create_from_telegram(
cls,
session: AsyncSession,
telegram_id: int,
username: Optional[str] = None,
first_name: Optional[str] = None,
last_name: Optional[str] = None,
language_code: str = "en"
) -> 'User':
"""Create user from Telegram data"""
user = cls(
telegram_id=telegram_id,
username=username,
first_name=first_name,
last_name=last_name,
language_code=language_code,
status=UserStatus.ACTIVE.value
)
session.add(user)
await session.commit()
await session.refresh(user)
logger.info("User created from Telegram", telegram_id=telegram_id, user_id=user.id)
return user
def to_dict(self) -> Dict[str, Any]:
"""Convert to dictionary with safe data"""
data = super().to_dict()
# Remove sensitive fields
sensitive_fields = ['security_settings', 'permissions']
for field in sensitive_fields:
data.pop(field, None)
return data
def to_public_dict(self) -> Dict[str, Any]:
"""Convert to public dictionary with minimal data"""
return {
'id': str(self.id),
'username': self.username,
'display_name': self.display_name,
'is_verified': self.is_verified,
'is_premium': self.is_premium,
'created_at': self.created_at.isoformat() if self.created_at else None
}
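# Illustrative sketch (added, not part of the original module): the usual get-or-create
# flow when a Telegram update arrives. Assumes an AsyncSession from the application's
# session factory; commit behaviour follows create_from_telegram above.
async def example_get_or_create_user(session: AsyncSession, telegram_id: int, username: Optional[str] = None) -> 'User':
    user = await User.get_by_telegram_id(session, telegram_id)
    if user is None:
        user = await User.create_from_telegram(session, telegram_id, username=username)
    user.update_activity()            # bumps last_activity and login_count
    if user.check_rate_limit():
        user.increment_rate_limit()   # count this request against the current window
    return user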
@ -0,0 +1,247 @@
"""Compatible user models for MariaDB."""
from datetime import datetime
from typing import Optional, List, Dict, Any
from sqlalchemy import Column, String, Boolean, Text, Integer, DateTime, Index
from sqlalchemy.orm import relationship
from app.core.models.base_compatible import BaseModel
class User(BaseModel):
"""User model compatible with existing MariaDB schema."""
__tablename__ = "users"
# Basic user information
username = Column(String(50), unique=True, nullable=False, index=True)
email = Column(String(100), unique=True, nullable=True, index=True)
password_hash = Column(String(255), nullable=False)
# User status and flags
is_active = Column(Boolean, default=True, nullable=False)
is_verified = Column(Boolean, default=False, nullable=False)
is_admin = Column(Boolean, default=False, nullable=False)
# Profile information
first_name = Column(String(50), nullable=True)
last_name = Column(String(50), nullable=True)
bio = Column(Text, nullable=True)
avatar_url = Column(String(255), nullable=True)
# System tracking
last_login = Column(DateTime, nullable=True)
login_count = Column(Integer, default=0, nullable=False)
# Storage and limits
storage_used = Column(Integer, default=0, nullable=False) # bytes
storage_limit = Column(Integer, default=100*1024*1024, nullable=False) # 100MB default
# TON Blockchain integration
ton_wallet_address = Column(String(100), nullable=True, index=True)
ton_balance = Column(Integer, default=0, nullable=False) # nanotons
# License and subscription
license_key = Column(String(100), nullable=True, index=True)
license_expires = Column(DateTime, nullable=True)
subscription_level = Column(String(20), default="free", nullable=False)
# API access
api_key = Column(String(100), nullable=True, unique=True, index=True)
api_calls_count = Column(Integer, default=0, nullable=False)
api_calls_limit = Column(Integer, default=1000, nullable=False)
# Relationships will be defined when we create content models
# Table indexes for performance
__table_args__ = (
Index('idx_users_username_active', 'username', 'is_active'),
Index('idx_users_email_verified', 'email', 'is_verified'),
Index('idx_users_ton_wallet', 'ton_wallet_address'),
Index('idx_users_license', 'license_key', 'license_expires'),
)
def check_storage_limit(self, file_size: int) -> bool:
"""Check if user can upload file of given size."""
return (self.storage_used + file_size) <= self.storage_limit
def update_storage_usage(self, size_change: int) -> None:
"""Update user's storage usage."""
self.storage_used = max(0, self.storage_used + size_change)
def is_license_valid(self) -> bool:
"""Check if user's license is valid."""
if not self.license_key or not self.license_expires:
return False
return self.license_expires > datetime.utcnow()
def can_make_api_call(self) -> bool:
"""Check if user can make API call."""
return self.api_calls_count < self.api_calls_limit
def increment_api_calls(self) -> None:
"""Increment API calls counter."""
self.api_calls_count += 1
def reset_api_calls(self) -> None:
"""Reset API calls counter (for monthly reset)."""
self.api_calls_count = 0
def get_storage_usage_percent(self) -> float:
"""Get storage usage as percentage."""
if self.storage_limit == 0:
return 0.0
return (self.storage_used / self.storage_limit) * 100
def get_api_usage_percent(self) -> float:
"""Get API usage as percentage."""
if self.api_calls_limit == 0:
return 0.0
return (self.api_calls_count / self.api_calls_limit) * 100
def get_display_name(self) -> str:
"""Get user's display name."""
if self.first_name and self.last_name:
return f"{self.first_name} {self.last_name}"
elif self.first_name:
return self.first_name
return self.username
def to_dict(self, include_sensitive: bool = False) -> Dict[str, Any]:
"""Convert to dictionary with option to exclude sensitive data."""
exclude = set()
if not include_sensitive:
exclude.update({"password_hash", "api_key", "license_key"})
data = super().to_dict(exclude=exclude)
# Add computed fields
data.update({
"display_name": self.get_display_name(),
"storage_usage_percent": self.get_storage_usage_percent(),
"api_usage_percent": self.get_api_usage_percent(),
"license_valid": self.is_license_valid(),
})
return data
def to_public_dict(self) -> Dict[str, Any]:
"""Convert to public dictionary (minimal user info)."""
return {
"id": self.id,
"username": self.username,
"display_name": self.get_display_name(),
"avatar_url": self.avatar_url,
"is_verified": self.is_verified,
"subscription_level": self.subscription_level,
"created_at": self.created_at.isoformat() if self.created_at else None,
}
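# Illustrative sketch (added): upload quota check against the compatible schema;
# the byte count is an example value.
def example_can_store(user: "User", incoming_size: int = 5 * 1024 * 1024) -> bool:
    if not user.check_storage_limit(incoming_size):
        return False                          # would exceed storage_limit
    user.update_storage_usage(incoming_size)  # caller is expected to persist the change
    return True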
class UserSession(BaseModel):
"""User session model for tracking active sessions."""
__tablename__ = "user_sessions"
user_id = Column(Integer, nullable=False, index=True)
session_token = Column(String(255), unique=True, nullable=False, index=True)
refresh_token = Column(String(255), unique=True, nullable=True, index=True)
# Session metadata
ip_address = Column(String(45), nullable=True) # IPv6 support
user_agent = Column(Text, nullable=True)
device_info = Column(Text, nullable=True)
# Session status
is_active = Column(Boolean, default=True, nullable=False)
expires_at = Column(DateTime, nullable=False)
last_activity = Column(DateTime, default=datetime.utcnow, nullable=False)
# Security flags
is_suspicious = Column(Boolean, default=False, nullable=False)
failed_attempts = Column(Integer, default=0, nullable=False)
__table_args__ = (
Index('idx_sessions_user_active', 'user_id', 'is_active'),
Index('idx_sessions_token', 'session_token'),
Index('idx_sessions_expires', 'expires_at'),
)
def is_expired(self) -> bool:
"""Check if session is expired."""
return datetime.utcnow() > self.expires_at
def is_valid(self) -> bool:
"""Check if session is valid."""
return self.is_active and not self.is_expired()
def extend_session(self, hours: int = 24) -> None:
"""Extend session expiration."""
from datetime import timedelta
self.expires_at = datetime.utcnow() + timedelta(hours=hours)
self.last_activity = datetime.utcnow()
def mark_suspicious(self) -> None:
"""Mark session as suspicious."""
self.is_suspicious = True
self.failed_attempts += 1
def deactivate(self) -> None:
"""Deactivate session."""
self.is_active = False
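# Illustrative sketch (added): validating a session token and rolling it forward
# on activity (sliding expiration).
def example_touch_session(session_row: "UserSession") -> bool:
    if not session_row.is_valid():      # inactive or past expires_at
        session_row.deactivate()
        return False
    session_row.extend_session(hours=24)
    return True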
class UserPreferences(BaseModel):
"""User preferences and settings."""
__tablename__ = "user_preferences"
user_id = Column(Integer, unique=True, nullable=False, index=True)
# UI preferences
theme = Column(String(20), default="light", nullable=False)
language = Column(String(10), default="en", nullable=False)
timezone = Column(String(50), default="UTC", nullable=False)
# Notification preferences
email_notifications = Column(Boolean, default=True, nullable=False)
upload_notifications = Column(Boolean, default=True, nullable=False)
storage_alerts = Column(Boolean, default=True, nullable=False)
# Privacy settings
public_profile = Column(Boolean, default=False, nullable=False)
show_email = Column(Boolean, default=False, nullable=False)
allow_indexing = Column(Boolean, default=True, nullable=False)
# Upload preferences
auto_optimize_images = Column(Boolean, default=True, nullable=False)
default_privacy = Column(String(20), default="private", nullable=False)
max_file_size_mb = Column(Integer, default=10, nullable=False)
# Cache and performance
cache_thumbnails = Column(Boolean, default=True, nullable=False)
preload_content = Column(Boolean, default=False, nullable=False)
def to_dict(self) -> Dict[str, Any]:
"""Convert preferences to dictionary."""
return super().to_dict(exclude={"user_id"})
@classmethod
def get_default_preferences(cls) -> Dict[str, Any]:
"""Get default user preferences."""
return {
"theme": "light",
"language": "en",
"timezone": "UTC",
"email_notifications": True,
"upload_notifications": True,
"storage_alerts": True,
"public_profile": False,
"show_email": False,
"allow_indexing": True,
"auto_optimize_images": True,
"default_privacy": "private",
"max_file_size_mb": 10,
"cache_thumbnails": True,
"preload_content": False,
}
@ -0,0 +1,13 @@
"""MY Network - Distributed Content Replication System."""
from .node_service import MyNetworkNodeService
from .sync_manager import ContentSyncManager
from .peer_manager import PeerManager
from .bootstrap_manager import BootstrapManager
__all__ = [
'MyNetworkNodeService',
'ContentSyncManager',
'PeerManager',
'BootstrapManager'
]
@ -0,0 +1,312 @@
"""Bootstrap Manager - управление bootstrap нодами и начальной конфигурацией."""
import json
import logging
from pathlib import Path
from typing import Dict, List, Optional, Any
from datetime import datetime
logger = logging.getLogger(__name__)
class BootstrapManager:
"""Менеджер для работы с bootstrap конфигурацией."""
def __init__(self, bootstrap_path: str = "bootstrap.json"):
self.bootstrap_path = Path(bootstrap_path)
self.config = {}
self.nodes_history_path = Path("nodes_history.json")
self.nodes_history = {"successful_connections": [], "last_updated": None}
logger.info(f"Bootstrap Manager initialized with path: {self.bootstrap_path}")
async def load_bootstrap_config(self) -> Dict[str, Any]:
"""Загрузка bootstrap конфигурации."""
try:
if not self.bootstrap_path.exists():
logger.error(f"Bootstrap config not found: {self.bootstrap_path}")
raise FileNotFoundError(f"Bootstrap config not found: {self.bootstrap_path}")
with open(self.bootstrap_path, 'r', encoding='utf-8') as f:
self.config = json.load(f)
logger.info(f"Bootstrap config loaded: {len(self.config.get('bootstrap_nodes', []))} nodes")
# Load the node history
await self._load_nodes_history()
return self.config
except Exception as e:
logger.error(f"Error loading bootstrap config: {e}")
raise
async def _load_nodes_history(self) -> None:
"""Загрузка истории подключенных нод."""
try:
if self.nodes_history_path.exists():
with open(self.nodes_history_path, 'r', encoding='utf-8') as f:
self.nodes_history = json.load(f)
logger.info(f"Loaded nodes history: {len(self.nodes_history.get('successful_connections', []))} nodes")
else:
logger.info("No nodes history found, starting fresh")
except Exception as e:
logger.error(f"Error loading nodes history: {e}")
self.nodes_history = {"successful_connections": [], "last_updated": None}
async def save_nodes_history(self) -> None:
"""Сохранение истории нод."""
try:
self.nodes_history["last_updated"] = datetime.utcnow().isoformat()
with open(self.nodes_history_path, 'w', encoding='utf-8') as f:
json.dump(self.nodes_history, f, indent=2, ensure_ascii=False)
logger.debug("Nodes history saved")
except Exception as e:
logger.error(f"Error saving nodes history: {e}")
def get_bootstrap_nodes(self) -> List[Dict[str, Any]]:
"""Получить список bootstrap нод."""
return self.config.get('bootstrap_nodes', [])
def get_network_settings(self) -> Dict[str, Any]:
"""Получить настройки сети."""
return self.config.get('network_settings', {})
def get_sync_settings(self) -> Dict[str, Any]:
"""Получить настройки синхронизации."""
return self.config.get('sync_settings', {})
def get_content_settings(self) -> Dict[str, Any]:
"""Получить настройки контента."""
return self.config.get('content_settings', {})
def get_security_settings(self) -> Dict[str, Any]:
"""Получить настройки безопасности."""
return self.config.get('security_settings', {})
def get_api_settings(self) -> Dict[str, Any]:
"""Получить настройки API."""
return self.config.get('api_settings', {})
def get_monitoring_settings(self) -> Dict[str, Any]:
"""Получить настройки мониторинга."""
return self.config.get('monitoring_settings', {})
def get_storage_settings(self) -> Dict[str, Any]:
"""Получить настройки хранилища."""
return self.config.get('storage_settings', {})
def get_consensus_settings(self) -> Dict[str, Any]:
"""Получить настройки консенсуса."""
return self.config.get('consensus', {})
def get_feature_flags(self) -> Dict[str, Any]:
"""Получить флаги функций."""
return self.config.get('feature_flags', {})
def is_feature_enabled(self, feature_name: str) -> bool:
"""Проверить, включена ли функция."""
return self.get_feature_flags().get(feature_name, False)
def get_regional_settings(self, region: str = None) -> Dict[str, Any]:
"""Получить региональные настройки."""
regional_settings = self.config.get('regional_settings', {})
if region and region in regional_settings:
return regional_settings[region]
return regional_settings
def get_emergency_settings(self) -> Dict[str, Any]:
"""Получить настройки экстренных ситуаций."""
return self.config.get('emergency_settings', {})
def is_emergency_mode(self) -> bool:
"""Проверить, включен ли режим экстренной ситуации."""
return self.get_emergency_settings().get('emergency_mode', False)
def get_nodes_from_history(self) -> List[Dict[str, Any]]:
"""Получить ноды из истории успешных подключений."""
return self.nodes_history.get('successful_connections', [])
def add_successful_connection(self, node_info: Dict[str, Any]) -> None:
"""Добавить информацию об успешном подключении."""
try:
# Update the existing entry or add a new one
existing_node = None
for i, node in enumerate(self.nodes_history['successful_connections']):
if node['node_id'] == node_info['node_id']:
existing_node = i
break
connection_info = {
"node_id": node_info['node_id'],
"address": node_info['address'],
"last_seen": datetime.utcnow().isoformat(),
"connection_count": node_info.get('connection_count', 1),
"performance_score": node_info.get('performance_score', 1.0),
"features": node_info.get('features', []),
"region": node_info.get('region', 'unknown'),
"metadata": node_info.get('metadata', {})
}
if existing_node is not None:
# Update the existing entry
old_info = self.nodes_history['successful_connections'][existing_node]
connection_info['connection_count'] = old_info.get('connection_count', 0) + 1
connection_info['first_seen'] = old_info.get('first_seen', connection_info['last_seen'])
self.nodes_history['successful_connections'][existing_node] = connection_info
else:
# Add a new entry
connection_info['first_seen'] = connection_info['last_seen']
self.nodes_history['successful_connections'].append(connection_info)
# Cap the history at 100 nodes
if len(self.nodes_history['successful_connections']) > 100:
# Sort by last connection time and keep the 100 most recent entries
self.nodes_history['successful_connections'].sort(
key=lambda x: x['last_seen'],
reverse=True
)
self.nodes_history['successful_connections'] = \
self.nodes_history['successful_connections'][:100]
logger.debug(f"Added successful connection to history: {node_info['node_id']}")
except Exception as e:
logger.error(f"Error adding successful connection: {e}")
def remove_failed_connection(self, node_id: str) -> None:
"""Удалить ноду из истории при неудачном подключении."""
try:
self.nodes_history['successful_connections'] = [
node for node in self.nodes_history['successful_connections']
if node['node_id'] != node_id
]
logger.debug(f"Removed failed connection from history: {node_id}")
except Exception as e:
logger.error(f"Error removing failed connection: {e}")
def get_preferred_nodes(self, max_nodes: int = 10) -> List[Dict[str, Any]]:
"""Получить предпочтительные ноды для подключения."""
try:
# Комбинировать bootstrap ноды и ноды из истории
all_nodes = []
# Добавить bootstrap ноды (высокий приоритет)
for node in self.get_bootstrap_nodes():
all_nodes.append({
"node_id": node['id'],
"address": node['address'],
"priority": 100, # Высокий приоритет для bootstrap
"features": node.get('features', []),
"region": node.get('region', 'unknown'),
"source": "bootstrap"
})
# Добавить ноды из истории
for node in self.get_nodes_from_history():
# Пропустить, если уже есть в bootstrap
if any(n['node_id'] == node['node_id'] for n in all_nodes):
continue
# Рассчитать приоритет на основе performance_score и connection_count
priority = min(90, node.get('performance_score', 0.5) * 50 +
min(40, node.get('connection_count', 1) * 2))
all_nodes.append({
"node_id": node['node_id'],
"address": node['address'],
"priority": priority,
"features": node.get('features', []),
"region": node.get('region', 'unknown'),
"source": "history"
})
# Sort by priority and take the top entries
all_nodes.sort(key=lambda x: x['priority'], reverse=True)
return all_nodes[:max_nodes]
except Exception as e:
logger.error(f"Error getting preferred nodes: {e}")
return []
def validate_config(self) -> bool:
"""Валидация конфигурации bootstrap."""
try:
required_fields = ['version', 'network_id', 'bootstrap_nodes']
for field in required_fields:
if field not in self.config:
logger.error(f"Missing required field: {field}")
return False
# Validate bootstrap nodes
bootstrap_nodes = self.config.get('bootstrap_nodes', [])
if not bootstrap_nodes:
logger.error("No bootstrap nodes configured")
return False
for node in bootstrap_nodes:
required_node_fields = ['id', 'address']
for field in required_node_fields:
if field not in node:
logger.error(f"Bootstrap node missing field: {field}")
return False
logger.info("Bootstrap configuration validated successfully")
return True
except Exception as e:
logger.error(f"Error validating config: {e}")
return False
def get_config_checksum(self) -> str:
"""Получить чек-сумму конфигурации."""
return self.config.get('checksum', '')
def verify_config_signature(self) -> bool:
"""Проверить подпись конфигурации."""
# Заглушка для проверки подписи
# В реальной реализации здесь была бы криптографическая проверка
signature = self.config.get('signature', '')
return bool(signature)
async def update_bootstrap_config(self, new_config: Dict[str, Any]) -> bool:
"""Обновление bootstrap конфигурации."""
try:
# Save a backup copy
backup_path = self.bootstrap_path.with_suffix('.backup')
if self.bootstrap_path.exists():
self.bootstrap_path.rename(backup_path)
# Write the new configuration
with open(self.bootstrap_path, 'w', encoding='utf-8') as f:
json.dump(new_config, f, indent=2, ensure_ascii=False)
# Reload the configuration
await self.load_bootstrap_config()
logger.info("Bootstrap configuration updated successfully")
return True
except Exception as e:
logger.error(f"Error updating bootstrap config: {e}")
# Restore from the backup copy
try:
if backup_path.exists():
backup_path.rename(self.bootstrap_path)
except Exception:
pass
return False
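# Illustrative sketch (added, not part of the module): typical startup flow for the
# manager; bootstrap.json is the default path used above.
async def example_bootstrap_startup() -> List[Dict[str, Any]]:
    manager = BootstrapManager("bootstrap.json")
    await manager.load_bootstrap_config()
    if not manager.validate_config():
        raise RuntimeError("Invalid bootstrap configuration")
    # Bootstrap nodes rank highest; previously seen healthy nodes fill the rest
    return manager.get_preferred_nodes(max_nodes=10)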
@ -0,0 +1,386 @@
"""MY Network Node Service - основной сервис ноды."""
import asyncio
import json
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Set, Any
from pathlib import Path
from sqlalchemy import select
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content
from app.core.cache import cache
from .bootstrap_manager import BootstrapManager
from .peer_manager import PeerManager
from .sync_manager import ContentSyncManager
logger = logging.getLogger(__name__)
class MyNetworkNodeService:
"""Основной сервис ноды MY Network."""
def __init__(self, node_id: str = None, storage_path: str = "./storage/my-network"):
self.node_id = node_id or self._generate_node_id()
self.storage_path = Path(storage_path)
self.storage_path.mkdir(parents=True, exist_ok=True)
# Initialize managers
self.bootstrap_manager = BootstrapManager()
self.peer_manager = PeerManager(self.node_id)
self.sync_manager = ContentSyncManager(self.node_id)
# Node state
self.is_running = False
self.start_time = None
self.last_sync_time = None
self.node_metrics = {
"requests_30min": 0,
"total_requests": 0,
"content_synced": 0,
"active_peers": 0,
"storage_used_mb": 0
}
# Request history used for load balancing
self.request_history = []
logger.info(f"MY Network Node Service initialized with ID: {self.node_id}")
def _generate_node_id(self) -> str:
"""Генерация уникального ID ноды."""
import uuid
return f"node-{uuid.uuid4().hex[:8]}"
async def start(self) -> None:
"""Запуск ноды MY Network."""
try:
logger.info(f"Starting MY Network Node: {self.node_id}")
# Загрузка bootstrap конфигурации
await self.bootstrap_manager.load_bootstrap_config()
# Инициализация peer manager
await self.peer_manager.initialize()
# Подключение к bootstrap нодам
await self._connect_to_bootstrap_nodes()
# Обнаружение других нод в сети
await self._discover_network_nodes()
# Запуск синхронизации контента
await self.sync_manager.start_sync_process()
# Запуск фоновых задач
asyncio.create_task(self._background_tasks())
self.is_running = True
self.start_time = datetime.utcnow()
logger.info(f"MY Network Node {self.node_id} started successfully")
except Exception as e:
logger.error(f"Failed to start MY Network Node: {e}")
raise
async def stop(self) -> None:
"""Остановка ноды MY Network."""
try:
logger.info(f"Stopping MY Network Node: {self.node_id}")
self.is_running = False
# Stop synchronization
await self.sync_manager.stop_sync_process()
# Disconnect from peers
await self.peer_manager.disconnect_all()
logger.info(f"MY Network Node {self.node_id} stopped")
except Exception as e:
logger.error(f"Error stopping MY Network Node: {e}")
async def _connect_to_bootstrap_nodes(self) -> None:
"""Подключение к bootstrap нодам."""
bootstrap_nodes = self.bootstrap_manager.get_bootstrap_nodes()
for node in bootstrap_nodes:
try:
# Do not connect to ourselves
if node["id"] == self.node_id:
continue
success = await self.peer_manager.connect_to_peer(
node["id"],
node["address"]
)
if success:
logger.info(f"Connected to bootstrap node: {node['id']}")
else:
logger.warning(f"Failed to connect to bootstrap node: {node['id']}")
except Exception as e:
logger.error(f"Error connecting to bootstrap node {node['id']}: {e}")
async def _discover_network_nodes(self) -> None:
"""Обнаружение других нод в сети."""
try:
# Запрос списка нод у подключенных пиров
connected_peers = self.peer_manager.get_connected_peers()
for peer_id in connected_peers:
try:
nodes_list = await self.peer_manager.request_nodes_list(peer_id)
for node_info in nodes_list:
# Skip ourselves
if node_info["id"] == self.node_id:
continue
# Try to connect to the new node
if not self.peer_manager.is_connected(node_info["id"]):
await self.peer_manager.connect_to_peer(
node_info["id"],
node_info["address"]
)
except Exception as e:
logger.error(f"Error discovering nodes from peer {peer_id}: {e}")
except Exception as e:
logger.error(f"Error in network discovery: {e}")
async def _background_tasks(self) -> None:
"""Фоновые задачи ноды."""
while self.is_running:
try:
# Обновление метрик
await self._update_metrics()
# Очистка истории запросов (оставляем только за последние 30 минут)
await self._cleanup_request_history()
# Проверка состояния пиров
await self.peer_manager.check_peers_health()
# Периодическая синхронизация
if self._should_sync():
await self.sync_manager.sync_with_network()
self.last_sync_time = datetime.utcnow()
# Обновление кэша статистики
await self._update_cache_stats()
await asyncio.sleep(30) # Проверка каждые 30 секунд
except Exception as e:
logger.error(f"Error in background tasks: {e}")
await asyncio.sleep(60) # Увеличиваем интервал при ошибке
async def _update_metrics(self) -> None:
"""Обновление метрик ноды."""
try:
# Подсчет запросов за последние 30 минут
cutoff_time = datetime.utcnow() - timedelta(minutes=30)
recent_requests = [
req for req in self.request_history
if req["timestamp"] > cutoff_time
]
self.node_metrics.update({
"requests_30min": len(recent_requests),
"active_peers": len(self.peer_manager.get_connected_peers()),
"storage_used_mb": await self._calculate_storage_usage(),
"uptime_hours": self._get_uptime_hours()
})
# Store in the cache for fast access
await cache.set(
f"my_network:node:{self.node_id}:metrics",
self.node_metrics,
ttl=60
)
except Exception as e:
logger.error(f"Error updating metrics: {e}")
async def _cleanup_request_history(self) -> None:
"""Очистка истории запросов."""
cutoff_time = datetime.utcnow() - timedelta(minutes=30)
self.request_history = [
req for req in self.request_history
if req["timestamp"] > cutoff_time
]
def _should_sync(self) -> bool:
"""Проверка, нужно ли запускать синхронизацию."""
if not self.last_sync_time:
return True
# Синхронизация каждые 5 минут
return datetime.utcnow() - self.last_sync_time > timedelta(minutes=5)
async def _calculate_storage_usage(self) -> int:
"""Подсчет использования хранилища в МБ."""
try:
total_size = 0
if self.storage_path.exists():
for file_path in self.storage_path.rglob("*"):
if file_path.is_file():
total_size += file_path.stat().st_size
return total_size // (1024 * 1024) # Convert to MB
except Exception as e:
logger.error(f"Error calculating storage usage: {e}")
return 0
def _get_uptime_hours(self) -> float:
"""Получение времени работы ноды в часах."""
if not self.start_time:
return 0.0
uptime = datetime.utcnow() - self.start_time
return uptime.total_seconds() / 3600
async def _update_cache_stats(self) -> None:
"""Обновление статистики в кэше."""
try:
stats = {
"node_id": self.node_id,
"is_running": self.is_running,
"start_time": self.start_time.isoformat() if self.start_time else None,
"last_sync_time": self.last_sync_time.isoformat() if self.last_sync_time else None,
"metrics": self.node_metrics,
"connected_peers": list(self.peer_manager.get_connected_peers()),
"sync_status": await self.sync_manager.get_sync_status()
}
await cache.set(
f"my_network:node:{self.node_id}:status",
stats,
ttl=30
)
except Exception as e:
logger.error(f"Error updating cache stats: {e}")
def record_request(self, request_info: Dict[str, Any]) -> None:
"""Записать информацию о запросе для метрик."""
self.request_history.append({
"timestamp": datetime.utcnow(),
"endpoint": request_info.get("endpoint", "unknown"),
"method": request_info.get("method", "GET"),
"client_ip": request_info.get("client_ip", "unknown")
})
self.node_metrics["total_requests"] += 1
def get_load_info(self) -> Dict[str, Any]:
"""Получить информацию о нагрузке ноды для балансировки."""
return {
"node_id": self.node_id,
"requests_30min": self.node_metrics["requests_30min"],
"load_percentage": min(100, (self.node_metrics["requests_30min"] / 1000) * 100),
"active_peers": self.node_metrics["active_peers"],
"storage_used_mb": self.node_metrics["storage_used_mb"],
"uptime_hours": self._get_uptime_hours(),
"is_healthy": self.is_running and self.node_metrics["active_peers"] > 0
}
async def replicate_content(self, content_hash: str, target_nodes: List[str] = None) -> Dict[str, Any]:
"""Реплицировать контент на другие ноды."""
try:
logger.info(f"Starting replication of content: {content_hash}")
# Найти контент в локальной БД
async with get_async_session() as session:
content = await session.get(Content, {"hash": content_hash})
if not content:
raise ValueError(f"Content not found: {content_hash}")
# Определить целевые ноды
if not target_nodes:
target_nodes = self.peer_manager.select_replication_nodes()
# Запустить репликацию через sync manager
result = await self.sync_manager.replicate_content_to_nodes(
content_hash,
target_nodes
)
logger.info(f"Content replication completed: {content_hash}")
return result
except Exception as e:
logger.error(f"Error replicating content {content_hash}: {e}")
raise
async def get_network_status(self) -> Dict[str, Any]:
"""Получить статус всей сети MY Network."""
try:
connected_peers = self.peer_manager.get_connected_peers()
sync_status = await self.sync_manager.get_sync_status()
# Получить статус от всех подключенных пиров
peer_statuses = {}
for peer_id in connected_peers:
try:
peer_status = await self.peer_manager.request_peer_status(peer_id)
peer_statuses[peer_id] = peer_status
except Exception as e:
peer_statuses[peer_id] = {"error": str(e)}
return {
"local_node": {
"id": self.node_id,
"status": "running" if self.is_running else "stopped",
"metrics": self.node_metrics,
"uptime_hours": self._get_uptime_hours()
},
"network": {
"connected_peers": len(connected_peers),
"total_discovered_nodes": len(peer_statuses) + 1,
"sync_status": sync_status,
"last_sync": self.last_sync_time.isoformat() if self.last_sync_time else None
},
"peers": peer_statuses
}
except Exception as e:
logger.error(f"Error getting network status: {e}")
return {"error": str(e)}
async def get_content_sync_status(self, content_hash: str) -> Dict[str, Any]:
"""Получить статус синхронизации конкретного контента."""
return await self.sync_manager.get_content_sync_status(content_hash)
# Global node service instance
_node_service: Optional[MyNetworkNodeService] = None
def get_node_service() -> MyNetworkNodeService:
"""Получить глобальный экземпляр сервиса ноды."""
global _node_service
if _node_service is None:
_node_service = MyNetworkNodeService()
return _node_service
async def initialize_my_network() -> None:
"""Инициализация MY Network."""
node_service = get_node_service()
await node_service.start()
async def shutdown_my_network() -> None:
"""Остановка MY Network."""
global _node_service
if _node_service:
await _node_service.stop()
_node_service = None
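# Usage sketch (illustrative, not part of this module): wiring MY Network into an
# ASGI application's lifecycle. The FastAPI import and `app` object below are
# assumptions for illustration only; any framework with startup/shutdown hooks works.
#
# from contextlib import asynccontextmanager
# from fastapi import FastAPI
#
# @asynccontextmanager
# async def lifespan(app: FastAPI):
#     await initialize_my_network()    # start the local node and its background loops
#     try:
#         yield
#     finally:
#         await shutdown_my_network()  # disconnect peers and release resources
#
# app = FastAPI(lifespan=lifespan)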

View File

@ -0,0 +1,477 @@
"""Peer Manager - управление подключениями к другим нодам."""
import asyncio
import aiohttp
import logging
from datetime import datetime, timedelta
from typing import Dict, List, Set, Optional, Any
from urllib.parse import urlparse
logger = logging.getLogger(__name__)
class PeerConnection:
"""Представление подключения к пиру."""
def __init__(self, peer_id: str, address: str):
self.peer_id = peer_id
self.address = address
self.connected_at = datetime.utcnow()
self.last_ping = None
self.last_pong = None
self.is_healthy = True
self.ping_failures = 0
self.request_count = 0
self.features = []
self.metadata = {}
@property
def uptime(self) -> timedelta:
"""Время подключения."""
return datetime.utcnow() - self.connected_at
@property
def ping_latency(self) -> Optional[float]:
"""Задержка пинга в миллисекундах."""
if self.last_ping and self.last_pong:
return (self.last_pong - self.last_ping).total_seconds() * 1000
return None
def mark_ping_sent(self):
"""Отметить отправку пинга."""
self.last_ping = datetime.utcnow()
def mark_pong_received(self):
"""Отметить получение понга."""
self.last_pong = datetime.utcnow()
self.ping_failures = 0
self.is_healthy = True
def mark_ping_failed(self):
"""Отметить неудачный пинг."""
self.ping_failures += 1
if self.ping_failures >= 3:
self.is_healthy = False
class PeerManager:
"""Менеджер для управления подключениями к пирам."""
def __init__(self, node_id: str):
self.node_id = node_id
self.connections: Dict[str, PeerConnection] = {}
self.blacklisted_peers: Set[str] = set()
self.connection_semaphore = asyncio.Semaphore(25)  # Max 25 outgoing connections
self.session: Optional[aiohttp.ClientSession] = None
logger.info(f"Peer Manager initialized for node: {node_id}")
async def initialize(self) -> None:
"""Инициализация менеджера пиров."""
try:
# Create an HTTP session for outgoing requests
timeout = aiohttp.ClientTimeout(total=30, connect=10)
self.session = aiohttp.ClientSession(
timeout=timeout,
headers={'User-Agent': f'MY-Network-Node/{self.node_id}'}
)
logger.info("Peer Manager initialized successfully")
except Exception as e:
logger.error(f"Error initializing Peer Manager: {e}")
raise
async def cleanup(self) -> None:
"""Очистка ресурсов."""
if self.session:
await self.session.close()
self.session = None
self.connections.clear()
logger.info("Peer Manager cleaned up")
async def connect_to_peer(self, peer_id: str, address: str) -> bool:
"""Подключение к пиру."""
try:
# Make sure we are not connecting to ourselves
if peer_id == self.node_id:
logger.debug(f"Skipping connection to self: {peer_id}")
return False
# Check the blacklist
if peer_id in self.blacklisted_peers:
logger.debug(f"Peer {peer_id} is blacklisted")
return False
# Check whether we are already connected
if peer_id in self.connections:
connection = self.connections[peer_id]
if connection.is_healthy:
logger.debug(f"Already connected to peer: {peer_id}")
return True
else:
# Drop the unhealthy connection
del self.connections[peer_id]
async with self.connection_semaphore:
logger.info(f"Connecting to peer: {peer_id} at {address}")
# Attempt the connection via a handshake
success = await self._perform_handshake(peer_id, address)
if success:
# Create the connection
connection = PeerConnection(peer_id, address)
self.connections[peer_id] = connection
logger.info(f"Successfully connected to peer: {peer_id}")
return True
else:
logger.warning(f"Failed to connect to peer: {peer_id}")
return False
except Exception as e:
logger.error(f"Error connecting to peer {peer_id}: {e}")
return False
async def _perform_handshake(self, peer_id: str, address: str) -> bool:
"""Выполнить handshake с пиром."""
try:
if not self.session:
return False
# Parse the address
parsed_url = self._parse_peer_address(address)
if not parsed_url:
return False
handshake_url = f"{parsed_url}/api/my/handshake"
handshake_data = {
"node_id": self.node_id,
"protocol_version": "1.0.0",
"features": [
"content_sync",
"consensus",
"monitoring"
],
"timestamp": datetime.utcnow().isoformat()
}
async with self.session.post(handshake_url, json=handshake_data) as response:
if response.status == 200:
response_data = await response.json()
# Validate the response
if (response_data.get("node_id") == peer_id and
response_data.get("status") == "accepted"):
# Store the peer information
if peer_id in self.connections:
self.connections[peer_id].features = response_data.get("features", [])
self.connections[peer_id].metadata = response_data.get("metadata", {})
return True
logger.warning(f"Handshake failed with peer {peer_id}: HTTP {response.status}")
return False
except Exception as e:
logger.error(f"Error in handshake with peer {peer_id}: {e}")
return False
def _parse_peer_address(self, address: str) -> Optional[str]:
"""Парсинг адреса пира."""
try:
# Supported formats:
# my://host:port
# http://host:port
# https://host:port
# host:port
if address.startswith("my://"):
# Convert the MY protocol scheme to HTTP
address = address.replace("my://", "http://")
elif not address.startswith(("http://", "https://")):
# Add an HTTP prefix
address = f"http://{address}"
parsed = urlparse(address)
if parsed.hostname:
return f"{parsed.scheme}://{parsed.netloc}"
return None
except Exception as e:
logger.error(f"Error parsing peer address {address}: {e}")
return None
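# Examples (illustrative): "my://node1.example.com:15100" and "node1.example.com:15100"
# both normalize to "http://node1.example.com:15100", while "https://node2.example.com"
# is returned unchanged.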
async def disconnect_from_peer(self, peer_id: str) -> None:
"""Отключение от пира."""
try:
if peer_id in self.connections:
connection = self.connections[peer_id]
# Try to send a disconnect notification
try:
await self._send_disconnect_notification(peer_id)
except:
pass  # Ignore errors during disconnect
# Remove the connection
del self.connections[peer_id]
logger.info(f"Disconnected from peer: {peer_id}")
except Exception as e:
logger.error(f"Error disconnecting from peer {peer_id}: {e}")
async def _send_disconnect_notification(self, peer_id: str) -> None:
"""Отправить уведомление об отключении."""
try:
if peer_id not in self.connections or not self.session:
return
connection = self.connections[peer_id]
parsed_url = self._parse_peer_address(connection.address)
if parsed_url:
disconnect_url = f"{parsed_url}/api/my/disconnect"
disconnect_data = {
"node_id": self.node_id,
"reason": "graceful_shutdown",
"timestamp": datetime.utcnow().isoformat()
}
async with self.session.post(disconnect_url, json=disconnect_data) as response:
if response.status == 200:
logger.debug(f"Disconnect notification sent to {peer_id}")
except Exception as e:
logger.debug(f"Error sending disconnect notification to {peer_id}: {e}")
async def disconnect_all(self) -> None:
"""Отключение от всех пиров."""
disconnect_tasks = []
for peer_id in list(self.connections.keys()):
disconnect_tasks.append(self.disconnect_from_peer(peer_id))
if disconnect_tasks:
await asyncio.gather(*disconnect_tasks, return_exceptions=True)
logger.info("Disconnected from all peers")
async def check_peers_health(self) -> None:
"""Проверка здоровья всех подключений."""
ping_tasks = []
for peer_id in list(self.connections.keys()):
ping_tasks.append(self._ping_peer(peer_id))
if ping_tasks:
await asyncio.gather(*ping_tasks, return_exceptions=True)
# Remove unhealthy connections
unhealthy_peers = [
peer_id for peer_id, conn in self.connections.items()
if not conn.is_healthy
]
for peer_id in unhealthy_peers:
logger.warning(f"Removing unhealthy peer: {peer_id}")
await self.disconnect_from_peer(peer_id)
async def _ping_peer(self, peer_id: str) -> None:
"""Пинг пира."""
try:
if peer_id not in self.connections or not self.session:
return
connection = self.connections[peer_id]
parsed_url = self._parse_peer_address(connection.address)
if not parsed_url:
connection.mark_ping_failed()
return
ping_url = f"{parsed_url}/api/my/ping"
connection.mark_ping_sent()
async with self.session.get(ping_url) as response:
if response.status == 200:
connection.mark_pong_received()
logger.debug(f"Ping successful to {peer_id}, latency: {connection.ping_latency:.1f}ms")
else:
connection.mark_ping_failed()
logger.debug(f"Ping failed to {peer_id}: HTTP {response.status}")
except Exception as e:
if peer_id in self.connections:
self.connections[peer_id].mark_ping_failed()
logger.debug(f"Ping error to {peer_id}: {e}")
def get_connected_peers(self) -> Set[str]:
"""Получить множество подключенных пиров."""
return {
peer_id for peer_id, conn in self.connections.items()
if conn.is_healthy
}
def is_connected(self, peer_id: str) -> bool:
"""Проверить, подключены ли к пиру."""
return (peer_id in self.connections and
self.connections[peer_id].is_healthy)
def get_peer_info(self, peer_id: str) -> Optional[Dict[str, Any]]:
"""Получить информацию о пире."""
if peer_id not in self.connections:
return None
connection = self.connections[peer_id]
return {
"peer_id": peer_id,
"address": connection.address,
"connected_at": connection.connected_at.isoformat(),
"uptime_seconds": connection.uptime.total_seconds(),
"is_healthy": connection.is_healthy,
"ping_latency_ms": connection.ping_latency,
"ping_failures": connection.ping_failures,
"request_count": connection.request_count,
"features": connection.features,
"metadata": connection.metadata
}
def get_all_peers_info(self) -> Dict[str, Dict[str, Any]]:
"""Получить информацию обо всех пирах."""
return {
peer_id: self.get_peer_info(peer_id)
for peer_id in self.connections.keys()
}
def select_replication_nodes(self, count: int = 3) -> List[str]:
"""Выбрать ноды для репликации контента."""
healthy_peers = [
peer_id for peer_id, conn in self.connections.items()
if conn.is_healthy
]
if len(healthy_peers) <= count:
return healthy_peers
# Pick the nodes with the best characteristics
peer_scores = []
for peer_id in healthy_peers:
connection = self.connections[peer_id]
# Compute a score based on several factors
latency_score = 1.0
if connection.ping_latency:
latency_score = max(0.1, 1.0 - (connection.ping_latency / 1000))
uptime_score = min(1.0, connection.uptime.total_seconds() / 3600)  # Uptime in hours, capped at 1.0
failure_score = max(0.1, 1.0 - (connection.ping_failures / 10))
total_score = (latency_score * 0.4 + uptime_score * 0.3 + failure_score * 0.3)
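# Example (illustrative): 100 ms latency -> 0.9, 30 min uptime -> 0.5, 1 ping failure -> 0.9,
# so total_score = 0.9 * 0.4 + 0.5 * 0.3 + 0.9 * 0.3 = 0.78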
peer_scores.append((peer_id, total_score))
# Sort by score and take the top entries
peer_scores.sort(key=lambda x: x[1], reverse=True)
return [peer_id for peer_id, _ in peer_scores[:count]]
async def request_nodes_list(self, peer_id: str) -> List[Dict[str, Any]]:
"""Запросить список нод у пира."""
try:
if peer_id not in self.connections or not self.session:
return []
connection = self.connections[peer_id]
parsed_url = self._parse_peer_address(connection.address)
if not parsed_url:
return []
nodes_url = f"{parsed_url}/api/my/nodes"
async with self.session.get(nodes_url) as response:
if response.status == 200:
data = await response.json()
return data.get("nodes", [])
else:
logger.warning(f"Failed to get nodes list from {peer_id}: HTTP {response.status}")
return []
except Exception as e:
logger.error(f"Error requesting nodes list from {peer_id}: {e}")
return []
async def request_peer_status(self, peer_id: str) -> Dict[str, Any]:
"""Запросить статус пира."""
try:
if peer_id not in self.connections or not self.session:
return {"error": "Not connected"}
connection = self.connections[peer_id]
parsed_url = self._parse_peer_address(connection.address)
if not parsed_url:
return {"error": "Invalid address"}
status_url = f"{parsed_url}/api/my/status"
async with self.session.get(status_url) as response:
if response.status == 200:
return await response.json()
else:
return {"error": f"HTTP {response.status}"}
except Exception as e:
logger.error(f"Error requesting peer status from {peer_id}: {e}")
return {"error": str(e)}
def add_to_blacklist(self, peer_id: str, duration_hours: int = 24) -> None:
"""Добавить пира в черный список."""
self.blacklisted_peers.add(peer_id)
# Schedule removal from the blacklist
async def remove_from_blacklist():
await asyncio.sleep(duration_hours * 3600)
self.blacklisted_peers.discard(peer_id)
logger.info(f"Removed {peer_id} from blacklist")
asyncio.create_task(remove_from_blacklist())
logger.info(f"Added {peer_id} to blacklist for {duration_hours} hours")
def get_connection_stats(self) -> Dict[str, Any]:
"""Получить статистику подключений."""
healthy_connections = sum(1 for conn in self.connections.values() if conn.is_healthy)
return {
"total_connections": len(self.connections),
"healthy_connections": healthy_connections,
"blacklisted_peers": len(self.blacklisted_peers),
"average_latency_ms": self._calculate_average_latency(),
"connection_details": [
{
"peer_id": peer_id,
"uptime_hours": conn.uptime.total_seconds() / 3600,
"ping_latency_ms": conn.ping_latency,
"is_healthy": conn.is_healthy
}
for peer_id, conn in self.connections.items()
]
}
def _calculate_average_latency(self) -> Optional[float]:
"""Рассчитать среднюю задержку."""
latencies = [
conn.ping_latency for conn in self.connections.values()
if conn.ping_latency is not None and conn.is_healthy
]
if latencies:
return sum(latencies) / len(latencies)
return None
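# Usage sketch (illustrative; the node ids and address below are placeholders):
#
# async def demo_peer_manager():
#     manager = PeerManager("node-local")
#     await manager.initialize()
#     try:
#         if await manager.connect_to_peer("node-remote", "my://peer.example.com:15100"):
#             await manager.check_peers_health()
#             print(manager.get_connection_stats())
#     finally:
#         await manager.disconnect_all()
#         await manager.cleanup()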

View File

@ -0,0 +1,698 @@
"""Content Sync Manager - синхронизация контента между нодами."""
import asyncio
import aiohttp
import hashlib
import logging
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, Any, Set
from sqlalchemy import select, and_
from app.core.database_compatible import get_async_session
from app.core.models.content_compatible import Content, ContentMetadata
from app.core.cache import cache
logger = logging.getLogger(__name__)
class ContentSyncStatus:
"""Статус синхронизации контента."""
def __init__(self, content_hash: str):
self.content_hash = content_hash
self.sync_started = datetime.utcnow()
self.sync_completed = None
self.nodes_synced = set()
self.nodes_failed = set()
self.total_nodes = 0
self.bytes_synced = 0
self.status = "syncing" # syncing, completed, failed, partial
self.error_message = None
@property
def is_completed(self) -> bool:
return self.status in ["completed", "partial"]
@property
def success_rate(self) -> float:
if self.total_nodes == 0:
return 0.0
return len(self.nodes_synced) / self.total_nodes
def to_dict(self) -> Dict[str, Any]:
return {
"content_hash": self.content_hash,
"status": self.status,
"sync_started": self.sync_started.isoformat(),
"sync_completed": self.sync_completed.isoformat() if self.sync_completed else None,
"nodes_synced": list(self.nodes_synced),
"nodes_failed": list(self.nodes_failed),
"total_nodes": self.total_nodes,
"bytes_synced": self.bytes_synced,
"success_rate": self.success_rate,
"error_message": self.error_message
}
class ContentSyncManager:
"""Менеджер синхронизации контента между нодами MY Network."""
def __init__(self, node_id: str):
self.node_id = node_id
self.sync_queue: asyncio.Queue = asyncio.Queue()
self.active_syncs: Dict[str, ContentSyncStatus] = {}
self.sync_history: List[ContentSyncStatus] = []
self.is_running = False
self.sync_workers: List[asyncio.Task] = []
self.session: Optional[aiohttp.ClientSession] = None
# Synchronization settings
self.max_concurrent_syncs = 5
self.chunk_size = 1024 * 1024 # 1MB chunks
self.sync_timeout = 300 # 5 minutes per content
self.retry_attempts = 3
logger.info(f"Content Sync Manager initialized for node: {node_id}")
async def start_sync_process(self) -> None:
"""Запуск процесса синхронизации."""
try:
# Create an HTTP session
timeout = aiohttp.ClientTimeout(total=self.sync_timeout)
self.session = aiohttp.ClientSession(timeout=timeout)
# Start the sync workers
self.is_running = True
for i in range(self.max_concurrent_syncs):
worker = asyncio.create_task(self._sync_worker(f"worker-{i}"))
self.sync_workers.append(worker)
logger.info(f"Started {len(self.sync_workers)} sync workers")
except Exception as e:
logger.error(f"Error starting sync process: {e}")
raise
async def stop_sync_process(self) -> None:
"""Остановка процесса синхронизации."""
try:
self.is_running = False
# Stop the workers
for worker in self.sync_workers:
worker.cancel()
if self.sync_workers:
await asyncio.gather(*self.sync_workers, return_exceptions=True)
# Close the HTTP session
if self.session:
await self.session.close()
self.session = None
self.sync_workers.clear()
logger.info("Sync process stopped")
except Exception as e:
logger.error(f"Error stopping sync process: {e}")
async def _sync_worker(self, worker_name: str) -> None:
"""Worker для обработки очереди синхронизации."""
logger.info(f"Sync worker {worker_name} started")
while self.is_running:
try:
# Get a task from the queue
sync_task = await asyncio.wait_for(
self.sync_queue.get(),
timeout=1.0
)
# Process the sync task
await self._process_sync_task(sync_task)
except asyncio.TimeoutError:
continue  # Keep waiting
except Exception as e:
logger.error(f"Error in sync worker {worker_name}: {e}")
await asyncio.sleep(5)  # Back off on error
logger.info(f"Sync worker {worker_name} stopped")
async def _process_sync_task(self, sync_task: Dict[str, Any]) -> None:
"""Обработка задачи синхронизации."""
try:
task_type = sync_task.get("type")
content_hash = sync_task.get("content_hash")
target_nodes = sync_task.get("target_nodes", [])
if task_type == "replicate":
await self._replicate_content(content_hash, target_nodes)
elif task_type == "download":
source_node = sync_task.get("source_node")
await self._download_content(content_hash, source_node)
elif task_type == "verify":
await self._verify_content_integrity(content_hash)
else:
logger.warning(f"Unknown sync task type: {task_type}")
except Exception as e:
logger.error(f"Error processing sync task: {e}")
async def replicate_content_to_nodes(self, content_hash: str, target_nodes: List[str]) -> Dict[str, Any]:
"""Реплицировать контент на указанные ноды."""
try:
# Create a sync status record
sync_status = ContentSyncStatus(content_hash)
sync_status.total_nodes = len(target_nodes)
self.active_syncs[content_hash] = sync_status
# Add the task to the queue
sync_task = {
"type": "replicate",
"content_hash": content_hash,
"target_nodes": target_nodes
}
await self.sync_queue.put(sync_task)
logger.info(f"Queued replication of {content_hash} to {len(target_nodes)} nodes")
return {
"status": "queued",
"content_hash": content_hash,
"target_nodes": target_nodes,
"sync_id": content_hash
}
except Exception as e:
logger.error(f"Error queuing content replication: {e}")
raise
async def _replicate_content(self, content_hash: str, target_nodes: List[str]) -> None:
"""Реплицировать контент на целевые ноды."""
try:
if content_hash not in self.active_syncs:
logger.warning(f"No sync status found for content: {content_hash}")
return
sync_status = self.active_syncs[content_hash]
# Get the content from the local database
content_info = await self._get_local_content_info(content_hash)
if not content_info:
sync_status.status = "failed"
sync_status.error_message = "Content not found locally"
return
# Replicate to each node
replication_tasks = []
for node_id in target_nodes:
task = self._replicate_to_single_node(content_hash, node_id, content_info)
replication_tasks.append(task)
# Wait for all replications to finish
results = await asyncio.gather(*replication_tasks, return_exceptions=True)
# Process the results
for i, result in enumerate(results):
node_id = target_nodes[i]
if isinstance(result, Exception):
sync_status.nodes_failed.add(node_id)
logger.error(f"Replication to {node_id} failed: {result}")
elif result:
sync_status.nodes_synced.add(node_id)
sync_status.bytes_synced += content_info.get("file_size", 0)
logger.info(f"Successfully replicated to {node_id}")
else:
sync_status.nodes_failed.add(node_id)
# Finalize the sync
self._complete_sync(sync_status)
except Exception as e:
if content_hash in self.active_syncs:
self.active_syncs[content_hash].status = "failed"
self.active_syncs[content_hash].error_message = str(e)
logger.error(f"Error replicating content {content_hash}: {e}")
async def _replicate_to_single_node(self, content_hash: str, node_id: str, content_info: Dict[str, Any]) -> bool:
"""Реплицировать контент на одну ноду."""
try:
if not self.session:
return False
# Get the node address (via the peer manager)
from .node_service import get_node_service
node_service = get_node_service()
peer_info = node_service.peer_manager.get_peer_info(node_id)
if not peer_info:
logger.warning(f"No peer info for node: {node_id}")
return False
# Parse the address
peer_address = node_service.peer_manager._parse_peer_address(peer_info["address"])
if not peer_address:
return False
# Check whether replication is needed
check_url = f"{peer_address}/api/my/content/{content_hash}/exists"
async with self.session.get(check_url) as response:
if response.status == 200:
exists_data = await response.json()
if exists_data.get("exists", False):
logger.debug(f"Content {content_hash} already exists on {node_id}")
return True
# Start replication
replicate_url = f"{peer_address}/api/my/content/replicate"
# Prepare the replication payload
replication_data = {
"content_hash": content_hash,
"metadata": content_info,
"source_node": self.node_id
}
async with self.session.post(replicate_url, json=replication_data) as response:
if response.status == 200:
# Transfer the file itself
success = await self._upload_content_to_node(
content_hash,
peer_address,
content_info
)
return success
else:
logger.warning(f"Replication request failed to {node_id}: HTTP {response.status}")
return False
except Exception as e:
logger.error(f"Error replicating to node {node_id}: {e}")
return False
async def _upload_content_to_node(self, content_hash: str, peer_address: str, content_info: Dict[str, Any]) -> bool:
"""Загрузить файл контента на ноду."""
try:
if not self.session:
return False
# Locate the file locally
file_path = Path(content_info.get("file_path", ""))
if not file_path.exists():
logger.error(f"Local file not found: {file_path}")
return False
upload_url = f"{peer_address}/api/my/content/{content_hash}/upload"
# Create a multipart upload
with open(file_path, 'rb') as file:
data = aiohttp.FormData()
data.add_field('file', file, filename=content_info.get("filename", "unknown"))
async with self.session.post(upload_url, data=data) as response:
if response.status == 200:
result = await response.json()
return result.get("success", False)
else:
logger.error(f"File upload failed: HTTP {response.status}")
return False
except Exception as e:
logger.error(f"Error uploading content to node: {e}")
return False
async def _get_local_content_info(self, content_hash: str) -> Optional[Dict[str, Any]]:
"""Получить информацию о локальном контенте."""
try:
async with get_async_session() as session:
# Find the content by either of its hashes
stmt = select(Content).where((Content.md5_hash == content_hash) | (Content.sha256_hash == content_hash))
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content:
return None
# Get the metadata
metadata_stmt = select(ContentMetadata).where(ContentMetadata.content_id == content.id)
metadata_result = await session.execute(metadata_stmt)
metadata = metadata_result.scalar_one_or_none()
return {
"id": content.id,
"hash": content_hash,
"filename": content.filename,
"original_filename": content.original_filename,
"file_path": content.file_path,
"file_size": content.file_size,
"file_type": content.file_type,
"mime_type": content.mime_type,
"encrypted": content.encrypted if hasattr(content, 'encrypted') else False,
"metadata": metadata.to_dict() if metadata else {}
}
except Exception as e:
logger.error(f"Error getting local content info: {e}")
return None
async def download_content_from_network(self, content_hash: str, source_nodes: List[str] = None) -> bool:
"""Скачать контент из сети."""
try:
# Queue a download task for each source node
for source_node in (source_nodes or []):
sync_task = {
"type": "download",
"content_hash": content_hash,
"source_node": source_node
}
await self.sync_queue.put(sync_task)
logger.info(f"Queued download of {content_hash} from {len(source_nodes or [])} nodes")
return True
except Exception as e:
logger.error(f"Error queuing content download: {e}")
return False
async def _download_content(self, content_hash: str, source_node: str) -> bool:
"""Скачать контент с конкретной ноды."""
try:
if not self.session:
return False
# Get the source node address
from .node_service import get_node_service
node_service = get_node_service()
peer_info = node_service.peer_manager.get_peer_info(source_node)
if not peer_info:
logger.warning(f"No peer info for source node: {source_node}")
return False
peer_address = node_service.peer_manager._parse_peer_address(peer_info["address"])
if not peer_address:
return False
# Get the content metadata
metadata_url = f"{peer_address}/api/my/content/{content_hash}/metadata"
async with self.session.get(metadata_url) as response:
if response.status != 200:
logger.error(f"Failed to get content metadata: HTTP {response.status}")
return False
content_metadata = await response.json()
# Download the file
download_url = f"{peer_address}/api/my/content/{content_hash}/download"
async with self.session.get(download_url) as response:
if response.status != 200:
logger.error(f"Failed to download content: HTTP {response.status}")
return False
# Save the file locally
local_path = await self._save_downloaded_content(
content_hash,
response,
content_metadata
)
if local_path:
# Save to the database
await self._save_content_to_db(content_hash, local_path, content_metadata)
logger.info(f"Successfully downloaded content {content_hash} from {source_node}")
return True
return False
except Exception as e:
logger.error(f"Error downloading content from {source_node}: {e}")
return False
async def _save_downloaded_content(self, content_hash: str, response: aiohttp.ClientResponse, metadata: Dict[str, Any]) -> Optional[Path]:
"""Сохранить скачанный контент."""
try:
# Create the storage path
storage_path = Path("./storage/my-network/downloaded")
storage_path.mkdir(parents=True, exist_ok=True)
filename = metadata.get("filename", f"{content_hash}")
file_path = storage_path / filename
# Save the file
with open(file_path, 'wb') as f:
async for chunk in response.content.iter_chunked(self.chunk_size):
f.write(chunk)
# Verify integrity
if await self._verify_file_integrity(file_path, content_hash):
return file_path
else:
file_path.unlink()  # Remove the corrupted file
return None
except Exception as e:
logger.error(f"Error saving downloaded content: {e}")
return None
async def _verify_file_integrity(self, file_path: Path, expected_hash: str) -> bool:
"""Проверить целостность файла."""
try:
# Compute the file hashes
hash_md5 = hashlib.md5()
hash_sha256 = hashlib.sha256()
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(self.chunk_size), b""):
hash_md5.update(chunk)
hash_sha256.update(chunk)
file_md5 = hash_md5.hexdigest()
file_sha256 = hash_sha256.hexdigest()
# Check for a match
return expected_hash in [file_md5, file_sha256]
except Exception as e:
logger.error(f"Error verifying file integrity: {e}")
return False
async def _save_content_to_db(self, content_hash: str, file_path: Path, metadata: Dict[str, Any]) -> None:
"""Сохранить информацию о контенте в базу данных."""
try:
async with get_async_session() as session:
# Create the content record
content = Content(
filename=metadata.get("filename", file_path.name),
original_filename=metadata.get("original_filename", file_path.name),
file_path=str(file_path),
file_size=file_path.stat().st_size,
file_type=metadata.get("file_type", "unknown"),
mime_type=metadata.get("mime_type", "application/octet-stream"),
md5_hash=content_hash if len(content_hash) == 32 else None,
sha256_hash=content_hash if len(content_hash) == 64 else None,
is_active=True,
processing_status="completed"
)
session.add(content)
await session.flush()
# Save the metadata if present
if metadata.get("metadata"):
content_metadata = ContentMetadata(
content_id=content.id,
**metadata["metadata"]
)
session.add(content_metadata)
await session.commit()
logger.info(f"Saved content {content_hash} to database")
except Exception as e:
logger.error(f"Error saving content to database: {e}")
def _complete_sync(self, sync_status: ContentSyncStatus) -> None:
"""Завершить синхронизацию."""
sync_status.sync_completed = datetime.utcnow()
# Determine the final status
if len(sync_status.nodes_synced) == sync_status.total_nodes:
sync_status.status = "completed"
elif len(sync_status.nodes_synced) > 0:
sync_status.status = "partial"
else:
sync_status.status = "failed"
# Move to history
self.sync_history.append(sync_status)
del self.active_syncs[sync_status.content_hash]
# Cap the history size
if len(self.sync_history) > 100:
self.sync_history = self.sync_history[-100:]
logger.info(f"Sync completed for {sync_status.content_hash}: {sync_status.status}")
async def sync_with_network(self) -> Dict[str, Any]:
"""Синхронизация с сетью - обнаружение и загрузка нового контента."""
try:
from .node_service import get_node_service
node_service = get_node_service()
connected_peers = node_service.peer_manager.get_connected_peers()
if not connected_peers:
return {"status": "no_peers", "message": "No connected peers for sync"}
# Get content lists from all peers
network_content = {}
for peer_id in connected_peers:
try:
peer_content = await self._get_peer_content_list(peer_id)
network_content[peer_id] = peer_content
except Exception as e:
logger.error(f"Error getting content list from {peer_id}: {e}")
# Identify new content to download
new_content = await self._identify_new_content(network_content)
# Start downloading the new content
download_tasks = []
for content_hash, source_nodes in new_content.items():
download_tasks.append(
self.download_content_from_network(content_hash, source_nodes)
)
if download_tasks:
results = await asyncio.gather(*download_tasks, return_exceptions=True)
successful_downloads = sum(1 for r in results if r is True)
return {
"status": "sync_completed",
"new_content_found": len(new_content),
"downloads_queued": len(download_tasks),
"immediate_successes": successful_downloads
}
else:
return {
"status": "up_to_date",
"message": "No new content found"
}
except Exception as e:
logger.error(f"Error in network sync: {e}")
return {"status": "error", "message": str(e)}
async def _get_peer_content_list(self, peer_id: str) -> List[Dict[str, Any]]:
"""Получить список контента от пира."""
try:
if not self.session:
return []
from .node_service import get_node_service
node_service = get_node_service()
peer_info = node_service.peer_manager.get_peer_info(peer_id)
if not peer_info:
return []
peer_address = node_service.peer_manager._parse_peer_address(peer_info["address"])
if not peer_address:
return []
content_list_url = f"{peer_address}/api/my/content/list"
async with self.session.get(content_list_url) as response:
if response.status == 200:
data = await response.json()
return data.get("content", [])
else:
logger.warning(f"Failed to get content list from {peer_id}: HTTP {response.status}")
return []
except Exception as e:
logger.error(f"Error getting content list from {peer_id}: {e}")
return []
async def _identify_new_content(self, network_content: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[str]]:
"""Определить новый контент для загрузки."""
try:
# Get the hashes of local content
local_hashes = await self._get_local_content_hashes()
# Find new content
new_content = {}
for peer_id, content_list in network_content.items():
for content_info in content_list:
content_hash = content_info.get("hash")
if not content_hash:
continue
# Check whether we already have this content
if content_hash not in local_hashes:
if content_hash not in new_content:
new_content[content_hash] = []
new_content[content_hash].append(peer_id)
return new_content
except Exception as e:
logger.error(f"Error identifying new content: {e}")
return {}
async def _get_local_content_hashes(self) -> Set[str]:
"""Получить множество хешей локального контента."""
try:
async with get_async_session() as session:
stmt = select(Content.md5_hash, Content.sha256_hash).where(Content.is_active == True)
result = await session.execute(stmt)
hashes = set()
for row in result:
if row.md5_hash:
hashes.add(row.md5_hash)
if row.sha256_hash:
hashes.add(row.sha256_hash)
return hashes
except Exception as e:
logger.error(f"Error getting local content hashes: {e}")
return set()
async def get_sync_status(self) -> Dict[str, Any]:
"""Получить статус синхронизации."""
return {
"is_running": self.is_running,
"active_syncs": len(self.active_syncs),
"queue_size": self.sync_queue.qsize(),
"workers_count": len(self.sync_workers),
"recent_syncs": [
sync.to_dict() for sync in self.sync_history[-10:]
],
"current_syncs": {
content_hash: sync.to_dict()
for content_hash, sync in self.active_syncs.items()
}
}
async def get_content_sync_status(self, content_hash: str) -> Dict[str, Any]:
"""Получить статус синхронизации конкретного контента."""
# Check active syncs
if content_hash in self.active_syncs:
return self.active_syncs[content_hash].to_dict()
# Check the history
for sync in reversed(self.sync_history):
if sync.content_hash == content_hash:
return sync.to_dict()
return {
"content_hash": content_hash,
"status": "not_found",
"message": "No sync information found for this content"
}
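# Usage sketch (illustrative; the content hash and node ids below are placeholders):
#
# async def demo_replication(sync_manager: ContentSyncManager):
#     await sync_manager.start_sync_process()
#     try:
#         await sync_manager.replicate_content_to_nodes(
#             "0123456789abcdef0123456789abcdef",   # md5-style content hash
#             ["node-a", "node-b", "node-c"],
#         )
#         status = await sync_manager.get_content_sync_status(
#             "0123456789abcdef0123456789abcdef"
#         )
#         print(status["status"], status.get("success_rate"))
#     finally:
#         await sync_manager.stop_sync_process()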

571
app/core/security.py Normal file
View File

@ -0,0 +1,571 @@
"""
Comprehensive security module with encryption, JWT tokens, password hashing, and access control.
Provides secure file encryption, token management, and authentication utilities.
"""
import hashlib
import hmac
import secrets
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Any, Union
from uuid import UUID
import bcrypt
import jwt
from cryptography.fernet import Fernet
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
import base64
from app.core.config import get_settings
from app.core.logging import get_logger
logger = get_logger(__name__)
settings = get_settings()
class SecurityManager:
"""Main security manager for encryption, tokens, and authentication."""
def __init__(self):
self.fernet_key = self._get_or_create_fernet_key()
self.fernet = Fernet(self.fernet_key)
def _get_or_create_fernet_key(self) -> bytes:
"""Get or create Fernet encryption key from settings."""
if hasattr(settings, 'ENCRYPTION_KEY') and settings.ENCRYPTION_KEY:
# Derive key from settings
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=settings.SECRET_KEY.encode()[:16],
iterations=100000,
)
key = base64.urlsafe_b64encode(kdf.derive(settings.ENCRYPTION_KEY.encode()))
return key
else:
# Generate random key (for development only)
return Fernet.generate_key()
# Global security manager instance
_security_manager = SecurityManager()
def hash_password(password: str) -> str:
"""
Hash password using bcrypt with salt.
Args:
password: Plain text password
Returns:
str: Hashed password
"""
try:
salt = bcrypt.gensalt(rounds=12)
hashed = bcrypt.hashpw(password.encode('utf-8'), salt)
return hashed.decode('utf-8')
except Exception as e:
logger.error("Failed to hash password", error=str(e))
raise
def verify_password(password: str, hashed_password: str) -> bool:
"""
Verify password against hash.
Args:
password: Plain text password
hashed_password: Bcrypt hashed password
Returns:
bool: True if password matches
"""
try:
return bcrypt.checkpw(password.encode('utf-8'), hashed_password.encode('utf-8'))
except Exception as e:
logger.error("Failed to verify password", error=str(e))
return False
def generate_access_token(
payload: Dict[str, Any],
expires_in: int = 3600,
token_type: str = "access"
) -> str:
"""
Generate JWT access token.
Args:
payload: Token payload data
expires_in: Token expiration time in seconds
token_type: Type of token (access, refresh, api)
Returns:
str: JWT token
"""
try:
now = datetime.utcnow()
token_payload = {
"iat": now,
"exp": now + timedelta(seconds=expires_in),
"type": token_type,
"jti": secrets.token_urlsafe(16), # Unique token ID
**payload
}
token = jwt.encode(
token_payload,
settings.SECRET_KEY,
algorithm="HS256"
)
logger.debug(
"Access token generated",
token_type=token_type,
expires_in=expires_in,
user_id=payload.get("user_id")
)
return token
except Exception as e:
logger.error("Failed to generate access token", error=str(e))
raise
def verify_access_token(token: str, token_type: str = "access") -> Optional[Dict[str, Any]]:
"""
Verify and decode JWT token.
Args:
token: JWT token string
token_type: Expected token type
Returns:
Optional[Dict]: Decoded payload or None if invalid
"""
try:
payload = jwt.decode(
token,
settings.SECRET_KEY,
algorithms=["HS256"]
)
# Verify token type
if payload.get("type") != token_type:
logger.warning("Token type mismatch", expected=token_type, actual=payload.get("type"))
return None
# Check expiration
if datetime.utcnow() > datetime.utcfromtimestamp(payload["exp"]):
logger.warning("Token expired", exp=payload["exp"])
return None
return payload
except jwt.ExpiredSignatureError:
logger.warning("Token expired")
return None
except jwt.InvalidTokenError as e:
logger.warning("Invalid token", error=str(e))
return None
except Exception as e:
logger.error("Failed to verify token", error=str(e))
return None
def generate_refresh_token(user_id: UUID, device_id: Optional[str] = None) -> str:
"""
Generate long-lived refresh token.
Args:
user_id: User UUID
device_id: Optional device identifier
Returns:
str: Refresh token
"""
payload = {
"user_id": str(user_id),
"device_id": device_id,
"token_family": secrets.token_urlsafe(16) # For token rotation
}
return generate_access_token(
payload,
expires_in=settings.REFRESH_TOKEN_EXPIRE_DAYS * 24 * 3600,
token_type="refresh"
)
def generate_api_key(
user_id: UUID,
permissions: List[str],
name: str,
expires_in: Optional[int] = None
) -> str:
"""
Generate API key with specific permissions.
Args:
user_id: User UUID
permissions: List of permissions
name: API key name
expires_in: Optional expiration time in seconds
Returns:
str: API key token
"""
payload = {
"user_id": str(user_id),
"permissions": permissions,
"name": name,
"key_id": secrets.token_urlsafe(16)
}
expires = expires_in or (365 * 24 * 3600) # Default 1 year
return generate_access_token(payload, expires_in=expires, token_type="api")
def encrypt_data(data: Union[str, bytes], context: str = "") -> str:
"""
Encrypt data using Fernet symmetric encryption.
Args:
data: Data to encrypt
context: Optional context for additional security
Returns:
str: Base64 encoded encrypted data
"""
try:
if isinstance(data, str):
data = data.encode('utf-8')
# Add context to data for additional security
if context:
data = f"{context}:{len(data)}:".encode('utf-8') + data
encrypted = _security_manager.fernet.encrypt(data)
return base64.urlsafe_b64encode(encrypted).decode('utf-8')
except Exception as e:
logger.error("Failed to encrypt data", error=str(e))
raise
def decrypt_data(encrypted_data: str, context: str = "") -> Union[str, bytes]:
"""
Decrypt data using Fernet symmetric encryption.
Args:
encrypted_data: Base64 encoded encrypted data
context: Optional context for verification
Returns:
Union[str, bytes]: Decrypted data
"""
try:
encrypted_bytes = base64.urlsafe_b64decode(encrypted_data.encode('utf-8'))
decrypted = _security_manager.fernet.decrypt(encrypted_bytes)
# Verify and remove context if provided
if context:
context_prefix = f"{context}:".encode('utf-8')
if not decrypted.startswith(context_prefix):
raise ValueError("Context mismatch during decryption")
# Extract length and data
remaining = decrypted[len(context_prefix):]
length_end = remaining.find(b':')
if length_end == -1:
raise ValueError("Invalid encrypted data format")
expected_length = int(remaining[:length_end].decode('utf-8'))
data = remaining[length_end + 1:]
if len(data) != expected_length:
raise ValueError("Data length mismatch")
return data
return decrypted
except Exception as e:
logger.error("Failed to decrypt data", error=str(e))
raise
def encrypt_file(file_data: bytes, file_id: str) -> bytes:
"""
Encrypt file data with file-specific context.
Args:
file_data: File bytes to encrypt
file_id: Unique file identifier
Returns:
bytes: Encrypted file data
"""
try:
encrypted_str = encrypt_data(file_data, context=f"file:{file_id}")
return base64.urlsafe_b64decode(encrypted_str.encode('utf-8'))
except Exception as e:
logger.error("Failed to encrypt file", file_id=file_id, error=str(e))
raise
def decrypt_file(encrypted_data: bytes, file_id: str) -> bytes:
"""
Decrypt file data with file-specific context.
Args:
encrypted_data: Encrypted file bytes
file_id: Unique file identifier
Returns:
bytes: Decrypted file data
"""
try:
encrypted_str = base64.urlsafe_b64encode(encrypted_data).decode('utf-8')
decrypted = decrypt_data(encrypted_str, context=f"file:{file_id}")
return decrypted if isinstance(decrypted, bytes) else decrypted.encode('utf-8')
except Exception as e:
logger.error("Failed to decrypt file", file_id=file_id, error=str(e))
raise
def generate_secure_filename(original_filename: str, user_id: UUID) -> str:
"""
Generate secure filename to prevent path traversal and collisions.
Args:
original_filename: Original filename
user_id: User UUID
Returns:
str: Secure filename
"""
# Extract extension
parts = original_filename.rsplit('.', 1)
extension = parts[1] if len(parts) > 1 else ''
# Generate secure base name
timestamp = datetime.utcnow().strftime('%Y%m%d_%H%M%S')
random_part = secrets.token_urlsafe(8)
user_hash = hashlib.sha256(str(user_id).encode()).hexdigest()[:8]
secure_name = f"{timestamp}_{user_hash}_{random_part}"
if extension:
# Validate extension
allowed_extensions = {
'txt', 'pdf', 'doc', 'docx', 'xls', 'xlsx', 'ppt', 'pptx',
'jpg', 'jpeg', 'png', 'gif', 'bmp', 'webp', 'svg',
'mp3', 'wav', 'flac', 'ogg', 'mp4', 'avi', 'mkv', 'webm',
'zip', 'rar', '7z', 'tar', 'gz', 'json', 'xml', 'csv'
}
clean_extension = extension.lower().strip()
if clean_extension in allowed_extensions:
secure_name += f".{clean_extension}"
return secure_name
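# Example (illustrative): for an original filename "report.PDF" the result has the shape
# "20250702_192500_<8-char-user-hash>_<random-part>.pdf" - a UTC timestamp, a truncated
# SHA-256 of the user id, a URL-safe random token, and the validated lower-cased extension.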
def validate_file_signature(file_data: bytes, claimed_type: str) -> bool:
"""
Validate file signature against claimed MIME type.
Args:
file_data: File bytes to validate
claimed_type: Claimed MIME type
Returns:
bool: True if signature matches type
"""
if len(file_data) < 8:
return False
# File signatures (magic numbers)
signatures = {
'image/jpeg': [b'\xFF\xD8\xFF'],
'image/png': [b'\x89PNG\r\n\x1a\n'],
'image/gif': [b'GIF87a', b'GIF89a'],
'image/webp': [b'RIFF', b'WEBP'],
'application/pdf': [b'%PDF-'],
'application/zip': [b'PK\x03\x04', b'PK\x05\x06', b'PK\x07\x08'],
'audio/mpeg': [b'ID3', b'\xFF\xFB', b'\xFF\xF3', b'\xFF\xF2'],
'video/mp4': [b'\x00\x00\x00\x18ftypmp4', b'\x00\x00\x00\x20ftypmp4'],
'text/plain': [], # Text files don't have reliable signatures
}
expected_sigs = signatures.get(claimed_type, [])
# If no signatures defined, allow (like text files)
if not expected_sigs:
return True
# Check if file starts with any expected signature
file_start = file_data[:32] # Check first 32 bytes
for sig in expected_sigs:
if file_start.startswith(sig):
return True
return False
def generate_csrf_token(user_id: UUID, session_id: str) -> str:
"""
Generate CSRF token for form protection.
Args:
user_id: User UUID
session_id: Session identifier
Returns:
str: CSRF token
"""
timestamp = str(int(datetime.utcnow().timestamp()))
data = f"{user_id}:{session_id}:{timestamp}"
signature = hmac.new(
settings.SECRET_KEY.encode(),
data.encode(),
hashlib.sha256
).hexdigest()
token_data = f"{data}:{signature}"
return base64.urlsafe_b64encode(token_data.encode()).decode()
def verify_csrf_token(token: str, user_id: UUID, session_id: str, max_age: int = 3600) -> bool:
"""
Verify CSRF token.
Args:
token: CSRF token to verify
user_id: User UUID
session_id: Session identifier
max_age: Maximum token age in seconds
Returns:
bool: True if token is valid
"""
try:
token_data = base64.urlsafe_b64decode(token.encode()).decode()
parts = token_data.split(':')
if len(parts) != 4:
return False
token_user_id, token_session_id, timestamp, signature = parts
# Verify components
if token_user_id != str(user_id) or token_session_id != session_id:
return False
# Check age
token_time = int(timestamp)
current_time = int(datetime.utcnow().timestamp())
if current_time - token_time > max_age:
return False
# Verify signature
data = f"{token_user_id}:{token_session_id}:{timestamp}"
expected_signature = hmac.new(
settings.SECRET_KEY.encode(),
data.encode(),
hashlib.sha256
).hexdigest()
return hmac.compare_digest(signature, expected_signature)
except Exception as e:
logger.warning("Failed to verify CSRF token", error=str(e))
return False
def sanitize_input(input_data: str, max_length: int = 1000) -> str:
"""
Sanitize user input to prevent XSS and injection attacks.
Args:
input_data: Input string to sanitize
max_length: Maximum allowed length
Returns:
str: Sanitized input
"""
if not input_data:
return ""
# Truncate if too long
if len(input_data) > max_length:
input_data = input_data[:max_length]
# Remove/escape dangerous characters
dangerous_chars = ['<', '>', '"', "'", '&', '\x00', '\r', '\n']
for char in dangerous_chars:
if char in input_data:
input_data = input_data.replace(char, '')
# Strip whitespace
return input_data.strip()
def check_permission(user_permissions: List[str], required_permission: str) -> bool:
"""
Check if user has required permission.
Args:
user_permissions: List of user permissions
required_permission: Required permission string
Returns:
bool: True if user has permission
"""
# Admin has all permissions
if 'admin' in user_permissions:
return True
# Check exact permission
if required_permission in user_permissions:
return True
# Check wildcard permissions
permission_parts = required_permission.split('.')
for i in range(len(permission_parts)):
wildcard_perm = '.'.join(permission_parts[:i+1]) + '.*'
if wildcard_perm in user_permissions:
return True
return False
def rate_limit_key(identifier: str, action: str, window: str = "default") -> str:
"""
Generate rate limiting key.
Args:
identifier: User/IP identifier
action: Action being rate limited
window: Time window identifier
Returns:
str: Rate limit cache key
"""
key_data = f"rate_limit:{action}:{window}:{identifier}"
return hashlib.sha256(key_data.encode()).hexdigest()
def generate_otp(length: int = 6) -> str:
"""
Generate one-time password.
Args:
length: Length of OTP
Returns:
str: Numeric OTP
"""
return ''.join(secrets.choice('0123456789') for _ in range(length))
def constant_time_compare(a: str, b: str) -> bool:
"""
Constant time string comparison to prevent timing attacks.
Args:
a: First string
b: Second string
Returns:
bool: True if strings are equal
"""
return hmac.compare_digest(a.encode('utf-8'), b.encode('utf-8'))
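# Usage sketch (illustrative; the user id is a placeholder):
#
# from uuid import uuid4
#
# def demo_security_roundtrip():
#     hashed = hash_password("correct horse battery staple")
#     assert verify_password("correct horse battery staple", hashed)
#
#     token = generate_access_token({"user_id": str(uuid4())}, expires_in=900)
#     claims = verify_access_token(token)   # decoded payload, or None if invalid/expired
#
#     secret = encrypt_data("top secret", context="demo")
#     assert decrypt_data(secret, context="demo") == b"top secret"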

View File

@ -1,45 +1,574 @@
# --- Removed: legacy synchronous SQLAlchemy/MariaDB session module ---
import time
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text
from app.core._config import MYSQL_URI, MYSQL_DATABASE
from app.core.logger import make_log
from sqlalchemy.pool import NullPool
engine = create_engine(MYSQL_URI, poolclass=NullPool) #, echo=True)
Session = sessionmaker(bind=engine)
database_initialized = False
while not database_initialized:
try:
with Session() as session:
databases_list = session.execute(text("SHOW DATABASES;"))
databases_list = [row[0] for row in databases_list]
make_log("SQL", 'Database list: ' + str(databases_list), level='debug')
assert MYSQL_DATABASE in databases_list, 'Database not found'
database_initialized = True
except Exception as e:
make_log("SQL", 'MariaDB is not ready yet: ' + str(e), level='debug')
time.sleep(1)
engine = create_engine(f"{MYSQL_URI}/{MYSQL_DATABASE}", poolclass=NullPool)
Session = sessionmaker(bind=engine)
@contextmanager
def db_session(auto_commit=False):
_session = Session()
try:
yield _session
if auto_commit is True:
_session.commit()
except BaseException as e:
_session.rollback()
raise e
finally:
_session.close()
# --- Added: async storage management module ---
"""
Comprehensive storage management with chunked uploads, multiple backends, and security.
Supports local storage, S3-compatible storage, and async operations with Redis caching.
"""
import asyncio
import hashlib
import mimetypes
import os
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, List, Optional, AsyncGenerator, Any, Tuple
from uuid import UUID, uuid4
import aiofiles
import aiofiles.os
from sqlalchemy import select, update
from sqlalchemy.orm import selectinload
from app.core.config import get_settings
from app.core.database import get_async_session, get_cache_manager
from app.core.logging import get_logger
from app.core.models.content import Content, ContentChunk
from app.core.security import encrypt_file, decrypt_file, generate_access_token
logger = get_logger(__name__)
settings = get_settings()
class StorageBackend:
"""Abstract base class for storage backends."""
async def store_chunk(self, upload_id: UUID, chunk_index: int, data: bytes) -> str:
"""Store a file chunk and return its identifier."""
raise NotImplementedError
async def retrieve_chunk(self, chunk_id: str) -> bytes:
"""Retrieve a file chunk by its identifier."""
raise NotImplementedError
async def delete_chunk(self, chunk_id: str) -> bool:
"""Delete a file chunk."""
raise NotImplementedError
async def assemble_file(self, upload_id: UUID, chunks: List[str]) -> str:
"""Assemble chunks into final file and return file path."""
raise NotImplementedError
async def delete_file(self, file_path: str) -> bool:
"""Delete a complete file."""
raise NotImplementedError
async def get_file_stream(self, file_path: str) -> AsyncGenerator[bytes, None]:
"""Get async file stream for download."""
raise NotImplementedError
class LocalStorageBackend(StorageBackend):
"""Local filesystem storage backend with encryption support."""
def __init__(self):
self.base_path = Path(settings.STORAGE_PATH)
self.chunks_path = self.base_path / "chunks"
self.files_path = self.base_path / "files"
# Create directories if they don't exist
self.chunks_path.mkdir(parents=True, exist_ok=True)
self.files_path.mkdir(parents=True, exist_ok=True)
async def store_chunk(self, upload_id: UUID, chunk_index: int, data: bytes) -> str:
"""Store chunk to local filesystem with optional encryption."""
try:
chunk_id = f"{upload_id}_{chunk_index:06d}"
chunk_path = self.chunks_path / f"{chunk_id}.chunk"
# Encrypt chunk if encryption is enabled
if settings.ENCRYPT_FILES:
data = encrypt_file(data, str(upload_id))
async with aiofiles.open(chunk_path, 'wb') as f:
await f.write(data)
await logger.adebug(
"Chunk stored successfully",
upload_id=str(upload_id),
chunk_index=chunk_index,
chunk_size=len(data)
)
return chunk_id
except Exception as e:
await logger.aerror(
"Failed to store chunk",
upload_id=str(upload_id),
chunk_index=chunk_index,
error=str(e)
)
raise
async def retrieve_chunk(self, chunk_id: str) -> bytes:
"""Retrieve and optionally decrypt chunk from local filesystem."""
try:
chunk_path = self.chunks_path / f"{chunk_id}.chunk"
if not chunk_path.exists():
raise FileNotFoundError(f"Chunk {chunk_id} not found")
async with aiofiles.open(chunk_path, 'rb') as f:
data = await f.read()
# Decrypt chunk if encryption is enabled
if settings.ENCRYPT_FILES:
upload_id = chunk_id.split('_')[0]
data = decrypt_file(data, upload_id)
return data
except Exception as e:
await logger.aerror("Failed to retrieve chunk", chunk_id=chunk_id, error=str(e))
raise
async def delete_chunk(self, chunk_id: str) -> bool:
"""Delete chunk file from local filesystem."""
try:
chunk_path = self.chunks_path / f"{chunk_id}.chunk"
if chunk_path.exists():
await aiofiles.os.remove(chunk_path)
return True
return False
except Exception as e:
await logger.aerror("Failed to delete chunk", chunk_id=chunk_id, error=str(e))
return False
async def assemble_file(self, upload_id: UUID, chunks: List[str]) -> str:
"""Assemble chunks into final file."""
try:
file_id = str(uuid4())
file_path = self.files_path / f"{file_id}"
async with aiofiles.open(file_path, 'wb') as output_file:
for chunk_id in chunks:
chunk_data = await self.retrieve_chunk(chunk_id)
await output_file.write(chunk_data)
# Clean up chunks after assembly
for chunk_id in chunks:
await self.delete_chunk(chunk_id)
await logger.ainfo(
"File assembled successfully",
upload_id=str(upload_id),
file_path=str(file_path),
chunks_count=len(chunks)
)
return str(file_path)
except Exception as e:
await logger.aerror(
"Failed to assemble file",
upload_id=str(upload_id),
error=str(e)
)
raise
async def delete_file(self, file_path: str) -> bool:
"""Delete file from local filesystem."""
try:
path = Path(file_path)
if path.exists() and path.is_file():
await aiofiles.os.remove(path)
return True
return False
except Exception as e:
await logger.aerror("Failed to delete file", file_path=file_path, error=str(e))
return False
async def get_file_stream(self, file_path: str) -> AsyncGenerator[bytes, None]:
"""Stream file content for download."""
try:
path = Path(file_path)
if not path.exists():
raise FileNotFoundError(f"File {file_path} not found")
async with aiofiles.open(path, 'rb') as f:
while True:
chunk = await f.read(65536) # 64KB chunks
if not chunk:
break
yield chunk
except Exception as e:
await logger.aerror("Failed to stream file", file_path=file_path, error=str(e))
raise
class StorageManager:
"""Main storage manager with upload session management and caching."""
def __init__(self):
self.backend = LocalStorageBackend() # Can be extended to support S3, etc.
self.cache_manager = get_cache_manager()
async def create_upload_session(self, content_id: UUID, total_size: int) -> Dict[str, Any]:
"""Create new upload session with chunked upload support."""
try:
upload_id = uuid4()
session_data = {
"upload_id": str(upload_id),
"content_id": str(content_id),
"total_size": total_size,
"chunk_size": settings.CHUNK_SIZE,
"total_chunks": (total_size + settings.CHUNK_SIZE - 1) // settings.CHUNK_SIZE,
"uploaded_chunks": [],
"created_at": datetime.utcnow().isoformat(),
"expires_at": (datetime.utcnow() + timedelta(hours=24)).isoformat(),
"status": "active"
}
# Store session in cache
session_key = f"upload_session:{upload_id}"
await self.cache_manager.set(session_key, session_data, ttl=86400) # 24 hours
# Store in database for persistence
async with get_async_session() as session:
upload_session = ContentUploadSession(
id=upload_id,
content_id=content_id,
total_size=total_size,
chunk_size=settings.CHUNK_SIZE,
total_chunks=session_data["total_chunks"],
expires_at=datetime.fromisoformat(session_data["expires_at"])
)
session.add(upload_session)
await session.commit()
await logger.ainfo(
"Upload session created",
upload_id=str(upload_id),
content_id=str(content_id),
total_size=total_size
)
return {
"upload_id": str(upload_id),
"chunk_size": settings.CHUNK_SIZE,
"total_chunks": session_data["total_chunks"],
"upload_url": f"/api/v1/storage/upload/{upload_id}",
"expires_at": session_data["expires_at"]
}
except Exception as e:
await logger.aerror(
"Failed to create upload session",
content_id=str(content_id),
error=str(e)
)
raise
async def upload_chunk(
self,
upload_id: UUID,
chunk_index: int,
chunk_data: bytes,
chunk_hash: str
) -> Dict[str, Any]:
"""Upload and validate a file chunk."""
try:
# Verify chunk hash
calculated_hash = hashlib.sha256(chunk_data).hexdigest()
if calculated_hash != chunk_hash:
raise ValueError("Chunk hash mismatch")
# Get upload session
session_data = await self._get_upload_session(upload_id)
if not session_data:
raise ValueError("Upload session not found or expired")
# Check if chunk already uploaded
if chunk_index in session_data.get("uploaded_chunks", []):
return {"status": "already_uploaded", "chunk_index": chunk_index}
# Store chunk
chunk_id = await self.backend.store_chunk(upload_id, chunk_index, chunk_data)
# Update session data
session_data["uploaded_chunks"].append(chunk_index)
session_data["uploaded_chunks"].sort()
session_key = f"upload_session:{upload_id}"
await self.cache_manager.set(session_key, session_data, ttl=86400)
# Store chunk info in database
async with get_async_session() as session:
chunk_record = ContentChunk(
upload_id=upload_id,
chunk_index=chunk_index,
chunk_id=chunk_id,
chunk_hash=chunk_hash,
chunk_size=len(chunk_data)
)
session.add(chunk_record)
await session.commit()
await logger.adebug(
"Chunk uploaded successfully",
upload_id=str(upload_id),
chunk_index=chunk_index,
chunk_size=len(chunk_data)
)
return {
"status": "uploaded",
"chunk_index": chunk_index,
"uploaded_chunks": len(session_data["uploaded_chunks"]),
"total_chunks": session_data["total_chunks"]
}
except Exception as e:
await logger.aerror(
"Failed to upload chunk",
upload_id=str(upload_id),
chunk_index=chunk_index,
error=str(e)
)
raise
async def finalize_upload(self, upload_id: UUID) -> Dict[str, Any]:
"""Finalize upload by assembling chunks into final file."""
try:
# Get upload session
session_data = await self._get_upload_session(upload_id)
if not session_data:
raise ValueError("Upload session not found")
# Verify all chunks are uploaded
uploaded_chunks = session_data.get("uploaded_chunks", [])
total_chunks = session_data["total_chunks"]
if len(uploaded_chunks) != total_chunks:
missing_chunks = set(range(total_chunks)) - set(uploaded_chunks)
raise ValueError(f"Missing chunks: {missing_chunks}")
# Get chunk IDs in order
async with get_async_session() as session:
stmt = (
select(ContentChunk)
.where(ContentChunk.upload_id == upload_id)
.order_by(ContentChunk.chunk_index)
)
result = await session.execute(stmt)
chunks = result.scalars().all()
chunk_ids = [chunk.chunk_id for chunk in chunks]
# Assemble file
file_path = await self.backend.assemble_file(upload_id, chunk_ids)
# Update content record
async with get_async_session() as session:
stmt = (
update(Content)
.where(Content.id == UUID(session_data["content_id"]))
.values(
file_path=file_path,
status="completed",
updated_at=datetime.utcnow()
)
)
await session.execute(stmt)
await session.commit()
# Clean up session
session_key = f"upload_session:{upload_id}"
await self.cache_manager.delete(session_key)
await logger.ainfo(
"Upload finalized successfully",
upload_id=str(upload_id),
file_path=file_path,
total_chunks=total_chunks
)
return {
"status": "completed",
"file_path": file_path,
"content_id": session_data["content_id"]
}
except Exception as e:
await logger.aerror(
"Failed to finalize upload",
upload_id=str(upload_id),
error=str(e)
)
raise
async def get_file_stream(self, file_path: str) -> AsyncGenerator[bytes, None]:
"""Get file stream for download with caching support."""
try:
# Check if file is cached
cache_key = f"file_stream:{hashlib.md5(file_path.encode()).hexdigest()}"
async for chunk in self.backend.get_file_stream(file_path):
yield chunk
except Exception as e:
await logger.aerror("Failed to get file stream", file_path=file_path, error=str(e))
raise
async def delete_content_files(self, content_id: UUID) -> bool:
"""Delete all files associated with content."""
try:
async with get_async_session() as session:
# Get content
stmt = select(Content).where(Content.id == content_id)
result = await session.execute(stmt)
content = result.scalar_one_or_none()
if not content or not content.file_path:
return True
# Delete main file
await self.backend.delete_file(content.file_path)
# Delete any remaining chunks; chunks are keyed by upload session id,
# so resolve the upload sessions that belong to this content first
chunk_stmt = select(ContentChunk).where(
ContentChunk.upload_id.in_(
select(ContentUploadSession.id).where(
ContentUploadSession.content_id == content_id
)
)
)
chunk_result = await session.execute(chunk_stmt)
chunks = chunk_result.scalars().all()
for chunk in chunks:
await self.backend.delete_chunk(chunk.chunk_id)
# Update content record
update_stmt = (
update(Content)
.where(Content.id == content_id)
.values(file_path=None, status="deleted")
)
await session.execute(update_stmt)
await session.commit()
await logger.ainfo(
"Content files deleted",
content_id=str(content_id)
)
return True
except Exception as e:
await logger.aerror(
"Failed to delete content files",
content_id=str(content_id),
error=str(e)
)
return False
async def get_storage_stats(self) -> Dict[str, Any]:
"""Get storage usage statistics."""
try:
async with get_async_session() as session:
# Get total files and size
from sqlalchemy import func
stmt = select(
func.count(Content.id).label('total_files'),
func.sum(Content.file_size).label('total_size')
).where(Content.status == 'completed')
result = await session.execute(stmt)
stats = result.first()
# Get storage by type
type_stmt = select(
Content.content_type,
func.count(Content.id).label('count'),
func.sum(Content.file_size).label('size')
).where(Content.status == 'completed').group_by(Content.content_type)
type_result = await session.execute(type_stmt)
type_stats = {
row.content_type: {
'count': row.count,
'size': row.size or 0
}
for row in type_result
}
return {
'total_files': stats.total_files or 0,
'total_size': stats.total_size or 0,
'by_type': type_stats,
'updated_at': datetime.utcnow().isoformat()
}
except Exception as e:
await logger.aerror("Failed to get storage stats", error=str(e))
return {}
async def _get_upload_session(self, upload_id: UUID) -> Optional[Dict[str, Any]]:
"""Get upload session from cache or database."""
# Try cache first
session_key = f"upload_session:{upload_id}"
session_data = await self.cache_manager.get(session_key)
if session_data:
# Check if session is expired
expires_at = datetime.fromisoformat(session_data["expires_at"])
if expires_at > datetime.utcnow():
return session_data
# Fallback to database
try:
async with get_async_session() as session:
stmt = (
select(ContentUploadSession)
.where(ContentUploadSession.id == upload_id)
)
result = await session.execute(stmt)
upload_session = result.scalar_one_or_none()
if upload_session and upload_session.expires_at > datetime.utcnow():
# Rebuild session data
chunk_stmt = select(ContentChunk).where(
ContentChunk.upload_id == upload_id
)
chunk_result = await session.execute(chunk_stmt)
chunks = chunk_result.scalars().all()
session_data = {
"upload_id": str(upload_session.id),
"content_id": str(upload_session.content_id),
"total_size": upload_session.total_size,
"chunk_size": upload_session.chunk_size,
"total_chunks": upload_session.total_chunks,
"uploaded_chunks": [chunk.chunk_index for chunk in chunks],
"created_at": upload_session.created_at.isoformat(),
"expires_at": upload_session.expires_at.isoformat(),
"status": "active"
}
# Update cache
await self.cache_manager.set(session_key, session_data, ttl=86400)
return session_data
except Exception as e:
await logger.aerror(
"Failed to get upload session from database",
upload_id=str(upload_id),
error=str(e)
)
return None
# Additional model for upload sessions
from app.core.models.base import Base
import sqlalchemy as sa
from sqlalchemy import Column, Integer, BigInteger, DateTime
class ContentUploadSession(Base):
"""Model for tracking upload sessions."""
__tablename__ = "content_upload_sessions"
content_id = Column("content_id", sa.UUID(as_uuid=True), nullable=False)
total_size = Column(BigInteger, nullable=False)  # sizes up to 10 GB exceed a 32-bit Integer
chunk_size = Column(Integer, nullable=False, default=1048576) # 1MB
total_chunks = Column(Integer, nullable=False)
expires_at = Column(DateTime, nullable=False)
completed_at = Column(DateTime, nullable=True)
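# ---------------------------------------------------------------------------
# Illustrative client-side flow (a sketch, not part of the service above):
# it assumes httpx is available and that session creation and finalization are
# exposed at the hypothetical endpoints "/api/v1/storage/upload" and
# "/api/v1/storage/upload/{upload_id}/finalize"; only the per-chunk URL
# "/api/v1/storage/upload/{upload_id}" is taken from create_upload_session().
#
# import asyncio
# import hashlib
# from pathlib import Path
#
# import httpx
#
# CHUNK_SIZE = 1_048_576  # must match settings.CHUNK_SIZE on the server
#
# async def upload_file(base_url: str, content_id: str, path: Path) -> None:
#     async with httpx.AsyncClient(base_url=base_url, timeout=60.0) as client:
#         # Hypothetical session-creation endpoint
#         resp = await client.post(
#             "/api/v1/storage/upload",
#             json={"content_id": content_id, "total_size": path.stat().st_size},
#         )
#         resp.raise_for_status()
#         upload_id = resp.json()["upload_id"]
#         with path.open("rb") as fh:
#             index = 0
#             while chunk := fh.read(CHUNK_SIZE):
#                 # The server recomputes and compares this SHA-256 digest
#                 digest = hashlib.sha256(chunk).hexdigest()
#                 resp = await client.post(
#                     f"/api/v1/storage/upload/{upload_id}",
#                     params={"chunk_index": index, "chunk_hash": digest},
#                     content=chunk,
#                 )
#                 resp.raise_for_status()
#                 index += 1
#         # Hypothetical finalization endpoint
#         resp = await client.post(f"/api/v1/storage/upload/{upload_id}/finalize")
#         resp.raise_for_status()
#
# # asyncio.run(upload_file("http://localhost:15100", "<content-uuid>", Path("video.mp4")))
# ---------------------------------------------------------------------------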

371
app/core/validation.py Normal file
View File

@ -0,0 +1,371 @@
"""
Comprehensive validation schemas using Pydantic for request/response validation.
Provides type safety, data validation, and automatic documentation generation.
"""
from datetime import datetime
from typing import Dict, List, Optional, Any, Union
from uuid import UUID
from enum import Enum
from pydantic import BaseModel, Field, validator, root_validator
from pydantic.networks import EmailStr, HttpUrl
class ContentTypeEnum(str, Enum):
"""Supported content types."""
AUDIO = "audio"
VIDEO = "video"
IMAGE = "image"
DOCUMENT = "document"
ARCHIVE = "archive"
OTHER = "other"
class VisibilityEnum(str, Enum):
"""Content visibility levels."""
PUBLIC = "public"
PRIVATE = "private"
UNLISTED = "unlisted"
RESTRICTED = "restricted"
class StatusEnum(str, Enum):
"""Content processing status."""
PENDING = "pending"
PROCESSING = "processing"
COMPLETED = "completed"
FAILED = "failed"
DELETED = "deleted"
class PermissionEnum(str, Enum):
"""User permissions."""
READ = "read"
WRITE = "write"
DELETE = "delete"
ADMIN = "admin"
class BaseSchema(BaseModel):
"""Base schema with common configuration."""
class Config:
use_enum_values = True
validate_assignment = True
allow_population_by_field_name = True
json_encoders = {
datetime: lambda v: v.isoformat(),
UUID: lambda v: str(v)
}
class ContentSchema(BaseSchema):
"""Schema for content creation."""
title: str = Field(..., min_length=1, max_length=255, description="Content title")
description: Optional[str] = Field(None, max_length=2000, description="Content description")
content_type: ContentTypeEnum = Field(..., description="Type of content")
file_size: Optional[int] = Field(None, ge=0, le=10737418240, description="File size in bytes (max 10GB)")
visibility: VisibilityEnum = Field(VisibilityEnum.PRIVATE, description="Content visibility")
tags: List[str] = Field(default_factory=list, max_items=20, description="Content tags")
license_id: Optional[UUID] = Field(None, description="License ID if applicable")
metadata: Optional[Dict[str, Any]] = Field(None, description="Additional metadata")
@validator('tags')
def validate_tags(cls, v):
"""Validate tags format and content."""
if not v:
return v
# Check each tag
for tag in v:
if not isinstance(tag, str):
raise ValueError("Tags must be strings")
if len(tag) < 1 or len(tag) > 50:
raise ValueError("Tag length must be between 1 and 50 characters")
if not tag.replace('-', '').replace('_', '').isalnum():
raise ValueError("Tags can only contain alphanumeric characters, hyphens, and underscores")
# Remove duplicates while preserving order
seen = set()
unique_tags = []
for tag in v:
tag_lower = tag.lower()
if tag_lower not in seen:
seen.add(tag_lower)
unique_tags.append(tag)
return unique_tags
@validator('metadata')
def validate_metadata(cls, v):
"""Validate metadata structure."""
if not v:
return v
# Check metadata size (JSON serialized)
import json
try:
serialized = json.dumps(v)
if len(serialized) > 10000: # Max 10KB of metadata
raise ValueError("Metadata too large (max 10KB)")
except (TypeError, ValueError) as e:
raise ValueError(f"Invalid metadata format: {e}")
return v
class ContentUpdateSchema(BaseSchema):
"""Schema for content updates."""
title: Optional[str] = Field(None, min_length=1, max_length=255)
description: Optional[str] = Field(None, max_length=2000)
visibility: Optional[VisibilityEnum] = None
tags: Optional[List[str]] = Field(None, max_items=20)
license_id: Optional[UUID] = None
status: Optional[StatusEnum] = None
@validator('tags')
def validate_tags(cls, v):
"""Validate tags if provided."""
if v is None:
return v
return ContentSchema.validate_tags(v)
class ContentSearchSchema(BaseSchema):
"""Schema for content search requests."""
query: Optional[str] = Field(None, min_length=1, max_length=200, description="Search query")
content_type: Optional[ContentTypeEnum] = None
status: Optional[StatusEnum] = None
tags: Optional[List[str]] = Field(None, max_items=10)
visibility: Optional[VisibilityEnum] = None
date_from: Optional[datetime] = None
date_to: Optional[datetime] = None
sort_by: Optional[str] = Field("updated_at", regex="^(created_at|updated_at|title|file_size)$")
sort_order: Optional[str] = Field("desc", regex="^(asc|desc)$")
page: int = Field(1, ge=1, le=1000)
per_page: int = Field(20, ge=1, le=100)
@root_validator
def validate_date_range(cls, values):
"""Validate date range."""
date_from = values.get('date_from')
date_to = values.get('date_to')
if date_from and date_to and date_from >= date_to:
raise ValueError("date_from must be before date_to")
return values
class UserRegistrationSchema(BaseSchema):
"""Schema for user registration."""
username: str = Field(..., min_length=3, max_length=50, regex="^[a-zA-Z0-9_.-]+$")
email: EmailStr = Field(..., description="Valid email address")
password: str = Field(..., min_length=8, max_length=128, description="Password (min 8 characters)")
full_name: Optional[str] = Field(None, max_length=100)
@validator('password')
def validate_password(cls, v):
"""Validate password strength."""
if len(v) < 8:
raise ValueError("Password must be at least 8 characters long")
# Check for required character types
has_upper = any(c.isupper() for c in v)
has_lower = any(c.islower() for c in v)
has_digit = any(c.isdigit() for c in v)
has_special = any(c in "!@#$%^&*()_+-=[]{}|;:,.<>?" for c in v)
if not (has_upper and has_lower and has_digit and has_special):
raise ValueError(
"Password must contain at least one uppercase letter, "
"one lowercase letter, one digit, and one special character"
)
return v
class UserLoginSchema(BaseSchema):
"""Schema for user login."""
username: str = Field(..., min_length=1, max_length=50)
password: str = Field(..., min_length=1, max_length=128)
remember_me: bool = Field(False, description="Keep session longer")
class UserUpdateSchema(BaseSchema):
"""Schema for user profile updates."""
full_name: Optional[str] = Field(None, max_length=100)
email: Optional[EmailStr] = None
bio: Optional[str] = Field(None, max_length=500)
avatar_url: Optional[HttpUrl] = None
settings: Optional[Dict[str, Any]] = None
@validator('settings')
def validate_settings(cls, v):
"""Validate user settings."""
if not v:
return v
# Allowed settings keys
allowed_keys = {
'notifications', 'privacy', 'theme', 'language',
'timezone', 'auto_save', 'quality_preference'
}
for key in v.keys():
if key not in allowed_keys:
raise ValueError(f"Invalid settings key: {key}")
return v
class StorageUploadSchema(BaseSchema):
"""Schema for file upload initiation."""
filename: str = Field(..., min_length=1, max_length=255)
file_size: int = Field(..., ge=1, le=10737418240) # Max 10GB
content_type: str = Field(..., min_length=1, max_length=100)
chunk_size: Optional[int] = Field(1048576, ge=65536, le=10485760) # 64KB to 10MB
@validator('filename')
def validate_filename(cls, v):
"""Validate filename format."""
import re
# Check for dangerous characters
if re.search(r'[<>:"/\\|?*\x00-\x1f]', v):
raise ValueError("Filename contains invalid characters")
# Check for reserved names (Windows)
reserved_names = {
'CON', 'PRN', 'AUX', 'NUL',
'COM1', 'COM2', 'COM3', 'COM4', 'COM5', 'COM6', 'COM7', 'COM8', 'COM9',
'LPT1', 'LPT2', 'LPT3', 'LPT4', 'LPT5', 'LPT6', 'LPT7', 'LPT8', 'LPT9'
}
name_part = v.split('.')[0].upper()
if name_part in reserved_names:
raise ValueError("Filename uses reserved name")
return v
class ChunkUploadSchema(BaseSchema):
"""Schema for chunk upload."""
upload_id: UUID = Field(..., description="Upload session ID")
chunk_index: int = Field(..., ge=0, description="Chunk sequence number")
chunk_hash: str = Field(..., min_length=64, max_length=64, description="SHA256 hash of chunk")
is_final: bool = Field(False, description="Is this the final chunk")
class BlockchainTransactionSchema(BaseSchema):
"""Schema for blockchain transactions."""
transaction_type: str = Field(..., regex="^(transfer|mint|burn|stake|unstake)$")
amount: Optional[int] = Field(None, ge=0, description="Amount in nanotons")
recipient_address: Optional[str] = Field(None, min_length=48, max_length=48)
message: Optional[str] = Field(None, max_length=500)
@validator('recipient_address')
def validate_ton_address(cls, v):
"""Validate TON address format."""
if not v:
return v
# Basic TON address validation
import re
if not re.match(r'^[a-zA-Z0-9_-]{48}$', v):
raise ValueError("Invalid TON address format")
return v
class LicenseSchema(BaseSchema):
"""Schema for license information."""
name: str = Field(..., min_length=1, max_length=100)
description: Optional[str] = Field(None, max_length=1000)
url: Optional[HttpUrl] = None
commercial_use: bool = Field(False, description="Allows commercial use")
attribution_required: bool = Field(True, description="Requires attribution")
share_alike: bool = Field(False, description="Requires share-alike")
class AccessControlSchema(BaseSchema):
"""Schema for content access control."""
user_id: UUID = Field(..., description="User to grant access to")
permission: str = Field(..., regex="^(read|write|delete|admin)$")
expires_at: Optional[datetime] = Field(None, description="Access expiration time")
@root_validator
def validate_expiration(cls, values):
"""Validate access expiration."""
expires_at = values.get('expires_at')
if expires_at and expires_at <= datetime.utcnow():
raise ValueError("Expiration time must be in the future")
return values
class ApiKeySchema(BaseSchema):
"""Schema for API key creation."""
name: str = Field(..., min_length=1, max_length=100, description="API key name")
permissions: List[str] = Field(..., min_items=1, description="List of permissions")
expires_at: Optional[datetime] = Field(None, description="Key expiration time")
@validator('permissions')
def validate_permissions(cls, v):
"""Validate permission format."""
valid_permissions = {
'content.read', 'content.create', 'content.update', 'content.delete',
'storage.upload', 'storage.download', 'storage.delete',
'user.read', 'user.update', 'admin.read', 'admin.write'
}
for perm in v:
if perm not in valid_permissions:
raise ValueError(f"Invalid permission: {perm}")
return list(set(v)) # Remove duplicates
class WebhookSchema(BaseSchema):
"""Schema for webhook configuration."""
url: HttpUrl = Field(..., description="Webhook endpoint URL")
events: List[str] = Field(..., min_items=1, description="Events to subscribe to")
secret: Optional[str] = Field(None, min_length=16, max_length=64, description="Webhook secret")
active: bool = Field(True, description="Whether webhook is active")
@validator('events')
def validate_events(cls, v):
"""Validate webhook events."""
valid_events = {
'content.created', 'content.updated', 'content.deleted',
'user.registered', 'user.updated', 'upload.completed',
'blockchain.transaction', 'system.error'
}
for event in v:
if event not in valid_events:
raise ValueError(f"Invalid event: {event}")
return list(set(v))
# Response schemas
class ContentResponseSchema(BaseSchema):
"""Schema for content response."""
id: UUID
title: str
description: Optional[str]
content_type: ContentTypeEnum
file_size: int
status: StatusEnum
visibility: VisibilityEnum
tags: List[str]
created_at: datetime
updated_at: datetime
user_id: UUID
class UserResponseSchema(BaseSchema):
"""Schema for user response."""
id: UUID
username: str
email: EmailStr
full_name: Optional[str]
created_at: datetime
is_active: bool
permissions: List[str]
class ErrorResponseSchema(BaseSchema):
"""Schema for error responses."""
error: str = Field(..., description="Error message")
code: str = Field(..., description="Error code")
details: Optional[Dict[str, Any]] = Field(None, description="Additional error details")
timestamp: datetime = Field(default_factory=datetime.utcnow)
class SuccessResponseSchema(BaseSchema):
"""Schema for success responses."""
message: str = Field(..., description="Success message")
data: Optional[Dict[str, Any]] = Field(None, description="Response data")
timestamp: datetime = Field(default_factory=datetime.utcnow)
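# ---------------------------------------------------------------------------
# Minimal usage sketch (assumes a Pydantic v1-compatible runtime, since the
# schemas above rely on @validator/@root_validator and Field(regex=...)):
#
# from pydantic import ValidationError
#
# payload = {
#     "title": "Demo track",
#     "content_type": "audio",
#     "file_size": 4_200_000,
#     "tags": ["demo", "Demo", "lo-fi"],  # case-insensitive duplicate is dropped
# }
# try:
#     content = ContentSchema(**payload)
#     print(content.tags)  # ["demo", "lo-fi"]
# except ValidationError as exc:
#     print(exc.errors())
# ---------------------------------------------------------------------------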

275
app/main.py Normal file
View File

@ -0,0 +1,275 @@
"""
MY Network - Main Application Entry Point
Entry point for the application with MY Network support
"""
import asyncio
import logging
from pathlib import Path
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Determine the launch mode
def get_app_mode():
"""Determine the application launch mode."""
import os
# Check environment variables
if os.getenv('USE_FASTAPI', '').lower() == 'true':
return 'fastapi'
# Check whether the FastAPI dependencies are available
try:
import fastapi
import uvicorn
return 'fastapi'
except ImportError:
pass
# Check whether Sanic is available
try:
import sanic
return 'sanic'
except ImportError:
pass
# Fall back to the minimal server
return 'minimal'
def create_fastapi_app():
"""Создать FastAPI приложение с MY Network."""
try:
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
app = FastAPI(
title="MY Network",
description="Distributed Content Protocol v2.0",
version="2.0.0"
)
# CORS middleware
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Add MY Network routes
try:
from app.api.routes.my_network_routes import router as my_network_router
from app.api.routes.my_monitoring import router as monitoring_router
app.include_router(my_network_router)
app.include_router(monitoring_router)
logger.info("MY Network routes added to FastAPI")
except ImportError as e:
logger.warning(f"Could not import MY Network routes: {e}")
# Base routes
@app.get("/")
async def root():
return {"message": "MY Network v2.0 - Distributed Content Protocol"}
@app.get("/health")
async def health():
return {"status": "healthy", "service": "MY Network"}
return app
except Exception as e:
logger.error(f"Failed to create FastAPI app: {e}")
raise
def create_sanic_app():
"""Создать Sanic приложение с MY Network."""
try:
# Импортировать существующее Sanic приложение
from app.api import create_app
return create_app()
except Exception as e:
logger.error(f"Failed to create Sanic app: {e}")
raise
async def start_my_network_service():
"""Запустить MY Network сервис."""
try:
from app.core.my_network.node_service import NodeService
logger.info("Starting MY Network service...")
# Create and start the service
node_service = NodeService()
await node_service.start()
logger.info("MY Network service started successfully")
# Return a task that keeps the service alive
async def keep_service_running():
try:
while node_service.is_running:
await asyncio.sleep(30)  # Check every 30 seconds
except asyncio.CancelledError:
logger.info("MY Network service shutdown requested")
await node_service.stop()
raise
except Exception as e:
logger.error(f"MY Network service error: {e}")
await node_service.stop()
raise
return keep_service_running()
except ImportError as e:
logger.info(f"MY Network service not available: {e}")
return None
except Exception as e:
logger.error(f"Failed to start MY Network service: {e}")
return None
async def run_fastapi_server():
"""Запустить FastAPI сервер."""
try:
import uvicorn
# Создать приложение
app = create_fastapi_app()
# Запустить MY Network сервис в фоне
my_network_task = await start_my_network_service()
# Конфигурация сервера
config = uvicorn.Config(
app,
host="0.0.0.0",
port=8000,
log_level="info"
)
server = uvicorn.Server(config)
# Run the server and MY Network in parallel
if my_network_task:
await asyncio.gather(
server.serve(),
my_network_task,
return_exceptions=True
)
else:
await server.serve()
except Exception as e:
logger.error(f"FastAPI server error: {e}")
raise
async def run_sanic_server():
"""Запустить Sanic сервер."""
try:
# Создать приложение
app = create_sanic_app()
# Запустить MY Network сервис в фоне
my_network_task = await start_my_network_service()
if my_network_task:
app.add_background_task(my_network_task)
# Запустить сервер
await app.create_server(
host="0.0.0.0",
port=8000,
debug=False,
access_log=True
)
except Exception as e:
logger.error(f"Sanic server error: {e}")
raise
async def run_minimal_server():
"""Запустить минимальный HTTP сервер."""
try:
from start_my_network import main as start_minimal
logger.info("Starting minimal MY Network server...")
start_minimal()
except Exception as e:
logger.error(f"Minimal server error: {e}")
raise
async def main():
"""Главная функция запуска."""
print("""
MY NETWORK v2.0
Distributed Content Protocol
Starting application with MY Network integration...
""")
# Determine the launch mode
app_mode = get_app_mode()
logger.info(f"Application mode: {app_mode}")
try:
if app_mode == 'fastapi':
logger.info("Starting FastAPI server with MY Network...")
await run_fastapi_server()
elif app_mode == 'sanic':
logger.info("Starting Sanic server with MY Network...")
await run_sanic_server()
else:
logger.info("Starting minimal MY Network server...")
await run_minimal_server()
except KeyboardInterrupt:
logger.info("Received keyboard interrupt, shutting down...")
except Exception as e:
logger.error(f"Application error: {e}")
raise
# FastAPI app for ASGI servers (uvicorn, gunicorn)
try:
app = create_fastapi_app()
# Add a startup event for MY Network
@app.on_event("startup")
async def startup_event():
"""Startup event для MY Network."""
my_network_task = await start_my_network_service()
if my_network_task:
# Run it as a background task
import asyncio
asyncio.create_task(my_network_task)
except Exception as e:
logger.warning(f"Could not create FastAPI app instance: {e}")
app = None
if __name__ == "__main__":
# Launched via "python app/main.py"
asyncio.run(main())
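# To serve the module-level "app" through an external ASGI server instead of
# running this file directly, something like the following is expected to work
# (uvicorn is one of the dependencies probed for in get_app_mode()):
#
#   uvicorn app.main:app --host 0.0.0.0 --port 8000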

View File

@ -0,0 +1,88 @@
"""Script to create an admin user for the application."""
import asyncio
import getpass
import sys
from datetime import datetime
from uuid import uuid4
from app.core.config import get_settings
from app.core.database import get_async_session
from app.core.models.user import User
from app.core.security import hash_password
async def create_admin_user():
"""Create an admin user interactively."""
print("🔧 My Uploader Bot - Admin User Creation")
print("=" * 50)
# Get user input
username = input("Enter admin username: ").strip()
if not username:
print("❌ Username is required")
sys.exit(1)
email = input("Enter admin email: ").strip()
if not email:
print("❌ Email is required")
sys.exit(1)
password = getpass.getpass("Enter admin password: ").strip()
if not password:
print("❌ Password is required")
sys.exit(1)
confirm_password = getpass.getpass("Confirm admin password: ").strip()
if password != confirm_password:
print("❌ Passwords do not match")
sys.exit(1)
first_name = input("Enter first name (optional): ").strip() or None
last_name = input("Enter last name (optional): ").strip() or None
try:
async with get_async_session() as session:
# Check if user already exists
from sqlalchemy import select
existing_user = await session.execute(
select(User).where(
(User.username == username) | (User.email == email)
)
)
if existing_user.scalar_one_or_none():
print("❌ User with this username or email already exists")
sys.exit(1)
# Create admin user
admin_user = User(
id=uuid4(),
username=username,
email=email,
password_hash=hash_password(password),
first_name=first_name,
last_name=last_name,
is_active=True,
is_verified=True,
is_superuser=True,
created_at=datetime.utcnow(),
updated_at=datetime.utcnow()
)
session.add(admin_user)
await session.commit()
print("✅ Admin user created successfully!")
print(f" Username: {username}")
print(f" Email: {email}")
print(f" User ID: {admin_user.id}")
print(" Status: Active, Verified, Superuser")
except Exception as e:
print(f"❌ Error creating admin user: {e}")
sys.exit(1)
if __name__ == "__main__":
asyncio.run(create_admin_user())
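# Usage note: run this script directly from the project root so that the "app"
# package is importable; it prompts interactively for the admin credentials and
# exits with a non-zero status on any validation or database error.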

View File

@ -0,0 +1,620 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>MY Network Monitor - Distributed Protocol v2.0</title>
<style>
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
body {
background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%);
color: #00ff41;
font-family: 'Courier New', 'Lucida Console', monospace;
min-height: 100vh;
overflow-x: auto;
animation: backgroundPulse 10s ease-in-out infinite alternate;
}
@keyframes backgroundPulse {
0% { background: linear-gradient(135deg, #0a0a0a 0%, #1a1a2e 50%, #16213e 100%); }
100% { background: linear-gradient(135deg, #0f0f0f 0%, #1f1f3e 50%, #1b274e 100%); }
}
.container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
}
.header {
text-align: center;
margin-bottom: 30px;
padding: 20px;
border: 2px solid #00ff41;
border-radius: 10px;
background: rgba(0, 255, 65, 0.05);
box-shadow: 0 0 20px rgba(0, 255, 65, 0.3);
}
.header h1 {
font-size: 2.5em;
text-shadow: 0 0 10px #00ff41;
animation: glow 2s ease-in-out infinite alternate;
}
@keyframes glow {
from { text-shadow: 0 0 10px #00ff41, 0 0 20px #00ff41; }
to { text-shadow: 0 0 20px #00ff41, 0 0 30px #00ff41, 0 0 40px #00ff41; }
}
.status-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
gap: 20px;
margin-bottom: 30px;
}
.status-card {
background: rgba(0, 0, 0, 0.7);
border: 1px solid #00ff41;
border-radius: 8px;
padding: 20px;
box-shadow: 0 0 15px rgba(0, 255, 65, 0.2);
transition: all 0.3s ease;
}
.status-card:hover {
transform: translateY(-5px);
box-shadow: 0 5px 25px rgba(0, 255, 65, 0.4);
}
.status-card h3 {
color: #00ff41;
margin-bottom: 15px;
font-size: 1.2em;
text-transform: uppercase;
border-bottom: 1px solid #00ff41;
padding-bottom: 5px;
}
.status-item {
display: flex;
justify-content: space-between;
margin: 10px 0;
padding: 5px 0;
}
.status-value {
color: #ffffff;
font-weight: bold;
}
.status-online { color: #00ff00; }
.status-offline { color: #ff0000; }
.status-warning { color: #ffff00; }
.ascii-display {
background: rgba(0, 0, 0, 0.9);
border: 2px solid #00ff41;
border-radius: 8px;
padding: 20px;
margin: 20px 0;
font-family: 'Courier New', monospace;
font-size: 11px;
line-height: 1.1;
white-space: pre;
overflow-x: auto;
box-shadow: inset 0 0 20px rgba(0, 255, 65, 0.1);
}
.control-panel {
display: flex;
gap: 15px;
margin: 20px 0;
flex-wrap: wrap;
}
.btn {
background: linear-gradient(45deg, #00ff41, #00cc33);
color: #000;
border: none;
padding: 12px 24px;
font-family: inherit;
font-weight: bold;
cursor: pointer;
border-radius: 5px;
text-transform: uppercase;
transition: all 0.3s ease;
box-shadow: 0 0 10px rgba(0, 255, 65, 0.3);
}
.btn:hover {
background: linear-gradient(45deg, #00cc33, #00ff41);
transform: translateY(-2px);
box-shadow: 0 5px 15px rgba(0, 255, 65, 0.5);
}
.btn:active {
transform: translateY(1px);
}
.network-topology {
background: rgba(0, 0, 0, 0.8);
border: 1px solid #00ff41;
border-radius: 8px;
padding: 20px;
margin: 20px 0;
min-height: 300px;
}
.node {
display: inline-block;
background: rgba(0, 255, 65, 0.1);
border: 2px solid #00ff41;
border-radius: 50%;
width: 80px;
height: 80px;
line-height: 76px;
text-align: center;
margin: 10px;
position: relative;
animation: pulse 3s ease-in-out infinite;
}
@keyframes pulse {
0%, 100% { transform: scale(1); }
50% { transform: scale(1.05); }
}
.node.this-node {
background: rgba(0, 255, 65, 0.3);
animation: strongPulse 2s ease-in-out infinite;
}
@keyframes strongPulse {
0%, 100% { box-shadow: 0 0 10px #00ff41; }
50% { box-shadow: 0 0 30px #00ff41, 0 0 40px #00ff41; }
}
.logs-panel {
background: rgba(0, 0, 0, 0.9);
border: 1px solid #00ff41;
border-radius: 8px;
padding: 15px;
margin: 20px 0;
height: 200px;
overflow-y: auto;
font-size: 12px;
}
.log-entry {
margin: 5px 0;
padding: 2px 0;
}
.log-timestamp {
color: #888;
margin-right: 10px;
}
.log-info { color: #00ff41; }
.log-warning { color: #ffff00; }
.log-error { color: #ff0000; }
.footer {
text-align: center;
margin-top: 40px;
padding: 20px;
border-top: 1px solid #00ff41;
color: #888;
}
.loading {
display: inline-block;
animation: spin 1s linear infinite;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
.metrics-bar {
background: rgba(0, 0, 0, 0.5);
height: 20px;
border-radius: 10px;
margin: 10px 0;
overflow: hidden;
border: 1px solid #00ff41;
}
.metrics-fill {
height: 100%;
background: linear-gradient(90deg, #00ff41, #00cc33);
transition: width 0.5s ease;
}
/* Responsive design */
@media (max-width: 768px) {
.status-grid {
grid-template-columns: 1fr;
}
.header h1 {
font-size: 1.8em;
}
.ascii-display {
font-size: 9px;
}
.control-panel {
justify-content: center;
}
}
</style>
</head>
<body>
<div class="container">
<div class="header">
<h1>MY NETWORK MONITOR</h1>
<p>Distributed Content Protocol v2.0</p>
<p id="last-update">Last Update: <span id="timestamp">Loading...</span></p>
</div>
<div class="control-panel">
<button class="btn" onclick="refreshData()">
<span id="refresh-icon">🔄</span> REFRESH
</button>
<button class="btn" onclick="toggleASCII()">
📊 ASCII VIEW
</button>
<button class="btn" onclick="startSync()">
⚡ START SYNC
</button>
<button class="btn" onclick="showLogs()">
📋 LOGS
</button>
</div>
<div class="status-grid">
<div class="status-card">
<h3>🖥️ Node Status</h3>
<div class="status-item">
<span>Node ID:</span>
<span class="status-value" id="node-id">Loading...</span>
</div>
<div class="status-item">
<span>Status:</span>
<span class="status-value" id="node-status">Loading...</span>
</div>
<div class="status-item">
<span>Uptime:</span>
<span class="status-value" id="node-uptime">Loading...</span>
</div>
<div class="status-item">
<span>Version:</span>
<span class="status-value" id="node-version">Loading...</span>
</div>
</div>
<div class="status-card">
<h3>🌐 Network Status</h3>
<div class="status-item">
<span>Connected Peers:</span>
<span class="status-value" id="peer-count">Loading...</span>
</div>
<div class="status-item">
<span>Known Nodes:</span>
<span class="status-value" id="known-nodes">Loading...</span>
</div>
<div class="status-item">
<span>Network Health:</span>
<span class="status-value" id="network-health">Loading...</span>
</div>
<div class="metrics-bar">
<div class="metrics-fill" id="network-bar" style="width: 0%"></div>
</div>
</div>
<div class="status-card">
<h3>⚡ Sync Engine</h3>
<div class="status-item">
<span>Sync Status:</span>
<span class="status-value" id="sync-status">Loading...</span>
</div>
<div class="status-item">
<span>Active Syncs:</span>
<span class="status-value" id="active-syncs">Loading...</span>
</div>
<div class="status-item">
<span>Queue Size:</span>
<span class="status-value" id="queue-size">Loading...</span>
</div>
<div class="status-item">
<span>Workers:</span>
<span class="status-value" id="workers-count">Loading...</span>
</div>
</div>
<div class="status-card">
<h3>📊 Content Stats</h3>
<div class="status-item">
<span>Total Items:</span>
<span class="status-value" id="content-items">Loading...</span>
</div>
<div class="status-item">
<span>Total Size:</span>
<span class="status-value" id="content-size">Loading...</span>
</div>
<div class="status-item">
<span>Replicated:</span>
<span class="status-value" id="replicated-count">Loading...</span>
</div>
<div class="metrics-bar">
<div class="metrics-fill" id="storage-bar" style="width: 0%"></div>
</div>
</div>
</div>
<div id="ascii-container" class="ascii-display" style="display: none;">
<div id="ascii-content">Loading ASCII status...</div>
</div>
<div class="network-topology" id="topology-view">
<h3>🔗 Network Topology</h3>
<div id="topology-nodes">
<div class="node this-node">THIS<br>NODE</div>
</div>
</div>
<div class="logs-panel" id="logs-panel" style="display: none;">
<h3>📋 System Logs</h3>
<div id="logs-content">
<div class="log-entry log-info">
<span class="log-timestamp">{{ monitoring_data.timestamp[:19] if monitoring_data.timestamp else 'N/A' }}</span>
<span>[INFO] MY Network Monitor initialized</span>
</div>
</div>
</div>
<div class="footer">
<p>MY Network Protocol - Decentralized Content Distribution System</p>
<p>Real-time monitoring dashboard with live updates every 15 seconds</p>
</div>
</div>
<script>
let asciiVisible = false;
let logsVisible = false;
let updateInterval;
// Data initialization
let monitoringData = {};
function setMonitoringData(data) {
monitoringData = data;
}
function initializeData() {
if (monitoringData.status === 'online') {
updateNodeInfo(monitoringData.node_info);
updatePeersInfo(monitoringData.peers_info);
updateSyncStatus(monitoringData.sync_status);
} else {
showOfflineStatus();
}
updateTimestamp();
}
function updateNodeInfo(nodeInfo) {
document.getElementById('node-id').textContent =
nodeInfo.node_id ? nodeInfo.node_id.substring(0, 16) + '...' : 'Unknown';
const statusElement = document.getElementById('node-status');
statusElement.textContent = nodeInfo.status ? nodeInfo.status.toUpperCase() : 'UNKNOWN';
statusElement.className = 'status-value ' +
(nodeInfo.status === 'running' ? 'status-online' : 'status-offline');
const uptime = nodeInfo.uptime || 0;
const hours = Math.floor(uptime / 3600);
const minutes = Math.floor((uptime % 3600) / 60);
document.getElementById('node-uptime').textContent = `${hours}h ${minutes}m`;
document.getElementById('node-version').textContent = nodeInfo.version || 'MY Network 2.0';
}
function updatePeersInfo(peersInfo) {
const peerCount = peersInfo.peer_count || 0;
document.getElementById('peer-count').textContent = peerCount;
document.getElementById('known-nodes').textContent = peersInfo.peers ? peersInfo.peers.length : 0;
const healthElement = document.getElementById('network-health');
const health = peerCount > 0 ? 'CONNECTED' : 'ISOLATED';
healthElement.textContent = health;
healthElement.className = 'status-value ' +
(peerCount > 0 ? 'status-online' : 'status-warning');
// Update the network indicator
const networkBar = document.getElementById('network-bar');
const healthPercent = Math.min(peerCount * 20, 100);
networkBar.style.width = healthPercent + '%';
// Update the topology
updateTopology(peersInfo.peers || []);
}
function updateSyncStatus(syncStatus) {
const isRunning = syncStatus.is_running || false;
const statusElement = document.getElementById('sync-status');
statusElement.textContent = isRunning ? 'RUNNING' : 'STOPPED';
statusElement.className = 'status-value ' +
(isRunning ? 'status-online' : 'status-offline');
document.getElementById('active-syncs').textContent = syncStatus.active_syncs || 0;
document.getElementById('queue-size').textContent = syncStatus.queue_size || 0;
document.getElementById('workers-count').textContent = syncStatus.workers_count || 0;
}
function updateTopology(peers) {
const topologyNodes = document.getElementById('topology-nodes');
// Remove existing nodes (except the central one)
const existingNodes = topologyNodes.querySelectorAll('.node:not(.this-node)');
existingNodes.forEach(node => node.remove());
// Add peer nodes
peers.slice(0, 8).forEach((peer, index) => {
const node = document.createElement('div');
node.className = 'node';
node.innerHTML = peer.node_id ? peer.node_id.substring(0, 4).toUpperCase() : 'PEER';
node.style.animationDelay = (index * 0.2) + 's';
topologyNodes.appendChild(node);
});
}
function showOfflineStatus() {
document.getElementById('node-status').textContent = 'OFFLINE';
document.getElementById('node-status').className = 'status-value status-offline';
document.getElementById('network-health').textContent = 'DISCONNECTED';
document.getElementById('network-health').className = 'status-value status-offline';
document.getElementById('sync-status').textContent = 'STOPPED';
document.getElementById('sync-status').className = 'status-value status-offline';
}
function updateTimestamp() {
const now = new Date();
document.getElementById('timestamp').textContent =
now.toISOString().substring(0, 19).replace('T', ' ') + ' UTC';
}
async function refreshData() {
const refreshIcon = document.getElementById('refresh-icon');
refreshIcon.className = 'loading';
refreshIcon.textContent = '⏳';
try {
const response = await fetch('/api/my/monitor/live');
const data = await response.json();
if (data.success) {
updateNodeInfo(data.data.node_info);
updatePeersInfo({
peer_count: data.data.network_stats.connected_peers,
peers: data.data.peers
});
updateSyncStatus(data.data.sync_status);
updateTimestamp();
addLogEntry('info', 'Data refreshed successfully');
}
} catch (error) {
addLogEntry('error', 'Failed to refresh data: ' + error.message);
} finally {
refreshIcon.className = '';
refreshIcon.textContent = '🔄';
}
}
async function toggleASCII() {
asciiVisible = !asciiVisible;
const container = document.getElementById('ascii-container');
if (asciiVisible) {
container.style.display = 'block';
try {
const response = await fetch('/api/my/monitor/ascii');
const data = await response.json();
document.getElementById('ascii-content').textContent = data.ascii;
} catch (error) {
document.getElementById('ascii-content').textContent = 'Error loading ASCII view';
}
} else {
container.style.display = 'none';
}
}
async function startSync() {
try {
const response = await fetch('/api/my/sync/start', { method: 'POST' });
const data = await response.json();
if (data.success) {
addLogEntry('info', 'Network sync started');
setTimeout(refreshData, 1000); // Refresh one second later
} else {
addLogEntry('error', 'Failed to start sync');
}
} catch (error) {
addLogEntry('error', 'Sync request failed: ' + error.message);
}
}
function showLogs() {
logsVisible = !logsVisible;
const logsPanel = document.getElementById('logs-panel');
logsPanel.style.display = logsVisible ? 'block' : 'none';
}
function addLogEntry(level, message) {
const logsContent = document.getElementById('logs-content');
const logEntry = document.createElement('div');
logEntry.className = `log-entry log-${level}`;
const timestamp = new Date().toISOString().substring(11, 19);
logEntry.innerHTML = `
<span class="log-timestamp">${timestamp}</span>
<span>[${level.toUpperCase()}] ${message}</span>
`;
logsContent.appendChild(logEntry);
logsContent.scrollTop = logsContent.scrollHeight;
// Limit the number of log entries
if (logsContent.children.length > 50) {
logsContent.removeChild(logsContent.firstChild);
}
}
// Automatic refresh every 15 seconds
function startAutoUpdate() {
updateInterval = setInterval(refreshData, 15000);
}
function stopAutoUpdate() {
if (updateInterval) {
clearInterval(updateInterval);
}
}
// Initialize on page load
document.addEventListener('DOMContentLoaded', function() {
// Read the monitoring data from the embedded JSON script element
const dataElement = document.getElementById('monitoring-data');
if (dataElement) {
try {
const data = JSON.parse(dataElement.textContent);
setMonitoringData(data);
} catch (e) {
console.error('Error parsing monitoring data:', e);
setMonitoringData({status: 'offline', error: 'Data parsing failed'});
}
}
initializeData();
startAutoUpdate();
addLogEntry('info', 'MY Network Monitor loaded');
});
// Stop updates when leaving the page
window.addEventListener('beforeunload', stopAutoUpdate);
</script>
<!-- Monitoring data -->
<script type="application/json" id="monitoring-data">{{ monitoring_data | tojson }}</script>
</body>
</html>

772
auto_deploy.sh Normal file
View File

@ -0,0 +1,772 @@
#!/bin/bash
# MY Network Bootstrap Node - automated deployment
# Domain: my-public-node-3.projscale.dev
# Server: 2.58.65.188
# A single command for a full deployment
set -e
echo "🚀 MY Network Bootstrap Node - Автоматическое развертывание"
echo "=========================================================="
echo "Домен: my-public-node-3.projscale.dev"
echo "Режим: Bootstrap Node (Primary)"
echo "Порты: Только 443 (HTTPS)"
echo ""
# Check for root privileges
if [[ $EUID -ne 0 ]]; then
echo "❌ Run as root: sudo bash auto_deploy.sh"
exit 1
fi
DOMAIN="my-public-node-3.projscale.dev"
EMAIL="admin@projscale.dev"
PROJECT_DIR="/opt/my-network"
echo "📋 Конфигурация:"
echo " Домен: $DOMAIN"
echo " Директория: $PROJECT_DIR"
echo " Внешний порт: 443 только"
echo ""
# Update the system
echo "🔄 Updating the system..."
apt update && apt upgrade -y
# Install base packages
echo "📦 Installing packages..."
apt install -y \
docker.io \
docker-compose \
git \
curl \
wget \
unzip \
python3 \
python3-pip \
python3-venv \
nginx \
certbot \
python3-certbot-nginx \
ufw \
fail2ban \
htop \
tree \
nano \
jq \
net-tools \
software-properties-common
# Configure Docker
echo "🐳 Configuring Docker..."
systemctl enable docker
systemctl start docker
# Create the project layout
echo "📁 Creating directory layout..."
mkdir -p $PROJECT_DIR
mkdir -p /opt/storage /opt/logs
chmod 755 /opt/storage /opt/logs
cd $PROJECT_DIR
# Create the MY Network project structure
echo "🏗️ Creating MY Network project structure..."
mkdir -p my-uploader-bot/{app,static,templates}
mkdir -p my-uploader-bot/app/{core,api}
mkdir -p my-uploader-bot/app/core/{my_network,models,background}
mkdir -p my-uploader-bot/app/api/routes
mkdir -p my-uploader-bot/app/templates
cd my-uploader-bot
# Create requirements.txt
echo "📝 Creating requirements.txt..."
cat > requirements_new.txt << 'EOF'
fastapi==0.104.1
sanic==23.6.0
uvicorn==0.24.0
pydantic==2.5.0
sqlalchemy==2.0.23
alembic==1.12.1
pymysql==1.1.0
cryptography==41.0.7
redis==5.0.1
requests==2.31.0
aiohttp==3.9.1
websockets==12.0
python-multipart==0.0.6
python-jose[cryptography]==3.3.0
passlib[bcrypt]==1.7.4
jinja2==3.1.2
python-dotenv==1.0.0
aiomysql==0.2.0
aioredis==2.0.1
httpx==0.25.2
schedule==1.2.0
psutil==5.9.6
netifaces==0.11.0
matplotlib==3.8.2
numpy==1.25.2
pillow==10.1.0
qrcode==7.4.2
prometheus-client==0.19.0
EOF
# Create the main configuration
echo "⚙️ Creating configuration..."
cat > .env << EOF
# MY Network Bootstrap Node Configuration
NODE_ID=bootstrap-$(date +%s)
NODE_TYPE=bootstrap
NODE_PORT=15100
DOMAIN=$DOMAIN
EMAIL=$EMAIL
# Bootstrap Settings
IS_BOOTSTRAP=true
BOOTSTRAP_NODES=[]
MAX_PEERS=100
SYNC_INTERVAL=300
PUBLIC_NODE=true
# Database
DB_HOST=localhost
DB_PORT=3306
DB_NAME=my_network_bootstrap
DB_USER=my_network_user
DB_PASSWORD=$(openssl rand -base64 32)
# Redis
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=$(openssl rand -base64 32)
# Security
SECRET_KEY=$(openssl rand -base64 64)
JWT_SECRET=$(openssl rand -base64 32)
# Paths
STORAGE_PATH=/opt/storage
LOG_PATH=/opt/logs
# API Settings
API_RATE_LIMIT=100
MONITOR_RATE_LIMIT=10
ENABLE_PUBLIC_API=true
ENABLE_CORS=true
EOF
# Create bootstrap.json
echo "🌐 Creating bootstrap.json..."
cat > bootstrap.json << EOF
{
"version": "2.0",
"network_id": "my-network-main",
"bootstrap_nodes": [
{
"id": "bootstrap-primary",
"host": "$DOMAIN",
"port": 443,
"ssl": true,
"public": true,
"region": "eu-central",
"capacity": "high",
"services": ["api", "sync", "monitor", "storage"],
"last_seen": "$(date -u +%Y-%m-%dT%H:%M:%SZ)"
}
],
"network_config": {
"protocol_version": "2.0",
"sync_interval": 300,
"max_peers": 100,
"chunk_size": 1048576,
"compression": true,
"encryption": true,
"heartbeat_interval": 60
},
"api_endpoints": {
"base_url": "https://$DOMAIN",
"health": "/api/my/health",
"node_info": "/api/my/node/info",
"peers": "/api/my/node/peers",
"sync": "/api/my/sync/status",
"monitor": "/api/my/monitor/",
"bootstrap": "/api/my/bootstrap/config"
}
}
EOF
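# Peer nodes can later fetch this bootstrap configuration from the public API,
# for example (jq is installed above; the endpoint is served by app/main.py below):
#   curl -s https://my-public-node-3.projscale.dev/api/my/bootstrap/config | jq .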
# Create the Docker Compose configuration
echo "🐳 Creating Docker Compose configuration..."
cat > docker-compose.new.yml << EOF
version: '3.8'
services:
mariadb:
image: mariadb:11.2
container_name: my_network_db
environment:
MYSQL_ROOT_PASSWORD: \${DB_PASSWORD}
MYSQL_DATABASE: \${DB_NAME}
MYSQL_USER: \${DB_USER}
MYSQL_PASSWORD: \${DB_PASSWORD}
ports:
- "127.0.0.1:3306:3306"
volumes:
- mariadb_data:/var/lib/mysql
restart: unless-stopped
redis:
image: redis:7-alpine
container_name: my_network_redis
command: redis-server --requirepass \${REDIS_PASSWORD}
ports:
- "127.0.0.1:6379:6379"
volumes:
- redis_data:/data
restart: unless-stopped
volumes:
mariadb_data:
redis_data:
EOF
# Create the main application
echo "🐍 Creating the Python application..."
# app/main.py
cat > app/main.py << 'EOF'
#!/usr/bin/env python3
"""MY Network Bootstrap Node - Main Application"""
import asyncio
import json
import os
import sys
import logging
from datetime import datetime
from pathlib import Path
# Add the application path to sys.path
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
try:
from fastapi import FastAPI, HTTPException
from fastapi.responses import JSONResponse, HTMLResponse
from fastapi.middleware.cors import CORSMiddleware
import uvicorn
USE_FASTAPI = True
except ImportError:
try:
from sanic import Sanic, response
USE_FASTAPI = False
except ImportError:
print("Neither FastAPI nor Sanic available")
sys.exit(1)
# Logging configuration
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.FileHandler('/opt/logs/my-network.log'),
logging.StreamHandler()
]
)
logger = logging.getLogger(__name__)
# Configuration
NODE_ID = os.getenv('NODE_ID', f'bootstrap-{int(datetime.now().timestamp())}')
DOMAIN = os.getenv('DOMAIN', 'my-public-node-3.projscale.dev')
NODE_PORT = int(os.getenv('NODE_PORT', 15100))
IS_BOOTSTRAP = os.getenv('IS_BOOTSTRAP', 'true').lower() == 'true'
if USE_FASTAPI:
app = FastAPI(
title="MY Network Bootstrap Node",
description="MY Network v2.0 Bootstrap Node API",
version="2.0.0"
)
# CORS for the public API
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.get("/api/my/health")
async def health_check():
return JSONResponse({
"status": "healthy",
"node_id": NODE_ID,
"node_type": "bootstrap",
"domain": DOMAIN,
"timestamp": datetime.utcnow().isoformat(),
"services": ["api", "sync", "monitor", "storage"]
})
@app.get("/api/my/node/info")
async def node_info():
return JSONResponse({
"node_id": NODE_ID,
"node_type": "bootstrap",
"domain": DOMAIN,
"port": 443,
"ssl": True,
"public": True,
"region": "eu-central",
"capacity": "high",
"services": ["api", "sync", "monitor", "storage"],
"protocol_version": "2.0",
"last_seen": datetime.utcnow().isoformat()
})
@app.get("/api/my/bootstrap/config")
async def bootstrap_config():
try:
with open('bootstrap.json', 'r') as f:
config = json.load(f)
return JSONResponse(config)
except Exception as e:
logger.error(f"Error loading bootstrap config: {e}")
raise HTTPException(status_code=500, detail="Config not available")
@app.get("/api/my/monitor/")
async def monitor_dashboard():
html_content = """
<!DOCTYPE html>
<html>
<head>
<title>MY Network Bootstrap Monitor</title>
<style>
body { font-family: monospace; background: #000; color: #0f0; margin: 20px; }
h1 { color: #ff0; text-align: center; }
.status { margin: 10px 0; padding: 10px; border: 1px solid #0f0; }
.healthy { border-color: #0f0; }
.error { border-color: #f00; color: #f00; }
</style>
</head>
<body>
<h1>🚀 MY Network Bootstrap Node</h1>
<div class="status healthy">
<h3>Node Status: ACTIVE</h3>
<p>Node ID: """ + NODE_ID + """</p>
<p>Domain: """ + DOMAIN + """</p>
<p>Type: Bootstrap Primary</p>
<p>Services: API, Sync, Monitor, Storage</p>
</div>
<div class="status healthy">
<h3>Network Status</h3>
<p>Protocol Version: 2.0</p>
<p>Max Peers: 100</p>
<p>Public Access: Enabled</p>
<p>SSL: Enabled</p>
</div>
<script>
setInterval(() => location.reload(), 30000);
</script>
</body>
</html>
"""
return HTMLResponse(content=html_content)
if __name__ == "__main__":
logger.info(f"Starting MY Network Bootstrap Node on port {NODE_PORT}")
uvicorn.run(app, host="0.0.0.0", port=NODE_PORT)
else:
# Sanic version
app = Sanic("MY_Network_Bootstrap")
@app.route("/api/my/health")
async def health_check(request):
return response.json({
"status": "healthy",
"node_id": NODE_ID,
"node_type": "bootstrap",
"domain": DOMAIN,
"timestamp": datetime.utcnow().isoformat(),
"services": ["api", "sync", "monitor", "storage"]
})
@app.route("/api/my/node/info")
async def node_info(request):
return response.json({
"node_id": NODE_ID,
"node_type": "bootstrap",
"domain": DOMAIN,
"port": 443,
"ssl": True,
"public": True,
"region": "eu-central",
"capacity": "high",
"services": ["api", "sync", "monitor", "storage"],
"protocol_version": "2.0",
"last_seen": datetime.utcnow().isoformat()
})
if __name__ == "__main__":
logger.info(f"Starting MY Network Bootstrap Node (Sanic) on port {NODE_PORT}")
app.run(host="0.0.0.0", port=NODE_PORT)
EOF
# Create the systemd service
echo "⚙️ Creating systemd service..."
cat > /etc/systemd/system/my-network-bootstrap.service << EOF
[Unit]
Description=MY Network Bootstrap Node
After=network.target docker.service
Requires=docker.service
[Service]
Type=simple
User=root
WorkingDirectory=$PROJECT_DIR/my-uploader-bot
Environment=PATH=$PROJECT_DIR/my-uploader-bot/venv/bin
EnvironmentFile=$PROJECT_DIR/my-uploader-bot/.env
ExecStart=$PROJECT_DIR/my-uploader-bot/venv/bin/python app/main.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
# Install Python dependencies
echo "🐍 Installing Python dependencies..."
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements_new.txt
# Start the Docker services
echo "🐳 Starting Docker services..."
docker-compose -f docker-compose.new.yml up -d
# Wait for the database
echo "⏳ Waiting for the database to start..."
sleep 30
# Configure Nginx with Cloudflare
echo "🌐 Configuring Nginx..."
cat > /etc/nginx/sites-available/my-network-bootstrap << EOF
server {
listen 80;
server_name $DOMAIN;
# SSL and the HTTP->HTTPS redirect are added later by "certbot --nginx --redirect",
# which rewrites this block for port 443; referencing the certificate files here
# before they exist would make the "nginx -t" check below fail.
# Cloudflare IP ranges
set_real_ip_from 173.245.48.0/20;
set_real_ip_from 103.21.244.0/22;
set_real_ip_from 103.22.200.0/22;
set_real_ip_from 103.31.4.0/22;
set_real_ip_from 141.101.64.0/18;
set_real_ip_from 108.162.192.0/18;
set_real_ip_from 190.93.240.0/20;
set_real_ip_from 188.114.96.0/20;
set_real_ip_from 197.234.240.0/22;
set_real_ip_from 198.41.128.0/17;
set_real_ip_from 162.158.0.0/15;
set_real_ip_from 104.16.0.0/13;
set_real_ip_from 104.24.0.0/14;
set_real_ip_from 172.64.0.0/13;
set_real_ip_from 131.0.72.0/22;
real_ip_header CF-Connecting-IP;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header Referrer-Policy "strict-origin-when-cross-origin";
add_header X-MY-Network-Node-Type "bootstrap";
add_header X-MY-Network-Version "2.0";
server_tokens off;
# Rate limiting (zones are defined in /etc/nginx/conf.d/my-network-ratelimit.conf)
# Bootstrap API endpoints (публичные)
location /api/my/ {
limit_req zone=api burst=200 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
# CORS headers
add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS";
add_header Access-Control-Allow-Headers "Content-Type, Authorization";
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
if (\$request_method = 'OPTIONS') {
add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS";
add_header Access-Control-Allow-Headers "Content-Type, Authorization";
add_header Content-Length 0;
add_header Content-Type text/plain;
return 200;
}
}
# Monitor (ограниченный доступ)
location /api/my/monitor {
limit_req zone=monitor burst=10 nodelay;
allow 127.0.0.1;
allow ::1;
deny all;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Redirect root to health
location = / {
return 301 /api/my/health;
}
# Block sensitive paths
location ~ /\.(?!well-known) {
deny all;
}
location ~ ^/(config|\.env|requirements|docker-compose) {
deny all;
}
}
EOF
# Enable the site
ln -sf /etc/nginx/sites-available/my-network-bootstrap /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Check the nginx configuration
nginx -t
# Start the application
echo "🚀 Starting the MY Network service..."
systemctl daemon-reload
systemctl enable my-network-bootstrap
systemctl start my-network-bootstrap
# Wait for the application to start
echo "⏳ Waiting for the application to start..."
sleep 10
# Obtain the SSL certificate
echo "🔐 Obtaining SSL certificate..."
certbot --nginx -d $DOMAIN --email $EMAIL --agree-tos --non-interactive --redirect
# Configure the firewall (ONLY port 443!)
echo "🔥 Configuring firewall - port 443 ONLY..."
ufw --force reset
ufw default deny incoming
ufw default allow outgoing
# SSH (detect the current port)
SSH_PORT=$(ss -tlnp | grep sshd | grep -o ':[0-9]*' | head -1 | cut -d: -f2)
if [[ -n "$SSH_PORT" ]]; then
echo "🔑 Разрешение SSH на порту $SSH_PORT"
ufw allow $SSH_PORT/tcp comment 'SSH Access'
fi
# HTTP and HTTPS for Cloudflare
ufw allow 80/tcp comment 'HTTP Redirect'
ufw allow 443/tcp comment 'HTTPS Only'
# Block direct access to the application
ufw deny 15100 comment 'Block direct app access'
ufw deny 3306 comment 'Block MySQL access'
ufw deny 6379 comment 'Block Redis access'
ufw --force enable
# Configure fail2ban
echo "🚫 Configuring fail2ban..."
cat > /etc/fail2ban/jail.local << EOF
[DEFAULT]
bantime = 3600
findtime = 600
maxretry = 5
[sshd]
enabled = true
port = $SSH_PORT
filter = sshd
logpath = /var/log/auth.log
[nginx-http-auth]
enabled = true
filter = nginx-http-auth
logpath = /var/log/nginx/error.log
[nginx-limit-req]
enabled = true
filter = nginx-limit-req
logpath = /var/log/nginx/error.log
maxretry = 10
EOF
systemctl enable fail2ban
systemctl start fail2ban
# Start nginx
systemctl enable nginx
systemctl start nginx
# Configure monitoring
echo "📊 Configuring monitoring..."
cat > /opt/bootstrap-monitor.sh << 'MONITOR_EOF'
#!/bin/bash
LOG_FILE="/opt/logs/bootstrap-monitor.log"
DATE=$(date '+%Y-%m-%d %H:%M:%S')
# Check services
BOOTSTRAP_STATUS=$(systemctl is-active my-network-bootstrap)
NGINX_STATUS=$(systemctl is-active nginx)
DOCKER_STATUS=$(systemctl is-active docker)
# Check API
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" https://my-public-node-3.projscale.dev/api/my/health 2>/dev/null || echo "FAIL")
# System stats
DISK_USAGE=$(df -h /opt | awk 'NR==2 {print $5}' | sed 's/%//')
MEM_USAGE=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100.0}')
CPU_LOAD=$(uptime | awk -F'load average:' '{print $2}' | awk '{print $1}' | sed 's/,//')
# Log status
echo "[$DATE] Bootstrap: $BOOTSTRAP_STATUS, Nginx: $NGINX_STATUS, Docker: $DOCKER_STATUS, API: $API_STATUS, Disk: ${DISK_USAGE}%, Mem: ${MEM_USAGE}%, Load: $CPU_LOAD" >> $LOG_FILE
# Alert if critical
if [[ "$BOOTSTRAP_STATUS" != "active" || "$NGINX_STATUS" != "active" || "$DOCKER_STATUS" != "active" || "$API_STATUS" != "200" ]]; then
echo "[$DATE] ALERT: Bootstrap node critical issue detected!" >> $LOG_FILE
fi
MONITOR_EOF
chmod +x /opt/bootstrap-monitor.sh
# Add to cron
(crontab -l 2>/dev/null; echo "*/2 * * * * /opt/bootstrap-monitor.sh") | crontab -
(crontab -l 2>/dev/null; echo "0 12 * * * /usr/bin/certbot renew --quiet") | crontab -
# Final system check
echo "🔍 Running the final system check..."
sleep 10
echo ""
echo "📊 Service status:"
systemctl status my-network-bootstrap --no-pager -l | head -10
echo ""
systemctl status nginx --no-pager -l | head -10
echo ""
echo "🔥 Firewall статус:"
ufw status numbered
echo ""
echo "🌐 Проверка API:"
curl -s -I https://$DOMAIN/api/my/health || echo "API недоступен"
echo ""
echo "🔐 SSL сертификаты:"
certbot certificates
echo ""
echo "🌍 Открытые порты:"
netstat -tlnp | grep LISTEN
# Создание итогового отчета
cat > /opt/bootstrap-deployment-report.txt << EOF
MY Network Bootstrap Node - Отчет о развертывании
===============================================
Время развертывания: $(date)
Домен: $DOMAIN
Сервер: $(hostname -I | awk '{print $1}')
✅ УСПЕШНО РАЗВЕРНУТО:
- MY Network Bootstrap Node (версия 2.0)
- Nginx proxy с SSL (порт 443 только)
- MariaDB и Redis в Docker
- UFW Firewall (SSH + 443 только)
- Fail2ban защита
- Автоматический мониторинг
- SSL сертификат Let's Encrypt
🌐 ДОСТУПНЫЕ ENDPOINTS:
- Health Check: https://$DOMAIN/api/my/health
- Node Info: https://$DOMAIN/api/my/node/info
- Bootstrap Config: https://$DOMAIN/api/my/bootstrap/config
- Monitor Dashboard: https://$DOMAIN/api/my/monitor/ (localhost only)
🔧 УПРАВЛЕНИЕ:
- Статус: systemctl status my-network-bootstrap nginx docker
- Логи: journalctl -u my-network-bootstrap -f
- Монитор: tail -f /opt/logs/bootstrap-monitor.log
- Firewall: ufw status
- SSL: certbot certificates
🔒 БЕЗОПАСНОСТЬ:
- Открыт только порт 443 (HTTPS)
- Все внутренние сервисы заблокированы
- SSL шифрование обязательно
- Rate limiting активен
- Fail2ban защита от атак
📂 ФАЙЛЫ КОНФИГУРАЦИИ:
- $PROJECT_DIR/my-uploader-bot/.env
- $PROJECT_DIR/my-uploader-bot/bootstrap.json
- /etc/nginx/sites-available/my-network-bootstrap
- /etc/systemd/system/my-network-bootstrap.service
Bootstrap узел готов к работе как основной узел сети!
EOF
echo ""
echo "✅ MY Network Bootstrap Node развернут успешно!"
echo "=============================================="
echo "🌐 Домен: https://$DOMAIN"
echo "🔐 SSL: Активен и настроен"
echo "🔥 Firewall: Только порт 443 открыт"
echo "🚀 Статус: Bootstrap узел активен"
echo "📊 Мониторинг: Каждые 2 минуты"
echo ""
echo "🔍 Проверьте работу:"
echo " curl https://$DOMAIN/api/my/health"
echo " curl https://$DOMAIN/api/my/node/info"
echo " curl https://$DOMAIN/api/my/bootstrap/config"
echo ""
echo "📄 Полный отчет: /opt/bootstrap-deployment-report.txt"
echo ""
echo "🎯 Bootstrap узел готов принимать подключения других узлов!"
# End of script
exit 0

82
auto_ssh_deploy.exp Executable file
View File

@ -0,0 +1,82 @@
#!/usr/bin/expect -f
set timeout 1800
set password "DMUEjmnh6mDs/qlzhpjDzQ"
set server "2.58.65.188"
log_user 1
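# Usage sketch (assumes /tmp/auto_deploy.sh was already copied to the server,
# e.g. with: scp auto_deploy.sh service@2.58.65.188:/tmp/):
#   ./auto_ssh_deploy.exp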
spawn ssh -o StrictHostKeyChecking=no service@$server
expect {
"password:" {
send "$password\r"
exp_continue
}
"$ " {
        # Switch to root
send "sudo su -\r"
expect {
"password:" {
send "$password\r"
expect "# "
}
"# " {
            # Already root
}
}
        # Verify and launch the deployment script
send "ls -la /tmp/auto_deploy.sh\r"
expect "# "
send "chmod +x /tmp/auto_deploy.sh\r"
expect "# "
send "echo 'Starting MY Network Bootstrap deployment...'\r"
expect "# "
        # Run the deployment script
send "/tmp/auto_deploy.sh\r"
        # Wait for the installation to finish (up to 30 minutes)
expect {
"Bootstrap узел готов к работе!" {
send "echo 'Deployment completed successfully!'\r"
expect "# "
                # Check the result
send "systemctl status my-network-bootstrap --no-pager\r"
expect "# "
send "curl -s https://my-public-node-3.projscale.dev/api/my/health || echo 'API check failed'\r"
expect "# "
send "ufw status numbered\r"
expect "# "
send "echo 'MY Network Bootstrap Node deployed successfully!'\r"
expect "# "
interact
}
"error" {
send "echo 'Deployment error occurred'\r"
interact
}
timeout {
send "echo 'Deployment timeout - checking status...'\r"
send "systemctl status my-network-bootstrap nginx docker\r"
interact
}
}
}
timeout {
puts "Connection timeout"
exit 1
}
eof {
puts "Connection closed"
exit 1
}
}

244
bootstrap.json Normal file
View File

@ -0,0 +1,244 @@
{
"version": "1.0.0",
"network_id": "my-network-mainnet",
"network_name": "MY Distributed Content Network",
"protocol_version": "1.0.0",
"created_at": "2025-01-02T15:00:00Z",
"description": "Bootstrap configuration for MY Network - Distributed content replication system",
"bootstrap_nodes": [
{
"id": "bootstrap-eu-001",
"address": "my://bootstrap-eu-1.mynetwork.io:8080",
"public_key": "ed25519:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
"region": "eu-west-1",
"country": "DE",
"weight": 100,
"features": [
"content_sync",
"consensus",
"dht",
"bootstrap",
"monitoring"
],
"metadata": {
"provider": "hetzner",
"datacenter": "fsn1",
"bandwidth_gbps": 10,
"storage_tb": 100
}
},
{
"id": "bootstrap-us-001",
"address": "my://bootstrap-us-1.mynetwork.io:8080",
"public_key": "ed25519:BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
"region": "us-east-1",
"country": "US",
"weight": 100,
"features": [
"content_sync",
"consensus",
"dht",
"bootstrap",
"monitoring"
],
"metadata": {
"provider": "aws",
"datacenter": "us-east-1a",
"bandwidth_gbps": 25,
"storage_tb": 200
}
},
{
"id": "bootstrap-asia-001",
"address": "my://bootstrap-asia-1.mynetwork.io:8080",
"public_key": "ed25519:CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
"region": "ap-southeast-1",
"country": "SG",
"weight": 90,
"features": [
"content_sync",
"consensus",
"dht",
"bootstrap"
],
"metadata": {
"provider": "digitalocean",
"datacenter": "sgp1",
"bandwidth_gbps": 5,
"storage_tb": 50
}
}
],
"consensus": {
"algorithm": "pbft",
"min_quorum": 3,
"consensus_threshold": 0.67,
"timeout_seconds": 30,
"retry_attempts": 3,
"max_byzantine_nodes": 1,
"view_change_timeout": 60,
"checkpoint_interval": 100
},
"sync_settings": {
"sync_interval_seconds": 60,
"initial_sync_timeout": 300,
"max_batch_size": 100,
"max_concurrent_syncs": 5,
"compression_enabled": true,
"compression_algorithm": "gzip",
"compression_level": 6,
"encryption_enabled": true,
"encryption_algorithm": "aes-256-gcm",
"delta_sync_enabled": true,
"checksum_verification": true
},
"network_settings": {
"max_peers": 50,
"max_inbound_connections": 100,
"max_outbound_connections": 25,
"connection_timeout_seconds": 30,
"keepalive_interval_seconds": 60,
"max_message_size_bytes": 16777216,
"rate_limit_per_peer": 1000,
"blacklist_duration_hours": 24
},
"content_settings": {
"max_content_size_bytes": 1073741824,
"supported_content_types": [
"audio/*",
"video/*",
"image/*",
"text/*",
"application/pdf",
"application/json"
],
"replication_factor": 3,
"min_replicas": 2,
"max_replicas": 10,
"gc_interval_hours": 24,
"cache_size_mb": 1024,
"preview_generation": true,
"thumbnail_sizes": [64, 128, 256, 512]
},
"security_settings": {
"require_tls": true,
"min_tls_version": "1.3",
"certificate_verification": true,
"peer_authentication": true,
"message_signing": true,
"signature_algorithm": "ed25519",
"key_rotation_days": 90,
"audit_logging": true,
"rate_limiting": true,
"ddos_protection": true
},
"storage_settings": {
"storage_path": "./storage/my-network",
"max_storage_gb": 1000,
"storage_cleanup_threshold": 0.9,
"backup_enabled": true,
"backup_interval_hours": 6,
"backup_retention_days": 30,
"indexing_enabled": true,
"deduplication_enabled": true
},
"monitoring_settings": {
"metrics_enabled": true,
"metrics_interval_seconds": 30,
"health_check_interval_seconds": 60,
"log_level": "info",
"log_rotation_mb": 100,
"log_retention_days": 7,
"alerting_enabled": true,
"webhook_alerts": [],
"telegram_alerts": {
"enabled": false,
"bot_token": "",
"chat_ids": []
}
},
"api_settings": {
"listen_address": "0.0.0.0",
"listen_port": 8080,
"api_prefix": "/api/my",
"cors_enabled": true,
"cors_origins": ["*"],
"auth_required": false,
"rate_limit_rpm": 1000,
"timeout_seconds": 30
},
"development_settings": {
"debug_mode": false,
"test_network": false,
"mock_bootstrap": false,
"simulation_mode": false,
"verbose_logging": false,
"profiling_enabled": false
},
"feature_flags": {
"experimental_dht": false,
"advanced_routing": true,
"content_caching": true,
"peer_scoring": true,
"adaptive_replication": false,
"quantum_encryption": false,
"ml_optimization": false
},
"regional_settings": {
"eu-west-1": {
"preferred_nodes": ["bootstrap-eu-001"],
"max_latency_ms": 100,
"compliance": ["gdpr"],
"data_residency": true
},
"us-east-1": {
"preferred_nodes": ["bootstrap-us-001"],
"max_latency_ms": 150,
"compliance": ["ccpa"],
"data_residency": false
},
"ap-southeast-1": {
"preferred_nodes": ["bootstrap-asia-001"],
"max_latency_ms": 200,
"compliance": [],
"data_residency": false
}
},
"emergency_settings": {
"emergency_mode": false,
"emergency_contacts": [
"admin@mynetwork.io",
"security@mynetwork.io"
],
"auto_isolation": true,
"failsafe_mode": {
"enabled": true,
"max_cpu_percent": 95,
"max_memory_percent": 95,
"max_disk_percent": 98
}
},
"version_compatibility": {
"min_supported_version": "1.0.0",
"max_supported_version": "1.9.9",
"deprecated_versions": [],
"upgrade_required_versions": []
},
"checksum": "sha256:1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12",
"signature": "ed25519:DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
}

194
config/redis.conf Normal file
View File

@ -0,0 +1,194 @@
# Redis configuration for my-uploader-bot
# Optimized for production use with caching and session storage
# Network configuration
bind 0.0.0.0
port 6379
tcp-backlog 511
timeout 300
tcp-keepalive 300
# General configuration
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
# Security
# requirepass your_redis_password_here
# rename-command FLUSHDB ""
# rename-command FLUSHALL ""
# rename-command DEBUG ""
# Memory management
maxmemory 2gb
maxmemory-policy allkeys-lru
maxmemory-samples 5
# Persistence configuration
# RDB (Redis Database) snapshots
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /data
# AOF (Append Only File)
appendonly yes
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes
# Slow log
slowlog-log-slower-than 10000
slowlog-max-len 128
# Latency monitoring
latency-monitor-threshold 100
# Client configuration
maxclients 10000
# Advanced configuration
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100
# Active rehashing
activerehashing yes
# Client output buffer limits
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
# Client query buffer limit
client-query-buffer-limit 1gb
# Protocol buffer limit
proto-max-bulk-len 512mb
# Frequency of background tasks
hz 10
# Dynamic HZ
dynamic-hz yes
# AOF rewrite incremental fsync
aof-rewrite-incremental-fsync yes
# RDB save incremental fsync
rdb-save-incremental-fsync yes
# Jemalloc background thread
jemalloc-bg-thread yes
# TLS Configuration (if needed)
# port 0
# tls-port 6380
# tls-cert-file redis.crt
# tls-key-file redis.key
# tls-ca-cert-file ca.crt
# Modules (if needed)
# loadmodule /path/to/module.so
# Custom configuration for my-uploader-bot
# Session storage database
# Database 0: General cache
# Database 1: Session storage
# Database 2: Upload queue
# Database 3: Blockchain cache
# Database 4: User activity
# Database 5: Rate limiting
# Database 6: Temporary data
# Database 7: Analytics
# Database 8-15: Reserved for future use
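# Example of how the application is expected to select a database (sketch only,
# assuming the redis-py client is used):
#   redis.Redis(host="redis", port=6379, db=1)   # session storage
#   redis.Redis(host="redis", port=6379, db=5)   # rate limiting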
# Performance tuning for caching workload
# Optimize for read-heavy workload
replica-read-only yes
replica-serve-stale-data yes
replica-priority 100
# Memory usage optimization
# Use jemalloc for better memory management
# Disable transparent huge pages for better latency
# Monitoring and debugging
# Enable keyspace notifications for specific events
notify-keyspace-events "Ex"
# Client timeout for idle connections
# timeout 300 (already set above)
# TCP keepalive
# tcp-keepalive 300 (already set above)
# Background saving
# stop-writes-on-bgsave-error yes (already set above)
# Compression
# rdbcompression yes (already set above)
# Checksum
# rdbchecksum yes (already set above)
# Auto memory report
oom-score-adj no
# Disable some potentially dangerous commands in production
# rename-command SHUTDOWN SHUTDOWN_MYUPLOADER
# rename-command CONFIG CONFIG_MYUPLOADER
# Enable protected mode (default)
protected-mode yes
# Disable dangerous commands
# rename-command EVAL ""
# rename-command SCRIPT ""
# Set maximum memory usage warning
# When memory usage reaches 80% of maxmemory, log warnings
# This is handled by application monitoring
# Custom settings for upload service
# Optimize for frequent key expiration (Redis 6+ uses active-expire-effort, valid range 1-10)
active-expire-effort 2
# Optimize for small objects (typical for session data)
# Already configured above with ziplist settings
# Enable lazy freeing for better performance
lazyfree-lazy-eviction yes
lazyfree-lazy-expire yes
lazyfree-lazy-server-del yes
replica-lazy-flush yes
# IO threading (Redis 6.0+)
# io-threads 4
# io-threads-do-reads yes
# THP (Transparent Huge Pages) recommendation
# echo never > /sys/kernel/mm/transparent_hugepage/enabled
# Add this to your system configuration
# Kernel overcommit memory setting
# echo 1 > /proc/sys/vm/overcommit_memory
# Add this to your system configuration

546
deploy_bootstrap_node.sh Normal file
View File

@ -0,0 +1,546 @@
#!/bin/bash
# MY Network Bootstrap Node Deployment Script
# Domain: my-public-node-3.projscale.dev
# Server: 2.58.65.188
# Only port 443 is exposed externally, through the nginx proxy
set -e
echo "🚀 MY Network Bootstrap Node Deployment"
echo "======================================="
echo "Домен: my-public-node-3.projscale.dev"
echo "Сервер: 2.58.65.188"
echo "Режим: Bootstrap Node (Primary)"
echo ""
# Check for root privileges
if [[ $EUID -ne 0 ]]; then
    echo "❌ This script must be run as root"
    echo "Use: sudo bash deploy_bootstrap_node.sh"
exit 1
fi
DOMAIN="my-public-node-3.projscale.dev"
EMAIL="admin@projscale.dev"
NODE_TYPE="bootstrap"
echo "📋 Конфигурация Bootstrap узла:"
echo " Домен: $DOMAIN"
echo " Email: $EMAIL"
echo " Тип: $NODE_TYPE"
echo " Внешний порт: 443 (HTTPS только)"
echo " Внутренний порт: 15100"
echo ""
# Update the system
echo "🔄 Updating the system..."
apt update && apt upgrade -y
# Install packages
echo "📦 Installing required packages..."
apt install -y \
docker.io \
docker-compose \
git \
curl \
wget \
unzip \
python3 \
python3-pip \
python3-venv \
nginx \
certbot \
python3-certbot-nginx \
ufw \
fail2ban \
htop \
tree \
nano \
jq
# Configure Docker
echo "🐳 Configuring Docker..."
systemctl enable docker
systemctl start docker
usermod -aG docker service
# Create the project layout
echo "📁 Creating the project layout..."
PROJECT_DIR="/opt/my-network"
mkdir -p $PROJECT_DIR
cd $PROJECT_DIR
# Create directories
mkdir -p /opt/storage /opt/logs
chmod 755 /opt/storage /opt/logs
chown service:service /opt/storage /opt/logs
# Clone the project (a git clone/pull is required here)
echo "📥 Setting up the project..."
echo "REQUIRED: Run git clone or copy the project into $PROJECT_DIR/my-uploader-bot/"
echo "Then continue running the script"
read -p "Press Enter when the project is in place..."
cd my-uploader-bot
# Create the configuration
echo "📝 Creating the bootstrap node configuration..."
cat > .env << EOF
# MY Network Bootstrap Node Configuration
NODE_ID=bootstrap-node-$(date +%s)
NODE_TYPE=bootstrap
NODE_PORT=15100
DOMAIN=$DOMAIN
EMAIL=$EMAIL
# Network Configuration
IS_BOOTSTRAP=true
BOOTSTRAP_NODES=[]
MAX_PEERS=50
SYNC_INTERVAL=300
# Database
DB_HOST=localhost
DB_PORT=3306
DB_NAME=my_network_bootstrap
DB_USER=my_network_user
DB_PASSWORD=$(openssl rand -base64 32)
# Redis
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=$(openssl rand -base64 32)
# Security
SECRET_KEY=$(openssl rand -base64 64)
JWT_SECRET=$(openssl rand -base64 32)
# Storage
STORAGE_PATH=/opt/storage
LOG_PATH=/opt/logs
# API Settings
API_RATE_LIMIT=100
MONITOR_RATE_LIMIT=10
ENABLE_PUBLIC_API=true
EOF
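# Note: the DB/Redis passwords and SECRET_KEY/JWT_SECRET above are generated once at
# deploy time; keep a copy of the resulting .env if other components need the same values.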
# Create the bootstrap.json configuration
echo "🌐 Creating the bootstrap configuration..."
cat > bootstrap.json << EOF
{
"version": "2.0",
"network_id": "my-network-main",
"bootstrap_nodes": [
{
"id": "bootstrap-node-primary",
"host": "$DOMAIN",
"port": 443,
"ssl": true,
"public": true,
"region": "eu-central",
"capacity": "high",
"services": ["api", "sync", "monitor", "storage"]
}
],
"network_config": {
"protocol_version": "2.0",
"sync_interval": 300,
"max_peers": 50,
"chunk_size": 1048576,
"compression": true,
"encryption": true
},
"api_endpoints": {
"health": "/api/my/health",
"node_info": "/api/my/node/info",
"peers": "/api/my/node/peers",
"sync": "/api/my/sync/status",
"monitor": "/api/my/monitor/",
"bootstrap": "/api/my/bootstrap/config"
}
}
EOF
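# Sanity-check the generated JSON (jq was installed earlier in this script):
#   jq . bootstrap.json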
# Install Python dependencies
echo "🐍 Installing Python dependencies..."
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements_new.txt
# Set up the database
echo "🗄️ Starting Docker services..."
docker-compose -f docker-compose.new.yml up -d
# Wait for the database to start
echo "⏳ Waiting for the database to start..."
sleep 30
# Create the systemd service
echo "⚙️ Creating the systemd service..."
cat > /etc/systemd/system/my-network-bootstrap.service << EOF
[Unit]
Description=MY Network Bootstrap Node
After=network.target docker.service
Requires=docker.service
[Service]
Type=simple
User=service
Group=service
WorkingDirectory=$PROJECT_DIR/my-uploader-bot
Environment=PATH=$PROJECT_DIR/my-uploader-bot/venv/bin
ExecStart=$PROJECT_DIR/my-uploader-bot/venv/bin/python app/main.py
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target
EOF
# Configure Nginx for Cloudflare
echo "🌐 Configuring Nginx for Cloudflare..."
cat > /etc/nginx/sites-available/my-network-bootstrap << EOF
# MY Network Bootstrap Node - Cloudflare Compatible
# Rate limiting zones for the bootstrap node (limit_req_zone must live in the http
# context, i.e. at file level in sites-available, not inside a server block)
limit_req_zone \$binary_remote_addr zone=bootstrap_api:10m rate=100r/s;
limit_req_zone \$binary_remote_addr zone=bootstrap_monitor:10m rate=10r/s;
limit_req_zone \$binary_remote_addr zone=bootstrap_sync:10m rate=50r/s;
server {
listen 80;
server_name $DOMAIN;
return 301 https://\$server_name\$request_uri;
}
server {
listen 443 ssl http2;
server_name $DOMAIN;
    # SSL configuration for Cloudflare
ssl_certificate /etc/letsencrypt/live/$DOMAIN/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/$DOMAIN/privkey.pem;
# Cloudflare IP ranges
set_real_ip_from 173.245.48.0/20;
set_real_ip_from 103.21.244.0/22;
set_real_ip_from 103.22.200.0/22;
set_real_ip_from 103.31.4.0/22;
set_real_ip_from 141.101.64.0/18;
set_real_ip_from 108.162.192.0/18;
set_real_ip_from 190.93.240.0/20;
set_real_ip_from 188.114.96.0/20;
set_real_ip_from 197.234.240.0/22;
set_real_ip_from 198.41.128.0/17;
set_real_ip_from 162.158.0.0/15;
set_real_ip_from 104.16.0.0/13;
set_real_ip_from 104.24.0.0/14;
set_real_ip_from 172.64.0.0/13;
set_real_ip_from 131.0.72.0/22;
real_ip_header CF-Connecting-IP;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header Referrer-Policy "strict-origin-when-cross-origin";
# Bootstrap Node specific headers
add_header X-MY-Network-Node-Type "bootstrap";
add_header X-MY-Network-Version "2.0";
server_tokens off;
    # Rate limiting zones for this vhost are declared at the top of this file (http context)
    # Bootstrap configuration endpoint (public)
location /api/my/bootstrap/ {
limit_req zone=bootstrap_api burst=50 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_set_header X-MY-Network-Bootstrap "true";
        # CORS for the bootstrap API
add_header Access-Control-Allow-Origin "*";
add_header Access-Control-Allow-Methods "GET, POST, OPTIONS";
add_header Access-Control-Allow-Headers "Content-Type, Authorization";
}
    # Health check endpoint (public)
location /api/my/health {
limit_req zone=bootstrap_api burst=20 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
add_header Access-Control-Allow-Origin "*";
}
    # Node info (public, used for peer discovery)
location /api/my/node/info {
limit_req zone=bootstrap_api burst=30 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
add_header Access-Control-Allow-Origin "*";
}
    # Sync endpoints (used by other nodes)
location /api/my/sync/ {
limit_req zone=bootstrap_sync burst=100 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
        # Longer timeouts for synchronization
proxy_connect_timeout 60s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
}
# Main API
location /api/my/ {
limit_req zone=bootstrap_api burst=200 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
    # Monitoring interface (protected)
location /api/my/monitor {
limit_req zone=bootstrap_monitor burst=10 nodelay;
        # Allow only specific IPs (adjust as needed)
allow 127.0.0.1;
allow ::1;
# allow YOUR_ADMIN_IP;
deny all;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Static files
location /static/ {
alias $PROJECT_DIR/my-uploader-bot/static/;
expires 30d;
add_header Cache-Control "public, immutable";
}
    # Block sensitive paths
location ~ /\.(?!well-known) {
deny all;
}
location ~ ^/(config|\.env|requirements|docker-compose) {
deny all;
}
# Root redirect to monitor
location = / {
return 301 /api/my/monitor/;
}
}
EOF
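# Example check of a public endpoint once nginx and the application are up (expected,
# but not guaranteed, to return JSON from the app):
#   curl -s https://$DOMAIN/api/my/bootstrap/config | jq .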
# Enable the nginx configuration
ln -sf /etc/nginx/sites-available/my-network-bootstrap /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Validate the nginx configuration
nginx -t
# Obtain the SSL certificate
echo "🔐 Obtaining SSL certificate..."
certbot --nginx -d $DOMAIN --email $EMAIL --agree-tos --non-interactive --redirect
# Configure the firewall (ONLY port 443!)
echo "🔥 Configuring firewall (port 443 only)..."
ufw --force reset
ufw default deny incoming
ufw default allow outgoing
# SSH port (detect the current one)
SSH_PORT=$(ss -tlnp | grep sshd | grep -o ':[0-9]*' | head -1 | cut -d: -f2)
if [[ -n "$SSH_PORT" ]]; then
echo "🔑 Разрешение SSH на порту $SSH_PORT..."
ufw allow $SSH_PORT/tcp comment 'SSH'
fi
# ONLY HTTP and HTTPS for Cloudflare
ufw allow 80/tcp comment 'HTTP for Cloudflare'
ufw allow 443/tcp comment 'HTTPS for Cloudflare'
# Block direct access to the application
ufw deny 15100 comment 'Block direct app access'
ufw --force enable
# Configure fail2ban
echo "🚫 Configuring fail2ban..."
apt install -y fail2ban
cat > /etc/fail2ban/jail.local << EOF
[DEFAULT]
bantime = 3600
findtime = 600
maxretry = 5
[sshd]
enabled = true
port = $SSH_PORT
filter = sshd
logpath = /var/log/auth.log
[nginx-http-auth]
enabled = true
filter = nginx-http-auth
logpath = /var/log/nginx/error.log
[nginx-limit-req]
enabled = true
filter = nginx-limit-req
logpath = /var/log/nginx/error.log
maxretry = 10
EOF
systemctl enable fail2ban
systemctl start fail2ban
# Start the services
echo "🚀 Starting services..."
systemctl daemon-reload
systemctl enable my-network-bootstrap
systemctl start my-network-bootstrap
systemctl enable nginx
systemctl start nginx
# Create the monitoring script
echo "📊 Setting up monitoring..."
cat > /opt/bootstrap-monitor.sh << 'EOF'
#!/bin/bash
LOG_FILE="/opt/logs/bootstrap-monitor.log"
DATE=$(date '+%Y-%m-%d %H:%M:%S')
# Check services
BOOTSTRAP_STATUS=$(systemctl is-active my-network-bootstrap)
NGINX_STATUS=$(systemctl is-active nginx)
DOCKER_STATUS=$(systemctl is-active docker)
# Check API
API_STATUS=$(curl -s -o /dev/null -w "%{http_code}" https://my-public-node-3.projscale.dev/api/my/health || echo "FAIL")
# Check disk space
DISK_USAGE=$(df -h /opt | awk 'NR==2 {print $5}' | sed 's/%//')
# Log status
echo "[$DATE] Bootstrap: $BOOTSTRAP_STATUS, Nginx: $NGINX_STATUS, Docker: $DOCKER_STATUS, API: $API_STATUS, Disk: ${DISK_USAGE}%" >> $LOG_FILE
# Alert if critical
if [[ "$BOOTSTRAP_STATUS" != "active" || "$NGINX_STATUS" != "active" || "$DOCKER_STATUS" != "active" || "$API_STATUS" != "200" ]]; then
echo "[$DATE] ALERT: Bootstrap node has critical issues!" >> $LOG_FILE
fi
EOF
chmod +x /opt/bootstrap-monitor.sh
# Add to cron
(crontab -l 2>/dev/null; echo "*/2 * * * * /opt/bootstrap-monitor.sh") | crontab -
# Configure automatic SSL renewal
(crontab -l 2>/dev/null; echo "0 12 * * * /usr/bin/certbot renew --quiet") | crontab -
# Final check
echo "🔍 Running the final bootstrap node check..."
sleep 10
echo "📊 Статус сервисов:"
systemctl status my-network-bootstrap --no-pager -l
systemctl status nginx --no-pager -l
systemctl status docker --no-pager -l
echo "🔥 Статус firewall:"
ufw status numbered
echo "🌐 Проверка API:"
curl -s https://$DOMAIN/api/my/health || echo "API недоступен"
echo "🔐 SSL сертификаты:"
certbot certificates
# Save the configuration summary
cat > /opt/bootstrap-node-config.txt << EOF
MY Network Bootstrap Node - Configuration
=======================================
Domain: $DOMAIN
Type: Bootstrap Node (Primary)
External port: 443 (HTTPS only)
Internal port: 15100
Cloudflare: Enabled
Service status:
  systemctl status my-network-bootstrap nginx docker fail2ban
Logs:
  journalctl -u my-network-bootstrap -f
  tail -f /opt/logs/bootstrap-monitor.log
API Endpoints:
https://$DOMAIN/api/my/health
https://$DOMAIN/api/my/node/info
https://$DOMAIN/api/my/bootstrap/config
https://$DOMAIN/api/my/monitor/ (restricted)
Configuration files:
$PROJECT_DIR/my-uploader-bot/.env
$PROJECT_DIR/my-uploader-bot/bootstrap.json
EOF
echo ""
echo "✅ MY Network Bootstrap Node развернут!"
echo "======================================="
echo "🌐 Домен: https://$DOMAIN"
echo "🔐 SSL: Активен"
echo "🔥 Firewall: Только порт 443"
echo "🌍 Cloudflare: Совместимость включена"
echo "📊 Мониторинг: Каждые 2 минуты"
echo ""
echo "🔍 Проверка работы:"
echo " curl https://$DOMAIN/api/my/health"
echo " curl https://$DOMAIN/api/my/node/info"
echo " curl https://$DOMAIN/api/my/bootstrap/config"
echo ""
echo "📚 Конфигурация: /opt/bootstrap-node-config.txt"
echo ""
echo "🎯 Bootstrap узел готов к работе!"

718
deploy_my_network.sh Normal file
View File

@ -0,0 +1,718 @@
#!/bin/bash
# MY Network Production Deployment Script
# Script that deploys MY Network with nginx, SSL and the full infrastructure
set -e  # Exit on error
# Output colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color
# ASCII art banner
print_header() {
echo -e "${CYAN}"
cat << "EOF"
╔══════════════════════════════════════════════════════════════════════════════╗
║ MY NETWORK v2.0 ║
║ Production Deployment Script ║
║ Distributed Content Protocol Installer ║
╚══════════════════════════════════════════════════════════════════════════════╝
EOF
echo -e "${NC}"
}
# Logging helpers
log_info() {
echo -e "${GREEN}[INFO]${NC} $1"
}
log_warn() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
log_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
log_step() {
echo -e "${BLUE}[STEP]${NC} $1"
}
# Configuration variables
DOMAIN=${DOMAIN:-"my-network.local"}
EMAIL=${EMAIL:-"admin@${DOMAIN}"}
HTTP_PORT=${HTTP_PORT:-80}
HTTPS_PORT=${HTTPS_PORT:-443}
APP_PORT=${APP_PORT:-15100}
REDIS_PORT=${REDIS_PORT:-6379}
DB_PORT=${DB_PORT:-3306}
# Check for root privileges
check_root() {
if [[ $EUID -ne 0 ]]; then
log_error "This script must be run as root"
exit 1
fi
}
# Detect the operating system
check_os() {
log_step "Checking operating system..."
if [[ -f /etc/os-release ]]; then
. /etc/os-release
OS=$NAME
VER=$VERSION_ID
log_info "Detected OS: $OS $VER"
else
log_error "Cannot detect operating system"
exit 1
fi
}
# Install dependencies
install_dependencies() {
log_step "Installing system dependencies..."
if [[ "$OS" == *"Ubuntu"* ]] || [[ "$OS" == *"Debian"* ]]; then
apt update
apt install -y \
nginx \
certbot \
python3-certbot-nginx \
docker.io \
docker-compose \
curl \
wget \
git \
htop \
ufw \
fail2ban
elif [[ "$OS" == *"CentOS"* ]] || [[ "$OS" == *"Red Hat"* ]]; then
yum update -y
yum install -y \
nginx \
certbot \
python3-certbot-nginx \
docker \
docker-compose \
curl \
wget \
git \
htop \
firewalld
else
log_warn "Unsupported OS, attempting generic installation..."
fi
    # Start Docker
    systemctl enable docker
    systemctl start docker
    log_info "Dependencies installed successfully"
}
# Configure the firewall
setup_firewall() {
log_step "Configuring firewall..."
if command -v ufw &> /dev/null; then
# Ubuntu/Debian firewall
ufw --force reset
ufw default deny incoming
ufw default allow outgoing
        # Allow SSH
        ufw allow 22/tcp
        # Allow HTTP/HTTPS
        ufw allow $HTTP_PORT/tcp
        ufw allow $HTTPS_PORT/tcp
        # Allow the application port
        ufw allow $APP_PORT/tcp
        # MY Network P2P ports
ufw allow 8000:8010/tcp
ufw allow 8000:8010/udp
ufw --force enable
elif command -v firewall-cmd &> /dev/null; then
# CentOS/RHEL firewall
systemctl enable firewalld
systemctl start firewalld
firewall-cmd --permanent --add-service=ssh
firewall-cmd --permanent --add-service=http
firewall-cmd --permanent --add-service=https
firewall-cmd --permanent --add-port=$APP_PORT/tcp
firewall-cmd --permanent --add-port=8000-8010/tcp
firewall-cmd --permanent --add-port=8000-8010/udp
firewall-cmd --reload
fi
log_info "Firewall configured successfully"
}
# Create the application user
create_app_user() {
log_step "Creating application user..."
if ! id "my-network" &>/dev/null; then
useradd -r -s /bin/false -d /opt/my-network -m my-network
usermod -aG docker my-network
log_info "User 'my-network' created"
else
log_info "User 'my-network' already exists"
fi
}
# Set up directories
setup_directories() {
    log_step "Setting up directories..."
    # Create the base directories
    mkdir -p /opt/my-network/{app,data,logs,storage,config,ssl}
    mkdir -p /var/log/my-network
    # Create the storage directories
    mkdir -p /opt/my-network/storage/{uploads,previews,encrypted,my-network}
    # Permissions
chown -R my-network:my-network /opt/my-network
chown -R my-network:my-network /var/log/my-network
chmod 755 /opt/my-network
chmod 750 /opt/my-network/config
chmod 700 /opt/my-network/ssl
log_info "Directories configured successfully"
}
# Copy the application files
deploy_application() {
    log_step "Deploying MY Network application..."
    # Copy the sources
    cp -r . /opt/my-network/app/
    # Set ownership
    chown -R my-network:my-network /opt/my-network/app
    # Create the production .env file
cat > /opt/my-network/app/.env << EOF
# MY Network Production Configuration
PROJECT_NAME=MY-Network
PROJECT_VERSION=2.0.0
DEBUG=False
ENVIRONMENT=production
# Database Configuration
DATABASE_URL=mysql://mymusic:mymusic_password@localhost:$DB_PORT/mymusic
DATABASE_POOL_SIZE=20
DATABASE_MAX_OVERFLOW=30
# Redis Configuration
REDIS_URL=redis://localhost:$REDIS_PORT/0
REDIS_PASSWORD=
# Application Settings
SECRET_KEY=$(openssl rand -hex 32)
MAX_FILE_SIZE=5368709120
STORAGE_PATH=/opt/my-network/storage
# MY Network Settings
MY_NETWORK_ENABLED=True
MY_NETWORK_NODE_ID=$(uuidgen)
MY_NETWORK_BOOTSTRAP_NODES=[]
MY_NETWORK_P2P_PORT=8001
MY_NETWORK_API_PORT=$APP_PORT
# SSL Settings
SSL_ENABLED=True
SSL_CERT_PATH=/opt/my-network/ssl/fullchain.pem
SSL_KEY_PATH=/opt/my-network/ssl/privkey.pem
# Logging
LOG_LEVEL=INFO
LOG_FILE=/var/log/my-network/app.log
# Monitoring
METRICS_ENABLED=True
PROMETHEUS_PORT=9090
EOF
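    # The generated SECRET_KEY and MY_NETWORK_NODE_ID above are unique per run; re-running
    # this step overwrites them, so back up /opt/my-network/app/.env before redeploying.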
log_info "Application deployed successfully"
}
# Configure nginx
setup_nginx() {
    log_step "Configuring nginx..."
    # Create the nginx configuration
cat > /etc/nginx/sites-available/my-network << EOF
# MY Network Nginx Configuration
upstream my_network_backend {
server 127.0.0.1:$APP_PORT;
keepalive 32;
}
# HTTP -> HTTPS redirect
server {
listen $HTTP_PORT;
server_name $DOMAIN;
    # For the Let's Encrypt challenge
    location /.well-known/acme-challenge/ {
        root /var/www/html;
    }
    # Redirect to HTTPS
location / {
return 301 https://\$server_name\$request_uri;
}
}
# HTTPS server
server {
listen $HTTPS_PORT ssl http2;
server_name $DOMAIN;
    # SSL certificates
    ssl_certificate /opt/my-network/ssl/fullchain.pem;
    ssl_certificate_key /opt/my-network/ssl/privkey.pem;
    # SSL settings
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers ECDHE-RSA-AES256-GCM-SHA512:DHE-RSA-AES256-GCM-SHA512:ECDHE-RSA-AES256-GCM-SHA384:DHE-RSA-AES256-GCM-SHA384;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
    # Security
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header X-Frame-Options DENY always;
add_header X-Content-Type-Options nosniff always;
add_header X-XSS-Protection "1; mode=block" always;
add_header Referrer-Policy "strict-origin-when-cross-origin" always;
    # Main application
location / {
proxy_pass http://my_network_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
proxy_cache_bypass \$http_upgrade;
proxy_read_timeout 86400;
        # Limits
client_max_body_size 5G;
proxy_request_buffering off;
}
    # MY Network monitoring
location /api/my/monitor/ {
proxy_pass http://my_network_backend;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection 'upgrade';
proxy_set_header Host \$host;
proxy_cache_bypass \$http_upgrade;
        # Allow everyone
allow all;
}
    # Static files
location /static/ {
alias /opt/my-network/storage/static/;
expires 30d;
add_header Cache-Control "public, immutable";
}
    # Logs
access_log /var/log/nginx/my-network-access.log;
error_log /var/log/nginx/my-network-error.log;
}
EOF
    # Enable the site
    ln -sf /etc/nginx/sites-available/my-network /etc/nginx/sites-enabled/
    rm -f /etc/nginx/sites-enabled/default
    # Validate the configuration
nginx -t
log_info "Nginx configured successfully"
}
# Obtain the SSL certificate
setup_ssl() {
    log_step "Setting up SSL certificate..."
    # Start nginx so the certificate can be issued
    systemctl start nginx
    # Request a Let's Encrypt certificate
if certbot --nginx -d $DOMAIN --email $EMAIL --agree-tos --non-interactive --redirect; then
log_info "SSL certificate obtained successfully"
        # Copy the certificates into our directory
        cp /etc/letsencrypt/live/$DOMAIN/fullchain.pem /opt/my-network/ssl/
        cp /etc/letsencrypt/live/$DOMAIN/privkey.pem /opt/my-network/ssl/
        chown my-network:my-network /opt/my-network/ssl/*
        # Configure automatic renewal
echo "0 3 * * * /usr/bin/certbot renew --quiet && systemctl reload nginx" | crontab -
else
log_warn "Failed to obtain SSL certificate, generating self-signed..."
        # Create a self-signed certificate
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
-keyout /opt/my-network/ssl/privkey.pem \
-out /opt/my-network/ssl/fullchain.pem \
-subj "/C=US/ST=State/L=City/O=Organization/CN=$DOMAIN"
chown my-network:my-network /opt/my-network/ssl/*
fi
}
# Create the production docker-compose
create_docker_compose() {
log_step "Creating production docker-compose..."
cat > /opt/my-network/docker-compose.prod.yml << EOF
version: '3.8'
services:
# MariaDB Database
mariadb:
image: mariadb:11.2
container_name: my-network-db
restart: unless-stopped
environment:
MYSQL_ROOT_PASSWORD: \${MYSQL_ROOT_PASSWORD:-root_password}
MYSQL_DATABASE: mymusic
MYSQL_USER: mymusic
MYSQL_PASSWORD: \${MYSQL_PASSWORD:-mymusic_password}
volumes:
- /opt/my-network/data/mysql:/var/lib/mysql
- /opt/my-network/storage:/Storage
ports:
- "127.0.0.1:$DB_PORT:3306"
networks:
- my-network
healthcheck:
test: ["CMD", "mysqladmin", "ping", "-h", "localhost"]
interval: 30s
timeout: 10s
retries: 5
# Redis Cache
redis:
image: redis:7-alpine
container_name: my-network-redis
restart: unless-stopped
command: redis-server --appendonly yes
volumes:
- /opt/my-network/data/redis:/data
ports:
- "127.0.0.1:$REDIS_PORT:6379"
networks:
- my-network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 30s
timeout: 10s
retries: 3
# MY Network Application
my-network-app:
build:
context: /opt/my-network/app
dockerfile: Dockerfile.prod
container_name: my-network-app
restart: unless-stopped
environment:
- ENVIRONMENT=production
volumes:
- /opt/my-network/app:/app
- /opt/my-network/storage:/Storage
- /opt/my-network/ssl:/ssl:ro
- /var/log/my-network:/var/log/my-network
ports:
- "127.0.0.1:$APP_PORT:$APP_PORT"
- "$((APP_PORT + 1)):$((APP_PORT + 1))" # P2P порт
networks:
- my-network
depends_on:
mariadb:
condition: service_healthy
redis:
condition: service_healthy
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:$APP_PORT/health"]
interval: 30s
timeout: 10s
retries: 3
networks:
my-network:
driver: bridge
volumes:
mysql_data:
redis_data:
EOF
    # Create the production Dockerfile
    cat > /opt/my-network/app/Dockerfile.prod << EOF
FROM python:3.11-slim
# Install system dependencies
RUN apt-get update && apt-get install -y \\
gcc \\
g++ \\
libmariadb-dev \\
pkg-config \\
curl \\
&& rm -rf /var/lib/apt/lists/*
# Create the application user
RUN useradd -r -s /bin/false -d /app mynetwork
# Install the Python dependencies
WORKDIR /app
COPY requirements_new.txt .
RUN pip install --no-cache-dir -r requirements_new.txt
# Copy the application
COPY . .
RUN chown -R mynetwork:mynetwork /app
# Create directories
RUN mkdir -p /var/log/my-network && \\
chown mynetwork:mynetwork /var/log/my-network
USER mynetwork
# Ports
EXPOSE $APP_PORT $((APP_PORT + 1))
# Start command
CMD ["python", "-m", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "$APP_PORT"]
EOF
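    # Manual build/run sketch (assumes the compose file created above is in place):
    #   docker-compose -f /opt/my-network/docker-compose.prod.yml build
    #   docker-compose -f /opt/my-network/docker-compose.prod.yml up -d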
log_info "Docker configuration created successfully"
}
# Create the systemd service
create_systemd_service() {
log_step "Creating systemd service..."
cat > /etc/systemd/system/my-network.service << EOF
[Unit]
Description=MY Network Distributed Protocol Service
After=docker.service
Requires=docker.service
[Service]
Type=oneshot
RemainAfterExit=yes
WorkingDirectory=/opt/my-network
ExecStart=/usr/bin/docker-compose -f docker-compose.prod.yml up -d
ExecStop=/usr/bin/docker-compose -f docker-compose.prod.yml down
User=my-network
Group=my-network
[Install]
WantedBy=multi-user.target
EOF
    # Reload systemd and enable the service
systemctl daemon-reload
systemctl enable my-network
log_info "Systemd service created successfully"
}
# Set up monitoring
setup_monitoring() {
    log_step "Setting up monitoring..."
    # Create the health check script
cat > /opt/my-network/health_check.sh << 'EOF'
#!/bin/bash
# MY Network Health Check Script
DOMAIN="localhost"
PORT="15100"
LOG_FILE="/var/log/my-network/health.log"
# Logging function
log_message() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> $LOG_FILE
}
# Check the HTTP endpoint
check_http() {
if curl -f -s "http://$DOMAIN:$PORT/health" > /dev/null; then
return 0
else
return 1
fi
}
# Check the MY Network API
check_my_network() {
if curl -f -s "http://$DOMAIN:$PORT/api/my/health" > /dev/null; then
return 0
else
return 1
fi
}
# Main check
if check_http && check_my_network; then
log_message "Health check PASSED"
exit 0
else
log_message "Health check FAILED"
    # Try to restart the service
systemctl restart my-network
log_message "Service restart attempted"
exit 1
fi
EOF
chmod +x /opt/my-network/health_check.sh
chown my-network:my-network /opt/my-network/health_check.sh
    # Add to cron: run the health check every 5 minutes
echo "*/5 * * * * /opt/my-network/health_check.sh" | crontab -u my-network -
log_info "Monitoring configured successfully"
}
# Start all services
start_services() {
log_step "Starting all services..."
    # Start nginx
    systemctl enable nginx
    systemctl restart nginx
    # Start MY Network
    systemctl start my-network
    # Wait for startup
    sleep 10
    # Check the status
if systemctl is-active --quiet my-network; then
log_info "MY Network service is running"
else
log_error "MY Network service failed to start"
systemctl status my-network
exit 1
fi
if systemctl is-active --quiet nginx; then
log_info "Nginx service is running"
else
log_error "Nginx service failed to start"
systemctl status nginx
exit 1
fi
}
# Final summary
print_summary() {
echo -e "${GREEN}"
cat << EOF
╔══════════════════════════════════════════════════════════════════════════════╗
║ MY NETWORK DEPLOYMENT COMPLETED ║
╚══════════════════════════════════════════════════════════════════════════════╝
🌐 Web Interface: https://$DOMAIN
📊 Monitoring: https://$DOMAIN/api/my/monitor/
🔧 API Documentation: https://$DOMAIN/api/docs
❤️ Health Check: https://$DOMAIN/health
📝 Configuration Files:
• Application: /opt/my-network/app/.env
• Nginx: /etc/nginx/sites-available/my-network
• Docker: /opt/my-network/docker-compose.prod.yml
• SSL: /opt/my-network/ssl/
📋 Management Commands:
• Start service: systemctl start my-network
• Stop service: systemctl stop my-network
• Restart service: systemctl restart my-network
• View logs: journalctl -u my-network -f
• Health check: /opt/my-network/health_check.sh
🔒 Security Features:
✅ SSL/TLS encryption
✅ Firewall configured
✅ Fail2ban protection
✅ Security headers
✅ Rate limiting
🚀 MY Network Features:
✅ Distributed content protocol
✅ P2P networking
✅ Content synchronization
✅ Load balancing
✅ Real-time monitoring
The system is now ready for production use!
EOF
echo -e "${NC}"
}
# Main entry point
main() {
print_header
log_info "Starting MY Network production deployment..."
check_root
check_os
install_dependencies
setup_firewall
create_app_user
setup_directories
deploy_application
setup_nginx
setup_ssl
create_docker_compose
create_systemd_service
setup_monitoring
start_services
print_summary
log_info "Deployment completed successfully!"
}
# Run the script
main "$@"

View File

@ -0,0 +1,140 @@
version: '3'
services:
maria_db:
image: mariadb:11.2
ports:
- "3307:3306"
env_file:
- .env
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-password}
- MYSQL_DATABASE=${MYSQL_DATABASE:-myuploader}
- MYSQL_USER=${MYSQL_USER:-myuploader}
- MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
volumes:
- /Storage/sqlStorage:/var/lib/mysql
restart: always
healthcheck:
test: [ "CMD", "healthcheck.sh", "--connect", "--innodb_initialized" ]
interval: 10s
timeout: 5s
retries: 3
redis:
image: redis:7-alpine
ports:
- "6379:6379"
volumes:
- redis_data:/data
restart: always
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 5s
retries: 3
app:
build:
context: .
dockerfile: Dockerfile
command: python -m app
env_file:
- .env
restart: always
links:
- maria_db
- redis
ports:
- "15100:15100"
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
condition: service_healthy
redis:
condition: service_healthy
indexer:
build:
context: .
dockerfile: Dockerfile
restart: always
command: python -m app indexer
env_file:
- .env
links:
- maria_db
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
condition: service_healthy
redis:
condition: service_healthy
ton_daemon:
build:
context: .
dockerfile: Dockerfile
command: python -m app ton_daemon
restart: always
env_file:
- .env
links:
- maria_db
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
condition: service_healthy
redis:
condition: service_healthy
license_index:
build:
context: .
dockerfile: Dockerfile
command: python -m app license_index
restart: always
env_file:
- .env
links:
- maria_db
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
condition: service_healthy
redis:
condition: service_healthy
convert_process:
build:
context: .
dockerfile: Dockerfile
command: python -m app convert_process
restart: always
env_file:
- .env
links:
- maria_db
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
maria_db:
condition: service_healthy
redis:
condition: service_healthy
volumes:
redis_data:

311
docker-compose.new.yml Normal file
View File

@ -0,0 +1,311 @@
version: '3.8'
services:
# PostgreSQL Database
postgres:
image: postgres:15-alpine
container_name: uploader_postgres
restart: unless-stopped
environment:
POSTGRES_DB: ${POSTGRES_DB:-uploader_bot}
POSTGRES_USER: ${POSTGRES_USER:-uploader}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
volumes:
- postgres_data:/var/lib/postgresql/data
- ./scripts/init-db.sql:/docker-entrypoint-initdb.d/init-db.sql:ro
ports:
- "${POSTGRES_PORT:-5432}:5432"
networks:
- uploader_network
healthcheck:
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-uploader} -d ${POSTGRES_DB:-uploader_bot}"]
interval: 10s
timeout: 5s
retries: 5
start_period: 30s
# Redis Cache
redis:
image: redis:7-alpine
container_name: uploader_redis
restart: unless-stopped
command: >
redis-server
--appendonly yes
--maxmemory 512mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
volumes:
- redis_data:/data
- ./config/redis.conf:/usr/local/etc/redis/redis.conf:ro
ports:
- "${REDIS_PORT:-6379}:6379"
networks:
- uploader_network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 3s
retries: 5
start_period: 10s
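  # Note: only the flags passed via "command" above take effect; the mounted
  # ./config/redis.conf is not loaded unless the command explicitly points at it
  # (e.g. "redis-server /usr/local/etc/redis/redis.conf").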
# Main Application
app:
build:
context: .
dockerfile: Dockerfile.new
target: production
container_name: uploader_app
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
# Database
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-uploader}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-uploader_bot}
# Redis
REDIS_URL: redis://redis:6379/0
# Application
PROJECT_HOST: ${PROJECT_HOST:-http://localhost:15100}
SANIC_PORT: 15100
DEBUG: ${DEBUG:-false}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
# Telegram
TELEGRAM_API_KEY: ${TELEGRAM_API_KEY}
CLIENT_TELEGRAM_API_KEY: ${CLIENT_TELEGRAM_API_KEY}
# TON Blockchain
TESTNET: ${TESTNET:-false}
TONCENTER_HOST: ${TONCENTER_HOST:-https://toncenter.com/api/v2/}
TONCENTER_API_KEY: ${TONCENTER_API_KEY}
# Security
SECRET_KEY: ${SECRET_KEY}
JWT_SECRET_KEY: ${JWT_SECRET_KEY}
# File Storage
UPLOADS_DIR: /app/data
# Services
INDEXER_ENABLED: ${INDEXER_ENABLED:-true}
TON_DAEMON_ENABLED: ${TON_DAEMON_ENABLED:-true}
LICENSE_SERVICE_ENABLED: ${LICENSE_SERVICE_ENABLED:-true}
CONVERT_SERVICE_ENABLED: ${CONVERT_SERVICE_ENABLED:-true}
# Monitoring
METRICS_ENABLED: ${METRICS_ENABLED:-true}
HEALTH_CHECK_ENABLED: ${HEALTH_CHECK_ENABLED:-true}
# Rate Limiting
RATE_LIMIT_ENABLED: ${RATE_LIMIT_ENABLED:-true}
RATE_LIMIT_REQUESTS: ${RATE_LIMIT_REQUESTS:-100}
RATE_LIMIT_WINDOW: ${RATE_LIMIT_WINDOW:-60}
volumes:
- app_data:/app/data
- app_logs:/app/logs
- ./config:/app/config:ro
ports:
- "${SANIC_PORT:-15100}:15100"
- "${METRICS_PORT:-9090}:9090"
networks:
- uploader_network
labels:
- "traefik.enable=true"
- "traefik.http.routers.uploader.rule=Host(`${DOMAIN:-localhost}`)"
- "traefik.http.routers.uploader.entrypoints=web,websecure"
- "traefik.http.routers.uploader.tls.certresolver=letsencrypt"
- "traefik.http.services.uploader.loadbalancer.server.port=15100"
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:15100/health"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
# Background Services (Alternative architecture - separate containers)
indexer:
build:
context: .
dockerfile: Dockerfile.new
target: production
container_name: uploader_indexer
restart: unless-stopped
command: python -m app.services.indexer
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-uploader}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-uploader_bot}
REDIS_URL: redis://redis:6379/0
TELEGRAM_API_KEY: ${TELEGRAM_API_KEY}
CLIENT_TELEGRAM_API_KEY: ${CLIENT_TELEGRAM_API_KEY}
TESTNET: ${TESTNET:-false}
TONCENTER_HOST: ${TONCENTER_HOST:-https://toncenter.com/api/v2/}
TONCENTER_API_KEY: ${TONCENTER_API_KEY}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: indexer
volumes:
- app_logs:/app/logs
networks:
- uploader_network
profiles:
- separate-services
ton_daemon:
build:
context: .
dockerfile: Dockerfile.new
target: production
container_name: uploader_ton_daemon
restart: unless-stopped
command: python -m app.services.ton_daemon
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-uploader}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-uploader_bot}
REDIS_URL: redis://redis:6379/0
TESTNET: ${TESTNET:-false}
TONCENTER_HOST: ${TONCENTER_HOST:-https://toncenter.com/api/v2/}
TONCENTER_API_KEY: ${TONCENTER_API_KEY}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: ton_daemon
volumes:
- app_logs:/app/logs
networks:
- uploader_network
profiles:
- separate-services
# Monitoring and Observability
prometheus:
image: prom/prometheus:latest
container_name: uploader_prometheus
restart: unless-stopped
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/etc/prometheus/console_libraries'
- '--web.console.templates=/etc/prometheus/consoles'
- '--storage.tsdb.retention.time=200h'
- '--web.enable-lifecycle'
volumes:
- ./monitoring/prometheus.yml:/etc/prometheus/prometheus.yml:ro
- prometheus_data:/prometheus
ports:
- "9091:9090"
networks:
- uploader_network
profiles:
- monitoring
grafana:
image: grafana/grafana:latest
container_name: uploader_grafana
restart: unless-stopped
environment:
GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: "false"
volumes:
- grafana_data:/var/lib/grafana
- ./monitoring/grafana/dashboards:/etc/grafana/provisioning/dashboards:ro
- ./monitoring/grafana/datasources:/etc/grafana/provisioning/datasources:ro
ports:
- "3001:3000"
networks:
- uploader_network
profiles:
- monitoring
# Reverse Proxy (optional)
traefik:
image: traefik:v3.0
container_name: uploader_traefik
restart: unless-stopped
command:
- '--api.dashboard=true'
- '--api.insecure=true'
- '--providers.docker=true'
- '--providers.docker.exposedbydefault=false'
- '--entrypoints.web.address=:80'
- '--entrypoints.websecure.address=:443'
- '--certificatesresolvers.letsencrypt.acme.httpchallenge=true'
- '--certificatesresolvers.letsencrypt.acme.httpchallenge.entrypoint=web'
- '--certificatesresolvers.letsencrypt.acme.email=${ACME_EMAIL:-admin@example.com}'
- '--certificatesresolvers.letsencrypt.acme.storage=/letsencrypt/acme.json'
ports:
- "80:80"
- "443:443"
- "8080:8080" # Traefik dashboard
volumes:
- /var/run/docker.sock:/var/run/docker.sock:ro
- traefik_data:/letsencrypt
networks:
- uploader_network
profiles:
- proxy
# Database backup service
postgres_backup:
image: postgres:15-alpine
container_name: uploader_backup
restart: "no"
depends_on:
- postgres
environment:
POSTGRES_DB: ${POSTGRES_DB:-uploader_bot}
POSTGRES_USER: ${POSTGRES_USER:-uploader}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
BACKUP_SCHEDULE: ${BACKUP_SCHEDULE:-0 2 * * *} # Daily at 2 AM
volumes:
- backup_data:/backups
- ./scripts/backup.sh:/backup.sh:ro
command: /backup.sh
networks:
- uploader_network
profiles:
- backup
# Named volumes for data persistence
volumes:
postgres_data:
driver: local
redis_data:
driver: local
app_data:
driver: local
app_logs:
driver: local
prometheus_data:
driver: local
grafana_data:
driver: local
traefik_data:
driver: local
backup_data:
driver: local
# Custom network
networks:
uploader_network:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16

214
install_service.sh Normal file
View File

@ -0,0 +1,214 @@
#!/bin/bash
# MY Network - Service installation (Step 1 of 2)
# Installs the base service without hardening
# For hardening, use: bash secure_service.sh
set -e
echo "🚀 MY Network - Установка сервиса"
echo "================================="
# Check for root privileges
if [[ $EUID -ne 0 ]]; then
    echo "❌ This script must be run as root"
    echo "Use: sudo bash install_service.sh"
exit 1
fi
# Ask for the domain
read -p "🌐 Enter the domain (e.g. my-network.example.com): " DOMAIN
if [[ -z "$DOMAIN" ]]; then
    echo "❌ A domain is required!"
    exit 1
fi
# Ask for the SSL certificate email
read -p "📧 Enter the email for the SSL certificate: " EMAIL
if [[ -z "$EMAIL" ]]; then
    echo "❌ An email is required for SSL!"
    exit 1
fi
echo "📋 Настройки:"
echo " Домен: $DOMAIN"
echo " Email: $EMAIL"
echo " Порт: 15100"
echo ""
# Update the system
echo "🔄 Updating the system..."
apt update && apt upgrade -y
# Install the core packages
echo "📦 Installing packages..."
apt install -y \
docker.io \
docker-compose \
git \
curl \
wget \
unzip \
python3 \
python3-pip \
python3-venv \
htop \
tree \
nano \
ufw
# Start Docker
systemctl enable docker
systemctl start docker
# Add the user to the docker group
usermod -aG docker $USER
# Create the project directory
PROJECT_DIR="/opt/my-network"
mkdir -p $PROJECT_DIR
cd $PROJECT_DIR
# Clone or copy the project
if [[ -d "my-uploader-bot" ]]; then
    echo "📁 Project already exists, updating..."
    cd my-uploader-bot
    git pull 2>/dev/null || echo "Git pull failed, continuing..."
    cd ..
else
    echo "📥 Copying the project..."
    # If the script is run from the project directory, copy it
    if [[ -f "../pyproject.toml" ]]; then
        cp -r ../ ./my-uploader-bot/
    else
        echo "❌ Project files not found. Make sure the script is run from the project directory."
exit 1
fi
fi
cd my-uploader-bot
# Create the .env file
echo "📝 Creating the configuration..."
cat > .env << EOF
# MY Network Configuration
NODE_ID=node-$(date +%s)
NODE_PORT=15100
DOMAIN=$DOMAIN
EMAIL=$EMAIL
# Database
DB_HOST=localhost
DB_PORT=3306
DB_NAME=my_network
DB_USER=my_network_user
DB_PASSWORD=$(openssl rand -base64 32)
# Redis
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=$(openssl rand -base64 32)
# Security
SECRET_KEY=$(openssl rand -base64 64)
JWT_SECRET=$(openssl rand -base64 32)
# Paths
STORAGE_PATH=/opt/storage
LOG_PATH=/opt/logs
# Network
BOOTSTRAP_NODES=[]
SYNC_INTERVAL=300
MAX_PEERS=10
EOF
# Create directories
echo "📁 Creating directories..."
mkdir -p /opt/storage /opt/logs
chmod 755 /opt/storage /opt/logs
# Install Python dependencies
echo "🐍 Installing Python dependencies..."
python3 -m venv venv
source venv/bin/activate
pip install --upgrade pip
pip install -r requirements_new.txt
# Create the systemd service
echo "⚙️ Creating the systemd service..."
cat > /etc/systemd/system/my-network.service << EOF
[Unit]
Description=MY Network Service
After=network.target docker.service
Requires=docker.service
[Service]
Type=simple
User=root
WorkingDirectory=$PROJECT_DIR/my-uploader-bot
Environment=PATH=$PROJECT_DIR/my-uploader-bot/venv/bin
ExecStart=/bin/bash -c 'source venv/bin/activate && python app/main.py'
ExecStop=/bin/kill -TERM \$MAINPID
Restart=always
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
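# Useful follow-up commands once the unit file exists:
#   systemctl cat my-network        # inspect the generated unit
#   journalctl -u my-network -f     # follow the service logs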
# Start Docker Compose
echo "🐳 Starting Docker services..."
docker-compose -f docker-compose.new.yml up -d
# Enable the service
echo "🔧 Enabling the service..."
systemctl daemon-reload
systemctl enable my-network
systemctl start my-network
# Check the status
sleep 5
echo "📊 Checking status..."
systemctl status my-network --no-pager
# Check the ports
echo "🌐 Checking ports..."
netstat -tlnp | grep :15100 || echo "⚠️ Port 15100 is not listening"
# Save the configuration summary
echo "💾 Saving the configuration..."
cat > /opt/my-network-config.txt << EOF
MY Network Service - Configuration
===============================
Domain: $DOMAIN
Email: $EMAIL
Port: 15100
Directory: $PROJECT_DIR
Logs: /opt/logs
Storage: /opt/storage
Service status: systemctl status my-network
Service logs: journalctl -u my-network -f
Restart: systemctl restart my-network
To harden the server, run:
  sudo bash secure_service.sh
EOF
echo ""
echo "✅ MY Network сервис установлен!"
echo "================================"
echo "📍 Конфигурация сохранена в: /opt/my-network-config.txt"
echo "🌐 Домен: $DOMAIN"
echo "🔌 Порт: 15100"
echo "📁 Директория: $PROJECT_DIR"
echo ""
echo "🔍 Проверка работы:"
echo " curl http://localhost:15100/api/my/health"
echo " systemctl status my-network"
echo ""
echo "⚠️ ВАЖНО: Для защиты сервера выполните:"
echo " sudo bash secure_service.sh"
echo ""
echo "📚 Документация: /opt/my-network/my-uploader-bot/MY_NETWORK_README.md"
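# Quick post-install health poll (assumes the app exposes /api/my/health once it is up)
for i in {1..10}; do
if curl -sf http://localhost:15100/api/my/health > /dev/null; then
echo "✅ Health check OK"
break
fi
echo "⏳ Waiting for the service to come up ($i/10)..."
sleep 5
done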

126
monitor_deployment.sh Executable file
View File

@ -0,0 +1,126 @@
#!/bin/bash
# MY Network Bootstrap Deployment Monitor
# Watches the progress of the remote bootstrap deployment
echo "🚀 MY Network Bootstrap Deployment Monitor"
echo "=========================================="
echo "Сервер: 2.58.65.188"
echo "Домен: my-public-node-3.projscale.dev"
echo "Время запуска: $(date)"
echo ""
while true; do
clear
echo "🚀 MY Network Bootstrap - Live Monitor"
echo "====================================="
echo "$(date '+%H:%M:%S') | Обновляется каждые 30 секунд"
echo ""
# Проверка локальных процессов
echo "📡 Локальные процессы:"
SCP_COUNT=$(ps aux | grep 'scp.*auto_deploy.sh' | grep -v grep | wc -l | tr -d ' ')
SSH_COUNT=$(ps aux | grep 'auto_ssh_deploy.exp' | grep -v grep | wc -l | tr -d ' ')
if [[ $SCP_COUNT -gt 0 ]]; then
echo " ✅ SCP Transfer: Активен (передача файлов)"
else
echo " ✅ SCP Transfer: Завершен"
fi
if [[ $SSH_COUNT -gt 0 ]]; then
echo " 🔄 SSH Deploy: Активен (установка на сервере)"
else
echo " ⚠️ SSH Deploy: Завершен или прерван"
fi
echo ""
# Проверка доступности домена
echo "🌐 Проверка домена:"
DOMAIN_STATUS=$(curl -s --connect-timeout 3 -o /dev/null -w "%{http_code}" https://my-public-node-3.projscale.dev/api/my/health 2>/dev/null || echo "TIMEOUT")
case $DOMAIN_STATUS in
"200")
echo " ✅ API доступен: https://my-public-node-3.projscale.dev/api/my/health"
echo " 🎉 РАЗВЕРТЫВАНИЕ ЗАВЕРШЕНО УСПЕШНО!"
;;
"521")
echo " ⏳ Error 521: Сервер недоступен (установка в процессе)"
;;
"522")
echo " ⏳ Error 522: Connection timeout (сервис запускается)"
;;
"TIMEOUT")
echo " ⏳ Timeout: Проверяем соединение..."
;;
*)
echo " ⚠️ HTTP $DOMAIN_STATUS: Проверяем статус..."
;;
esac
echo ""
# Проверка прямого подключения к серверу
echo "🔍 Проверка сервера:"
SERVER_PING=$(ping -c 1 -W 2 2.58.65.188 2>/dev/null | grep "1 received" | wc -l | tr -d ' ')
if [[ $SERVER_PING -gt 0 ]]; then
echo " ✅ Сервер 2.58.65.188 доступен"
else
echo " ⚠️ Сервер 2.58.65.188 недоступен"
fi
# Попытка проверить порты
echo ""
echo "🔌 Проверка портов:"
# Проверяем SSH (22)
SSH_CHECK=$(timeout 3 bash -c "</dev/tcp/2.58.65.188/22" 2>/dev/null && echo "open" || echo "closed")
echo " SSH (22): $SSH_CHECK"
# Проверяем HTTP (80)
HTTP_CHECK=$(timeout 3 bash -c "</dev/tcp/2.58.65.188/80" 2>/dev/null && echo "open" || echo "closed")
echo " HTTP (80): $HTTP_CHECK"
# Проверяем HTTPS (443)
HTTPS_CHECK=$(timeout 3 bash -c "</dev/tcp/2.58.65.188/443" 2>/dev/null && echo "open" || echo "closed")
echo " HTTPS (443): $HTTPS_CHECK"
echo ""
echo "📊 Статус развертывания:"
if [[ $DOMAIN_STATUS == "200" ]]; then
echo " 🎯 Статус: ЗАВЕРШЕНО"
echo " ✅ MY Network Bootstrap Node готов к работе!"
echo ""
echo "🔗 Доступные endpoints:"
echo " • Health: https://my-public-node-3.projscale.dev/api/my/health"
echo " • Node Info: https://my-public-node-3.projscale.dev/api/my/node/info"
echo " • Bootstrap: https://my-public-node-3.projscale.dev/api/my/bootstrap/config"
echo ""
echo "🎉 Развертывание успешно завершено!"
break
elif [[ $SSH_COUNT -gt 0 ]]; then
echo " 🔄 Статус: В ПРОЦЕССЕ"
echo " ⏳ Установка компонентов на сервере..."
if [[ $HTTPS_CHECK == "open" ]]; then
echo " ✅ Порт 443 открыт - nginx работает"
fi
elif [[ $SCP_COUNT -gt 0 ]]; then
echo " 📤 Статус: ПЕРЕДАЧА ФАЙЛОВ"
echo " ⏳ Копирование скриптов на сервер..."
else
echo " ⚠️ Статус: НЕИЗВЕСТНО"
echo " 🔍 Проверьте терминалы или перезапустите развертывание"
fi
echo ""
echo "💡 Для детального мониторинга:"
echo " Terminal 1: SCP процесс"
echo " Terminal 2: SSH развертывание"
echo ""
echo "⏹️ Нажмите Ctrl+C для выхода"
echo "🔄 Следующее обновление через 30 секунд..."
sleep 30
done

287
monitoring/alert_rules.yml Normal file
View File

@ -0,0 +1,287 @@
# Alert rules for my-uploader-bot monitoring
groups:
- name: application_alerts
interval: 30s
rules:
# Application availability
- alert: ApplicationDown
expr: up{job="my-uploader-bot"} == 0
for: 1m
labels:
severity: critical
service: my-uploader-bot
annotations:
summary: "Application instance {{ $labels.instance }} is down"
description: "My-uploader-bot application has been down for more than 1 minute"
# High error rate
- alert: HighErrorRate
expr: rate(http_requests_total{status=~"5.."}[5m]) / rate(http_requests_total[5m]) > 0.1
for: 5m
labels:
severity: warning
service: my-uploader-bot
annotations:
summary: "High error rate detected"
description: "Error rate is {{ $value | humanizePercentage }} for the last 5 minutes"
# High response time
- alert: HighResponseTime
expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket[5m])) > 2.0
for: 5m
labels:
severity: warning
service: my-uploader-bot
annotations:
summary: "High response time detected"
description: "95th percentile response time is {{ $value }}s for the last 5 minutes"
# High memory usage
- alert: HighMemoryUsage
expr: (process_resident_memory_bytes / 1024 / 1024 / 1024) > 2.0
for: 10m
labels:
severity: warning
service: my-uploader-bot
annotations:
summary: "High memory usage detected"
description: "Memory usage is {{ $value | humanize }}GB"
# High CPU usage
- alert: HighCPUUsage
expr: rate(process_cpu_seconds_total[5m]) * 100 > 80
for: 10m
labels:
severity: warning
service: my-uploader-bot
annotations:
summary: "High CPU usage detected"
description: "CPU usage is {{ $value | humanizePercentage }}"
- name: database_alerts
interval: 30s
rules:
# Database down
- alert: PostgreSQLDown
expr: up{job="postgres"} == 0
for: 1m
labels:
severity: critical
service: postgresql
annotations:
summary: "PostgreSQL instance {{ $labels.instance }} is down"
description: "PostgreSQL database has been down for more than 1 minute"
# High database connections
- alert: HighDatabaseConnections
expr: pg_stat_database_numbackends / pg_settings_max_connections > 0.8
for: 5m
labels:
severity: warning
service: postgresql
annotations:
summary: "High database connection usage"
description: "Database connection usage is {{ $value | humanizePercentage }}"
# Slow queries
- alert: SlowQueries
expr: pg_stat_activity_max_tx_duration > 300
for: 5m
labels:
severity: warning
service: postgresql
annotations:
summary: "Slow database queries detected"
description: "Longest running query is {{ $value }}s"
# Replication lag
- alert: ReplicationLag
expr: pg_replication_lag > 60
for: 5m
labels:
severity: warning
service: postgresql
annotations:
summary: "High replication lag"
description: "Replication lag is {{ $value }}s"
- name: cache_alerts
interval: 30s
rules:
# Redis down
- alert: RedisDown
expr: up{job="redis"} == 0
for: 1m
labels:
severity: critical
service: redis
annotations:
summary: "Redis instance {{ $labels.instance }} is down"
description: "Redis cache has been down for more than 1 minute"
# High memory usage
- alert: RedisHighMemoryUsage
expr: redis_memory_used_bytes / redis_memory_max_bytes > 0.9
for: 5m
labels:
severity: warning
service: redis
annotations:
summary: "Redis high memory usage"
description: "Redis memory usage is {{ $value | humanizePercentage }}"
# High hit rate drop
- alert: RedisCacheHitRateDrop
expr: rate(redis_keyspace_hits_total[5m]) / (rate(redis_keyspace_hits_total[5m]) + rate(redis_keyspace_misses_total[5m])) < 0.8
for: 10m
labels:
severity: warning
service: redis
annotations:
summary: "Redis cache hit rate dropped"
description: "Cache hit rate is {{ $value | humanizePercentage }}"
- name: system_alerts
interval: 30s
rules:
# High disk usage
- alert: HighDiskUsage
expr: (node_filesystem_size_bytes - node_filesystem_avail_bytes) / node_filesystem_size_bytes > 0.85
for: 5m
labels:
severity: warning
service: system
annotations:
summary: "High disk usage on {{ $labels.mountpoint }}"
description: "Disk usage is {{ $value | humanizePercentage }}"
# High memory usage
- alert: HighSystemMemoryUsage
expr: (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) / node_memory_MemTotal_bytes > 0.9
for: 5m
labels:
severity: warning
service: system
annotations:
summary: "High system memory usage"
description: "System memory usage is {{ $value | humanizePercentage }}"
# High load average
- alert: HighLoadAverage
expr: node_load15 / count without (cpu, mode) (node_cpu_seconds_total{mode="idle"}) > 2.0
for: 10m
labels:
severity: warning
service: system
annotations:
summary: "High system load"
description: "15-minute load average is {{ $value }}"
- name: storage_alerts
interval: 60s
rules:
# High upload queue
- alert: HighUploadQueue
expr: upload_queue_size > 100
for: 5m
labels:
severity: warning
service: storage
annotations:
summary: "High upload queue size"
description: "Upload queue has {{ $value }} pending items"
# Failed uploads
- alert: HighFailedUploads
expr: rate(upload_failures_total[10m]) > 0.1
for: 5m
labels:
severity: warning
service: storage
annotations:
summary: "High upload failure rate"
description: "Upload failure rate is {{ $value }}/min"
# Storage space
- alert: LowStorageSpace
expr: storage_available_bytes / storage_total_bytes < 0.1
for: 5m
labels:
severity: critical
service: storage
annotations:
summary: "Low storage space"
description: "Available storage is {{ $value | humanizePercentage }}"
- name: blockchain_alerts
interval: 60s
rules:
# TON service down
- alert: TONServiceDown
expr: ton_service_up == 0
for: 2m
labels:
severity: critical
service: blockchain
annotations:
summary: "TON service is down"
description: "TON blockchain service has been unavailable for more than 2 minutes"
# High transaction failures
- alert: HighTransactionFailures
expr: rate(blockchain_transaction_failures_total[10m]) > 0.05
for: 5m
labels:
severity: warning
service: blockchain
annotations:
summary: "High blockchain transaction failure rate"
description: "Transaction failure rate is {{ $value }}/min"
# Pending transactions
- alert: HighPendingTransactions
expr: blockchain_pending_transactions > 50
for: 10m
labels:
severity: warning
service: blockchain
annotations:
summary: "High number of pending transactions"
description: "{{ $value }} transactions are pending for more than 10 minutes"
- name: security_alerts
interval: 30s
rules:
# High login failures
- alert: HighLoginFailures
expr: rate(auth_login_failures_total[5m]) > 0.1
for: 5m
labels:
severity: warning
service: security
annotations:
summary: "High login failure rate"
description: "Login failure rate is {{ $value }}/min"
# Rate limit hits
- alert: HighRateLimitHits
expr: rate(rate_limit_hits_total[5m]) > 10
for: 5m
labels:
severity: warning
service: security
annotations:
summary: "High rate limit hits"
description: "Rate limit hit rate is {{ $value }}/min"
# Suspicious activity
- alert: SuspiciousActivity
expr: security_suspicious_events > 5
for: 1m
labels:
severity: critical
service: security
annotations:
summary: "Suspicious security activity detected"
description: "{{ $value }} suspicious events detected in the last minute"
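# To validate these rules before loading them into Prometheus (promtool ships with Prometheus):
#   promtool check rules alert_rules.yml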

84
monitoring/prometheus.yml Normal file
View File

@ -0,0 +1,84 @@
# Prometheus configuration for my-uploader-bot monitoring
global:
scrape_interval: 15s
evaluation_interval: 15s
external_labels:
cluster: 'my-uploader-bot'
environment: 'production'
# Rules for alerting
rule_files:
- "alert_rules.yml"
# Alertmanager configuration
alerting:
alertmanagers:
- static_configs:
- targets:
- alertmanager:9093
# Scrape configurations
scrape_configs:
# Prometheus itself
- job_name: 'prometheus'
static_configs:
- targets: ['localhost:9090']
scrape_interval: 30s
metrics_path: '/metrics'
# Main application
- job_name: 'my-uploader-bot'
static_configs:
- targets: ['app:8000']
scrape_interval: 15s
metrics_path: '/metrics'
scrape_timeout: 10s
honor_labels: true
params:
format: ['prometheus']
# PostgreSQL metrics
- job_name: 'postgres'
static_configs:
- targets: ['postgres-exporter:9187']
scrape_interval: 30s
metrics_path: '/metrics'
# Redis metrics
- job_name: 'redis'
static_configs:
- targets: ['redis-exporter:9121']
scrape_interval: 30s
metrics_path: '/metrics'
# Node exporter for system metrics
- job_name: 'node'
static_configs:
- targets: ['node-exporter:9100']
scrape_interval: 30s
metrics_path: '/metrics'
# cAdvisor for container metrics
- job_name: 'cadvisor'
static_configs:
- targets: ['cadvisor:8080']
scrape_interval: 30s
metrics_path: '/metrics'
# Nginx metrics (if using nginx as reverse proxy)
- job_name: 'nginx'
static_configs:
- targets: ['nginx-exporter:9113']
scrape_interval: 30s
metrics_path: '/metrics'
# Remote write configuration (for long-term storage)
# remote_write:
# - url: "http://remote-storage:8086/api/v1/prom/write?db=prometheus"
# Storage configuration is handled by command line args in docker-compose
# --storage.tsdb.retention.time=30d
# --storage.tsdb.retention.size=10GB
# --storage.tsdb.path=/prometheus/data
# --storage.tsdb.wal-compression
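# To validate this configuration together with the referenced rule files before deploying:
#   promtool check config prometheus.yml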

150
pyproject.toml Normal file
View File

@ -0,0 +1,150 @@
[tool.poetry]
name = "my-uploader-bot"
version = "2.0.0"
description = "Enhanced async uploader bot with comprehensive security and monitoring"
authors = ["Your Name <your.email@example.com>"]
readme = "README.md"
packages = [{include = "app"}]
[tool.poetry.dependencies]
python = "^3.11"
sanic = "^23.12.1"
sanic-cors = "^2.2.0"
asyncpg = "^0.29.0"
sqlalchemy = {extras = ["asyncio"], version = "^2.0.23"}
alembic = "^1.13.1"
aioredis = "^2.0.1"  # NOTE: aioredis 2.x is unmaintained and 2.0.1 fails to import on Python 3.11; prefer redis.asyncio going forward
structlog = "^23.2.0"
pydantic = "^2.5.2"
pydantic-settings = "^2.1.0"
python-multipart = "^0.0.6"
aiofiles = "^23.2.1"
python-magic = "^0.4.27"
pillow = "^10.1.0"
prometheus-client = "^0.19.0"
psutil = "^5.9.6"
bcrypt = "^4.1.2"
pyjwt = "^2.8.0"
cryptography = "^41.0.7"
httpx = "^0.25.2"
ujson = "^5.8.0"
orjson = "^3.9.10"
toncli = "^0.0.54"
ton = "^0.24"
validators = "^0.22.0"
python-dateutil = "^2.8.2"
typing-extensions = "^4.8.0"
[tool.poetry.group.dev.dependencies]
pytest = "^7.4.3"
pytest-asyncio = "^0.21.1"
pytest-cov = "^4.1.0"
black = "^23.11.0"
isort = "^5.12.0"
flake8 = "^6.1.0"
mypy = "^1.7.1"
pre-commit = "^3.5.0"
bandit = "^1.7.5"
safety = "^2.3.5"
[tool.poetry.group.test.dependencies]
pytest-xdist = "^3.4.0"
pytest-mock = "^3.12.0"
pytest-benchmark = "^4.0.0"
hypothesis = "^6.92.1"
factory-boy = "^3.3.0"
faker = "^20.1.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.black]
line-length = 88
target-version = ['py311']
include = '\.pyi?$'
extend-exclude = '''
/(
# directories
\.eggs
| \.git
| \.hg
| \.mypy_cache
| \.tox
| \.venv
| build
| dist
)/
'''
[tool.isort]
profile = "black"
multi_line_output = 3
line_length = 88
known_first_party = ["app"]
[tool.mypy]
python_version = "3.11"
warn_return_any = true
warn_unused_configs = true
disallow_untyped_defs = true
disallow_incomplete_defs = true
check_untyped_defs = true
disallow_untyped_decorators = true
no_implicit_optional = true
warn_redundant_casts = true
warn_unused_ignores = true
warn_no_return = true
warn_unreachable = true
strict_equality = true
[[tool.mypy.overrides]]
module = [
"magic.*",
"toncli.*",
"ton.*",
]
ignore_missing_imports = true
[tool.pytest.ini_options]
minversion = "7.0"
addopts = "-ra -q --strict-markers --strict-config"
testpaths = ["tests"]
asyncio_mode = "auto"
markers = [
"slow: marks tests as slow (deselect with '-m \"not slow\"')",
"integration: marks tests as integration tests",
"unit: marks tests as unit tests",
"api: marks tests as API tests",
"database: marks tests as database tests",
"redis: marks tests as redis tests",
]
[tool.coverage.run]
source = ["app"]
omit = [
"*/tests/*",
"*/venv/*",
"*/.venv/*",
]
[tool.coverage.report]
exclude_lines = [
"pragma: no cover",
"def __repr__",
"if self.debug:",
"if settings.DEBUG",
"raise AssertionError",
"raise NotImplementedError",
"if 0:",
"if __name__ == .__main__.:",
'class .*\bProtocol\):',
'@(abc\.)?abstractmethod',
]
[tool.bandit]
exclude_dirs = ["tests", "venv", ".venv"]
skips = ["B101", "B601"]
[tool.bandit.assert_used]
skips = ["*_test.py", "*/test_*.py"]
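# Typical local workflow with Poetry (assumes Poetry >= 1.5; group names match the ones above):
#   poetry install --with dev,test
#   poetry run pytest -m "unit"
#   poetry run black app && poetry run isort app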

View File

@ -0,0 +1,49 @@
# Existing dependencies from original requirements.txt
sanic==21.9.1
websockets==10.0
sqlalchemy==2.0.23
python-dotenv==1.0.0
pymysql==1.1.0
aiogram==3.13.0
pytonconnect==0.3.0
base58==2.1.1
git+https://github.com/tonfactory/tonsdk.git@3ebbf0b702f48c2519e4c6c425f9514f673b9d48#egg=tonsdk
httpx==0.25.0
docker==7.0.0
pycryptodome==3.20.0
pynacl==1.5.0
aiofiles==23.2.1
pydub==0.25.1
pillow==10.2.0
ffmpeg-python==0.2.0
python-magic==0.4.27
# Additional dependencies for enhanced functionality
# Security and validation
bcrypt==4.1.2
pyjwt==2.8.0
cryptography==41.0.8
email-validator==2.1.0
pydantic==2.5.2
pydantic-settings==2.1.0
# Database and caching
aiomysql==0.2.0
asyncpg==0.29.0
redis==5.0.1
# Monitoring and logging
prometheus-client==0.19.0
structlog==23.2.0
# Development and testing
pytest==7.4.3
pytest-asyncio==0.21.1
black==23.11.0
isort==5.12.0
mypy==1.7.1
bandit==1.7.5
# Optional monitoring (commented out for minimal setup)
# grafana-api==1.0.3
# sentry-sdk==1.38.0

46
requirements_new.txt Normal file
View File

@ -0,0 +1,46 @@
# Core Framework
sanic==23.12.1
websockets==12.0
# Async Database
sqlalchemy[asyncio]==2.0.23
asyncpg==0.29.0
alembic==1.13.1
# Redis & Caching
redis[hiredis]==5.0.1
aioredis==2.0.1  # NOTE: fails to import on Python 3.11; prefer redis.asyncio from the redis package pinned above
# Telegram Bot
aiogram==3.13.0
aiohttp==3.9.1
# TON Blockchain
pytonconnect==0.3.0
base58==2.1.1
git+https://github.com/tonfactory/tonsdk.git@3ebbf0b702f48c2519e4c6c425f9514f673b9d48#egg=tonsdk
# HTTP Client
httpx[http2]==0.25.2
# Cryptography
pycryptodome==3.20.0
pynacl==1.5.0
# File Processing
aiofiles==24.1.0
pydub==0.25.1
pillow==10.2.0
ffmpeg-python==0.2.0
python-magic==0.4.27
# Utilities
python-dotenv==1.0.0
docker==7.0.0
# Monitoring & Observability
prometheus-client==0.19.0
structlog==23.2.0
# Validation
pydantic==2.5.2

Binary file not shown.

311
scripts/init-db.sql Normal file
View File

@ -0,0 +1,311 @@
-- PostgreSQL initialization script for my-uploader-bot
-- This script sets up the database, users, and extensions
-- Create database if it doesn't exist
SELECT 'CREATE DATABASE myuploader'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'myuploader')\gexec
-- Connect to the database
\c myuploader;
-- Create extensions
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_trgm";
CREATE EXTENSION IF NOT EXISTS "btree_gin";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- Create custom types
DO $$ BEGIN
CREATE TYPE user_role_type AS ENUM ('admin', 'user', 'moderator');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
DO $$ BEGIN
CREATE TYPE content_status_type AS ENUM ('pending', 'uploading', 'processing', 'completed', 'failed', 'deleted');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
DO $$ BEGIN
CREATE TYPE transaction_status_type AS ENUM ('pending', 'confirmed', 'failed', 'cancelled');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Create application user (for connection pooling)
DO $$ BEGIN
CREATE USER app_user WITH PASSWORD 'secure_app_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER app_user WITH PASSWORD 'secure_app_password';
END $$;
-- Grant necessary permissions
GRANT CONNECT ON DATABASE myuploader TO app_user;
GRANT USAGE ON SCHEMA public TO app_user;
-- Grant table permissions (will be applied after tables are created)
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO app_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO app_user;
-- Create read-only user for monitoring/analytics
DO $$ BEGIN
CREATE USER readonly_user WITH PASSWORD 'readonly_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER readonly_user WITH PASSWORD 'readonly_password';
END $$;
GRANT CONNECT ON DATABASE myuploader TO readonly_user;
GRANT USAGE ON SCHEMA public TO readonly_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly_user;
-- Create backup user
DO $$ BEGIN
CREATE USER backup_user WITH PASSWORD 'backup_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER backup_user WITH PASSWORD 'backup_password';
END $$;
GRANT CONNECT ON DATABASE myuploader TO backup_user;
GRANT USAGE ON SCHEMA public TO backup_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO backup_user;
-- Create performance monitoring functions
CREATE OR REPLACE FUNCTION get_table_stats()
RETURNS TABLE (
schema_name TEXT,
table_name TEXT,
row_count BIGINT,
total_size TEXT,
index_size TEXT,
toast_size TEXT
) AS $$
BEGIN
RETURN QUERY
SELECT
schemaname::TEXT,
tablename::TEXT,
n_tup_ins - n_tup_del AS row_count,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
pg_size_pretty(pg_indexes_size(schemaname||'.'||tablename)) AS index_size,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) AS toast_size
FROM pg_stat_user_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
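-- Example usage (e.g. from a read-only monitoring session):
--   SELECT * FROM get_table_stats();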
-- Create index monitoring function
CREATE OR REPLACE FUNCTION get_unused_indexes()
RETURNS TABLE (
schema_name TEXT,
table_name TEXT,
index_name TEXT,
index_size TEXT,
index_scans BIGINT
) AS $$
BEGIN
RETURN QUERY
SELECT
schemaname::TEXT,
tablename::TEXT,
indexname::TEXT,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
idx_scan
FROM pg_stat_user_indexes
WHERE schemaname = 'public'
AND idx_scan < 100 -- Indexes used less than 100 times
ORDER BY pg_relation_size(indexrelid) DESC;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create slow query logging configuration
ALTER SYSTEM SET log_min_duration_statement = 1000; -- Log queries taking more than 1 second
ALTER SYSTEM SET log_statement = 'mod'; -- Log modifications
ALTER SYSTEM SET log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ';
-- Create audit log table for sensitive operations
CREATE TABLE IF NOT EXISTS audit_log (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID,
action VARCHAR(50) NOT NULL,
table_name VARCHAR(50),
record_id UUID,
old_values JSONB,
new_values JSONB,
ip_address INET,
user_agent TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
CREATE INDEX IF NOT EXISTS idx_audit_log_user_id ON audit_log(user_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_action ON audit_log(action);
CREATE INDEX IF NOT EXISTS idx_audit_log_created_at ON audit_log(created_at);
CREATE INDEX IF NOT EXISTS idx_audit_log_table_name ON audit_log(table_name);
-- Create audit trigger function
CREATE OR REPLACE FUNCTION audit_trigger_function()
RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'DELETE' THEN
INSERT INTO audit_log (action, table_name, record_id, old_values)
VALUES (TG_OP, TG_TABLE_NAME, OLD.id, row_to_json(OLD));
RETURN OLD;
ELSIF TG_OP = 'UPDATE' THEN
INSERT INTO audit_log (action, table_name, record_id, old_values, new_values)
VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(OLD), row_to_json(NEW));
RETURN NEW;
ELSIF TG_OP = 'INSERT' THEN
INSERT INTO audit_log (action, table_name, record_id, new_values)
VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(NEW));
RETURN NEW;
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
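-- Example of attaching the audit trigger to a table; "users" is illustrative here,
-- apply it to whichever tables need auditing once they exist:
-- CREATE TRIGGER users_audit
--     AFTER INSERT OR UPDATE OR DELETE ON users
--     FOR EACH ROW EXECUTE FUNCTION audit_trigger_function();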
-- Create cleanup function for old audit logs
CREATE OR REPLACE FUNCTION cleanup_old_audit_logs(retention_days INTEGER DEFAULT 90)
RETURNS INTEGER AS $$
DECLARE
deleted_count INTEGER;
BEGIN
DELETE FROM audit_log
WHERE created_at < NOW() - INTERVAL '1 day' * retention_days;
GET DIAGNOSTICS deleted_count = ROW_COUNT;
RETURN deleted_count;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create maintenance function
CREATE OR REPLACE FUNCTION run_maintenance()
RETURNS TEXT AS $$
DECLARE
result TEXT := '';
rec RECORD;
BEGIN
-- Update table statistics
ANALYZE;
result := result || 'Statistics updated. ';
-- NOTE: VACUUM cannot be executed inside a function (functions always run within a
-- transaction block), so per-table vacuuming is delegated to autovacuum or to an
-- external "vacuumdb --analyze" job rather than attempted here.
result := result || 'Vacuum delegated to autovacuum/external job. ';
-- Cleanup old audit logs (keep 90 days)
result := result || 'Cleaned up ' || cleanup_old_audit_logs(90) || ' old audit logs. ';
-- Reindex if needed (check for bloat)
FOR rec IN
SELECT schemaname, tablename
FROM pg_stat_user_tables
WHERE n_dead_tup > n_live_tup * 0.1
AND n_live_tup > 1000
LOOP
EXECUTE 'REINDEX TABLE ' || quote_ident(rec.schemaname) || '.' || quote_ident(rec.tablename);
result := result || 'Reindexed ' || rec.tablename || '. ';
END LOOP;
RETURN result;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
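-- Example invocation (e.g. from a weekly cron job or a pg_cron schedule):
--   SELECT run_maintenance();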
-- Create backup verification function
CREATE OR REPLACE FUNCTION verify_backup_integrity()
RETURNS TABLE (
table_name TEXT,
row_count BIGINT,
last_modified TIMESTAMP WITH TIME ZONE,
checksum TEXT
) AS $$
BEGIN
RETURN QUERY
SELECT
t.tablename::TEXT,
t.n_live_tup,
GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze),
md5(string_agg(c.column_name, ',' ORDER BY c.ordinal_position))
FROM pg_stat_user_tables t
JOIN information_schema.columns c ON c.table_name = t.tablename
WHERE t.schemaname = 'public'
GROUP BY t.tablename, t.n_live_tup,
GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze)
ORDER BY t.tablename;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create connection monitoring view
CREATE OR REPLACE VIEW active_connections AS
SELECT
pid,
usename,
application_name,
client_addr,
client_port,
backend_start,
state,
query_start,
LEFT(query, 100) as query_preview
FROM pg_stat_activity
WHERE state != 'idle'
AND pid != pg_backend_pid()
ORDER BY backend_start;
-- Grant permissions for monitoring functions
GRANT EXECUTE ON FUNCTION get_table_stats() TO readonly_user;
GRANT EXECUTE ON FUNCTION get_unused_indexes() TO readonly_user;
GRANT EXECUTE ON FUNCTION verify_backup_integrity() TO backup_user;
GRANT SELECT ON active_connections TO readonly_user;
-- Set up automatic maintenance schedule (requires pg_cron extension)
-- Uncomment if pg_cron is available
-- SELECT cron.schedule('database-maintenance', '0 2 * * 0', 'SELECT run_maintenance();');
-- SELECT cron.schedule('audit-cleanup', '0 3 * * *', 'SELECT cleanup_old_audit_logs(90);');
-- Create performance tuning settings
ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';
ALTER SYSTEM SET track_activity_query_size = 2048;
ALTER SYSTEM SET track_functions = 'all';
ALTER SYSTEM SET track_io_timing = 'on';
-- Connection pooling settings
ALTER SYSTEM SET max_connections = 200;
ALTER SYSTEM SET shared_buffers = '256MB';
ALTER SYSTEM SET effective_cache_size = '1GB';
ALTER SYSTEM SET maintenance_work_mem = '64MB';
ALTER SYSTEM SET checkpoint_completion_target = 0.9;
ALTER SYSTEM SET wal_buffers = '16MB';
ALTER SYSTEM SET default_statistics_target = 100;
ALTER SYSTEM SET random_page_cost = 1.1;
ALTER SYSTEM SET effective_io_concurrency = 200;
-- Security settings
ALTER SYSTEM SET ssl = 'on';
ALTER SYSTEM SET log_connections = 'on';
ALTER SYSTEM SET log_disconnections = 'on';
ALTER SYSTEM SET log_checkpoints = 'on';
ALTER SYSTEM SET log_lock_waits = 'on';
-- Reload configuration (settings such as shared_preload_libraries, max_connections and shared_buffers require a full server restart to take effect)
SELECT pg_reload_conf();
-- Create initial admin user (password should be changed immediately)
-- This will be handled by the application during first startup
-- Display completion message
DO $$
BEGIN
RAISE NOTICE 'Database initialization completed successfully!';
RAISE NOTICE 'Remember to:';
RAISE NOTICE '1. Change default passwords for app_user, readonly_user, and backup_user';
RAISE NOTICE '2. Configure SSL certificates';
RAISE NOTICE '3. Set up regular backups';
RAISE NOTICE '4. Run initial migrations with Alembic';
RAISE NOTICE '5. Create your first admin user through the application';
END $$;

351
secure_service.sh Normal file
View File

@ -0,0 +1,351 @@
#!/bin/bash
# MY Network - Service hardening (step 2 of 2)
# Configures security: nginx, SSL, firewall
# Run after install_service.sh
set -e
echo "🔒 MY Network - Защита сервиса"
echo "==============================="
# Проверка прав root
if [[ $EUID -ne 0 ]]; then
echo "❌ Этот скрипт должен запускаться от root"
echo "Используйте: sudo bash secure_service.sh"
exit 1
fi
# Проверка конфигурации
CONFIG_FILE="/opt/my-network-config.txt"
if [[ ! -f "$CONFIG_FILE" ]]; then
echo "❌ Конфигурация не найдена!"
echo "Сначала запустите: sudo bash install_service.sh"
exit 1
fi
# Извлечение конфигурации
DOMAIN=$(grep "Домен:" $CONFIG_FILE | cut -d' ' -f2)
EMAIL=$(grep "Email:" $CONFIG_FILE | cut -d' ' -f2)
if [[ -z "$DOMAIN" || -z "$EMAIL" ]]; then
echo "❌ Не удалось прочитать конфигурацию!"
exit 1
fi
echo "📋 Настройки безопасности:"
echo " Домен: $DOMAIN"
echo " Email: $EMAIL"
echo " SSL: Let's Encrypt"
echo " Firewall: UFW"
echo ""
# Установка nginx и certbot
echo "🌐 Установка nginx и certbot..."
apt install -y nginx certbot python3-certbot-nginx ssl-cert
# Настройка nginx
echo "⚙️ Настройка nginx..."
cat > /etc/nginx/sites-available/my-network << EOF
# MY Network - nginx configuration
# Rate limiting zones must live at http level (sites-available files are included
# inside the http block); they are not allowed inside a server block
limit_req_zone \$binary_remote_addr zone=api:10m rate=10r/s;
limit_req_zone \$binary_remote_addr zone=monitor:10m rate=2r/s;
server {
listen 80;
server_name $DOMAIN;
# Redirect HTTP to HTTPS
return 301 https://\$server_name\$request_uri;
}
server {
listen 443 ssl http2;
server_name $DOMAIN;
# Temporary self-signed certificate so that "nginx -t" passes before certbot runs;
# certbot --nginx below replaces these paths with the Let's Encrypt certificate
ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem;
ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always;
add_header Referrer-Policy "strict-origin-when-cross-origin";
# Hide nginx version
server_tokens off;
# Rate limiting zones are defined above at http level; per-location limits below reference them
# Main application
location / {
limit_req zone=api burst=20 nodelay;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
# Websocket support
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
# Timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# Monitoring interface (restricted)
location /api/my/monitor {
limit_req zone=monitor burst=5 nodelay;
# IP whitelist (localhost only by default)
allow 127.0.0.1;
allow ::1;
deny all;
proxy_pass http://127.0.0.1:15100;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
}
# Static files
location /static/ {
alias /opt/my-network/my-uploader-bot/static/;
expires 30d;
add_header Cache-Control "public, immutable";
}
# Block sensitive paths
location ~ /\.(?!well-known) {
deny all;
}
location ~ ^/(config|\.env|requirements|docker-compose) {
deny all;
}
}
EOF
# Включение сайта
ln -sf /etc/nginx/sites-available/my-network /etc/nginx/sites-enabled/
rm -f /etc/nginx/sites-enabled/default
# Проверка конфигурации nginx
nginx -t
# Получение SSL сертификата
echo "🔐 Получение SSL сертификата..."
certbot --nginx -d $DOMAIN --email $EMAIL --agree-tos --non-interactive --redirect
# Настройка автообновления сертификата
echo "🔄 Настройка автообновления SSL..."
(crontab -l 2>/dev/null; echo "0 12 * * * /usr/bin/certbot renew --quiet") | crontab -
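# Optional: verify that automated renewal will work end-to-end (non-destructive dry run)
certbot renew --dry-run || echo "⚠️ certbot dry-run renewal failed, check /var/log/letsencrypt/"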
# Настройка firewall
echo "🔥 Настройка firewall..."
ufw --force reset
ufw default deny incoming
ufw default allow outgoing
# Разрешение SSH (проверяем текущий порт)
SSH_PORT=$(ss -tlnp | grep sshd | grep -oE ':[0-9]+' | head -1 | cut -d: -f2)
SSH_PORT=${SSH_PORT:-22}
echo "🔑 Разрешение SSH на порту $SSH_PORT..."
ufw allow $SSH_PORT/tcp comment 'SSH'
# Разрешение HTTP/HTTPS
ufw allow 80/tcp comment 'HTTP'
ufw allow 443/tcp comment 'HTTPS'
# Разрешение внутренних соединений
ufw allow from 127.0.0.1 to any port 15100 comment 'MY Network internal'
# Включение firewall
ufw --force enable
# Дополнительная защита системы
echo "🛡️ Дополнительная защита..."
# Disable services we do not need (nginx stays enabled - it fronts the application)
systemctl disable --now apache2 2>/dev/null || true
# Запуск nginx
systemctl enable nginx
systemctl start nginx
# Настройка fail2ban
echo "🚫 Установка fail2ban..."
apt install -y fail2ban
cat > /etc/fail2ban/jail.local << EOF
[DEFAULT]
bantime = 3600
findtime = 600
maxretry = 5
[sshd]
enabled = true
port = $SSH_PORT
filter = sshd
logpath = /var/log/auth.log
[nginx-http-auth]
enabled = true
filter = nginx-http-auth
logpath = /var/log/nginx/error.log
[nginx-limit-req]
enabled = true
filter = nginx-limit-req
logpath = /var/log/nginx/error.log
maxretry = 3
EOF
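# Optional sanity check of the jail configuration (the -t/--test flag is assumed to be
# available, as in fail2ban >= 0.10)
fail2ban-client -t || echo "⚠️ fail2ban configuration test failed"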
systemctl enable fail2ban
systemctl start fail2ban
# Настройка logrotate
echo "📜 Настройка ротации логов..."
cat > /etc/logrotate.d/my-network << EOF
/opt/logs/*.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
copytruncate
}
EOF
# Настройка мониторинга
echo "📊 Настройка мониторинга..."
cat > /opt/monitor.sh << 'EOF'
#!/bin/bash
# MY Network monitoring script
LOG_FILE="/opt/logs/monitor.log"
DATE=$(date '+%Y-%m-%d %H:%M:%S')
# Check service status
if systemctl is-active --quiet my-network; then
SERVICE_STATUS="OK"
else
SERVICE_STATUS="FAIL"
fi
# Check nginx status
if systemctl is-active --quiet nginx; then
NGINX_STATUS="OK"
else
NGINX_STATUS="FAIL"
fi
# Check disk space
DISK_USAGE=$(df -h /opt | awk 'NR==2 {print $5}' | sed 's/%//')
if [[ $DISK_USAGE -gt 90 ]]; then
DISK_STATUS="CRITICAL"
elif [[ $DISK_USAGE -gt 80 ]]; then
DISK_STATUS="WARNING"
else
DISK_STATUS="OK"
fi
# Check memory
MEM_USAGE=$(free | grep Mem | awk '{printf "%.0f", $3/$2 * 100.0}')
if [[ $MEM_USAGE -gt 90 ]]; then
MEM_STATUS="CRITICAL"
elif [[ $MEM_USAGE -gt 80 ]]; then
MEM_STATUS="WARNING"
else
MEM_STATUS="OK"
fi
# Log status
echo "[$DATE] Service: $SERVICE_STATUS, Nginx: $NGINX_STATUS, Disk: $DISK_STATUS ($DISK_USAGE%), Memory: $MEM_STATUS ($MEM_USAGE%)" >> $LOG_FILE
# Alert if critical
if [[ "$SERVICE_STATUS" == "FAIL" || "$NGINX_STATUS" == "FAIL" || "$DISK_STATUS" == "CRITICAL" || "$MEM_STATUS" == "CRITICAL" ]]; then
echo "[$DATE] ALERT: Critical status detected!" >> $LOG_FILE
# Here you can add email notification or webhook
fi
EOF
chmod +x /opt/monitor.sh
# Добавление в cron
(crontab -l 2>/dev/null; echo "*/5 * * * * /opt/monitor.sh") | crontab -
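# Run the monitor once right away so /opt/logs/monitor.log gets an initial entry
/opt/monitor.sh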
# Обновление конфигурации
echo "💾 Обновление конфигурации..."
cat >> /opt/my-network-config.txt << EOF
Безопасность настроена:
======================
SSL: Включен (Let's Encrypt)
Firewall: Включен (UFW)
Nginx: Включен с rate limiting
Fail2ban: Включен
Мониторинг: /opt/monitor.sh (каждые 5 мин)
Проверка безопасности:
ufw status
systemctl status nginx
systemctl status fail2ban
certbot certificates
Логи безопасности:
/var/log/nginx/access.log
/var/log/nginx/error.log
/var/log/fail2ban.log
/opt/logs/monitor.log
EOF
# Финальная проверка
echo "🔍 Финальная проверка..."
sleep 3
echo "🌐 Проверка nginx..."
systemctl status nginx --no-pager -l
echo "🔥 Статус firewall..."
ufw status numbered
echo "🔐 SSL сертификаты..."
certbot certificates
echo "📡 Проверка доступности..."
curl -s -o /dev/null -w "%{http_code}" https://$DOMAIN/api/my/health || echo "Сервис недоступен"
echo ""
echo "✅ MY Network защищен!"
echo "======================"
echo "🌐 Домен: https://$DOMAIN"
echo "🔐 SSL: Включен"
echo "🔥 Firewall: Включен"
echo "🚫 Fail2ban: Включен"
echo "📊 Мониторинг: Включен"
echo ""
echo "🔍 Проверка работы:"
echo " curl https://$DOMAIN/api/my/health"
echo " systemctl status my-network nginx fail2ban"
echo ""
echo "📈 Мониторинг:"
echo " https://$DOMAIN/api/my/monitor/ (только с localhost)"
echo " tail -f /opt/logs/monitor.log"
echo ""
echo "🛡️ Безопасность:"
echo " ufw status"
echo " fail2ban-client status"
echo " certbot certificates"
echo ""
echo "📚 Полная документация: /opt/my-network/my-uploader-bot/DOCS_RU.md"

19
ssh_connect.exp Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/expect -f
set timeout 30
set password "DMUEjmnh6mDs/qlzhpjDzQ"
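# NOTE: the password is stored here in plain text; keep this file out of version control
# and prefer key-based authentication where possible.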
spawn ssh -o StrictHostKeyChecking=no service@2.58.65.188
expect {
"password:" {
send "$password\r"
exp_continue
}
"$ " {
interact
}
eof {
exit
}
}

298
start_my_network.py Normal file
View File

@ -0,0 +1,298 @@
#!/usr/bin/env python3
"""
MY Network Server Startup Script
Starts MY Network with a minimal set of HTTP endpoints.
"""
import asyncio
import logging
import signal
import sys
from pathlib import Path
from typing import Optional
# Добавить корневую директорию в путь
sys.path.append(str(Path(__file__).parent))
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse
# Настройка логирования
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(sys.stdout),
logging.FileHandler('my_network.log')
]
)
logger = logging.getLogger(__name__)
# Глобальные переменные
app: Optional[FastAPI] = None
node_service = None
def create_app() -> FastAPI:
"""Create the FastAPI application for MY Network."""
app = FastAPI(
title="MY Network",
description="Distributed Content Protocol v2.0",
version="2.0.0",
docs_url="/api/docs",
redoc_url="/api/redoc"
)
# Настройка CORS
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
return app
async def init_my_network_service():
"""Initialize the MY Network service."""
global node_service
try:
logger.info("Initializing MY Network service...")
# Импортировать и инициализировать сервис ноды
from app.core.my_network.node_service import NodeService
# Создать сервис ноды
node_service = NodeService()
# Запустить сервис
await node_service.start()
logger.info("MY Network service initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize MY Network service: {e}")
raise
def setup_routes(app: FastAPI):
"""Configure the application routes."""
try:
# Импортировать маршруты MY Network
from app.api.routes.my_network_routes import router as my_network_router
from app.api.routes.my_monitoring import router as monitoring_router
# Добавить маршруты
app.include_router(my_network_router)
app.include_router(monitoring_router)
logger.info("MY Network routes configured")
except ImportError as e:
logger.error(f"Failed to import MY Network routes: {e}")
# Создать минимальные маршруты если основные не работают
setup_minimal_routes(app)
def setup_minimal_routes(app: FastAPI):
"""Configure minimal fallback routes."""
@app.get("/")
async def root():
return {"message": "MY Network v2.0 - Distributed Content Protocol"}
@app.get("/health")
async def health_check():
return {
"status": "healthy" if node_service else "initializing",
"service": "MY Network",
"version": "2.0.0"
}
@app.get("/api/my/node/info")
async def node_info():
if not node_service:
raise HTTPException(status_code=503, detail="MY Network service not available")
try:
info = await node_service.get_node_info()
return {"success": True, "data": info}
except Exception as e:
logger.error(f"Error getting node info: {e}")
raise HTTPException(status_code=500, detail=str(e))
@app.get("/api/my/monitor/", response_class=HTMLResponse)
async def monitoring_dashboard():
"""Simple HTML monitoring page used when the templates are unavailable."""
html_content = """
<!DOCTYPE html>
<html>
<head>
<title>MY Network Monitor</title>
<style>
body { background: #000; color: #0f0; font-family: monospace; padding: 20px; }
.container { max-width: 800px; margin: 0 auto; }
.status { border: 1px solid #0f0; padding: 10px; margin: 10px 0; }
</style>
</head>
<body>
<div class="container">
<h1>MY Network Monitor</h1>
<div class="status">
<h3>Status: ONLINE</h3>
<p>MY Network service is running</p>
</div>
<div class="status">
<h3>Endpoints:</h3>
<ul>
<li><a href="/api/my/node/info" style="color: #0f0;">/api/my/node/info</a> - Node information</li>
<li><a href="/health" style="color: #0f0;">/health</a> - Health check</li>
<li><a href="/api/docs" style="color: #0f0;">/api/docs</a> - API Documentation</li>
</ul>
</div>
</div>
</body>
</html>
"""
return HTMLResponse(content=html_content)
logger.info("Minimal routes configured")
async def startup_event():
"""Application startup event."""
logger.info("Starting MY Network server...")
try:
# Инициализировать MY Network сервис
await init_my_network_service()
logger.info("MY Network server started successfully")
except Exception as e:
logger.error(f"Failed to start MY Network server: {e}")
# Не останавливать сервер, работать в ограниченном режиме
logger.info("Running in limited mode without full MY Network features")
async def shutdown_event():
"""Application shutdown event."""
logger.info("Shutting down MY Network server...")
try:
if node_service:
await node_service.stop()
logger.info("MY Network server stopped successfully")
except Exception as e:
logger.error(f"Error during shutdown: {e}")
def setup_signal_handlers():
"""Set up signal handlers."""
def signal_handler(signum, frame):
logger.info(f"Received signal {signum}, shutting down...")
# Запустить graceful shutdown
if node_service:
try:
asyncio.create_task(node_service.stop())
except Exception as e:
logger.error(f"Error during signal shutdown: {e}")
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
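# Note: when uvicorn.run() starts in the main thread it installs its own SIGINT/SIGTERM
# handlers, so the handlers above mainly cover the window before the server takes over.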
def main():
"""Main entry point."""
print("""
MY NETWORK v2.0
Distributed Content Protocol
Starting minimal HTTP server with MY Network capabilities...
""")
try:
# Настроить обработчики сигналов
setup_signal_handlers()
# Создать приложение
global app
app = create_app()
# Настроить маршруты
setup_routes(app)
# Добавить события запуска/остановки
app.add_event_handler("startup", startup_event)
app.add_event_handler("shutdown", shutdown_event)
# Определить порт
port = 8000
# Попробовать найти свободный порт
import socket
for test_port in [8000, 8001, 8080, 15100]:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', test_port))
sock.close()
port = test_port
break
except OSError:
continue
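# Note: probing a port here and binding it later in uvicorn is inherently racy; if another
# process grabs the port in between, uvicorn fails fast with "address already in use".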
logger.info(f"Starting server on port {port}")
print(f"""
MY Network Server starting on http://localhost:{port}
Available endpoints:
http://localhost:{port}/ - Main page
http://localhost:{port}/health - Health check
http://localhost:{port}/api/my/monitor/ - Monitoring dashboard
http://localhost:{port}/api/docs - API Documentation
Press Ctrl+C to stop the server
""")
# Запустить сервер
uvicorn.run(
app,
host="0.0.0.0",
port=port,
log_level="info",
access_log=True
)
except KeyboardInterrupt:
logger.info("Received keyboard interrupt, shutting down...")
except Exception as e:
logger.error(f"Fatal error: {e}")
sys.exit(1)
if __name__ == "__main__":
main()