mariadb -> postgres

This commit is contained in:
user 2025-07-04 12:33:03 +03:00
parent 84acc64ad3
commit 2b9fbb6c7d
5 changed files with 461 additions and 441 deletions

View File

@ -1,21 +1,21 @@
version: '3'
services:
maria_db:
image: mariadb:11.2
postgres:
image: postgres:15-alpine
ports:
- "3307:3306"
- "5432:5432"
env_file:
- .env
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-password}
- MYSQL_DATABASE=${MYSQL_DATABASE:-myuploader}
- MYSQL_USER=${MYSQL_USER:-myuploader}
- MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
- POSTGRES_DB=${POSTGRES_DB:-my_uploader_db}
- POSTGRES_USER=${POSTGRES_USER:-my_user}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secure_password}
- POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
volumes:
- /Storage/sqlStorage:/var/lib/mysql
- postgres_data:/var/lib/postgresql/data
restart: always
healthcheck:
test: [ "CMD", "healthcheck.sh", "--connect", "--innodb_initialized" ]
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-my_user} -d ${POSTGRES_DB:-my_uploader_db}"]
interval: 10s
timeout: 5s
retries: 3
@ -42,7 +42,7 @@ services:
- .env
restart: always
links:
- maria_db
- postgres
- redis
ports:
- "15100:15100"
@ -50,7 +50,7 @@ services:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
@ -64,13 +64,13 @@ services:
env_file:
- .env
links:
- maria_db
- postgres
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
@ -84,13 +84,13 @@ services:
env_file:
- .env
links:
- maria_db
- postgres
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
@ -104,13 +104,13 @@ services:
env_file:
- .env
links:
- maria_db
- postgres
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
@ -124,17 +124,20 @@ services:
env_file:
- .env
links:
- maria_db
- postgres
- redis
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
volumes:
postgres_data:
driver: local
redis_data:
driver: local
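
Note: pg_isready only confirms the server accepts connections for the given user and database; it does not execute a query. A minimal host-side equivalent using asyncpg (pinned in requirements.txt), assuming the compose defaults above — adjust host and credentials to your .env:

import asyncio
import asyncpg

async def check() -> bool:
    # Compose-file defaults; real deployments override these via .env
    try:
        conn = await asyncpg.connect(
            host="localhost", port=5432,
            user="my_user", password="secure_password",
            database="my_uploader_db", timeout=5,
        )
        await conn.execute("SELECT 1")  # full round-trip, not just a TCP accept
        await conn.close()
        return True
    except (OSError, asyncpg.PostgresError):
        return False

print(asyncio.run(check()))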

View File

@ -1,105 +1,254 @@
version: '3'
version: '3.8'
services:
maria_db:
image: mariadb:11.2
ports:
- "3307:3306"
env_file:
- .env
# PostgreSQL Database
postgres:
image: postgres:15-alpine
container_name: uploader-bot-postgres
restart: unless-stopped
environment:
POSTGRES_DB: ${POSTGRES_DB:-my_uploader_db}
POSTGRES_USER: ${POSTGRES_USER:-my_user}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
volumes:
- /Storage/sqlStorage:/var/lib/mysql
restart: always
- postgres_data:/var/lib/postgresql/data
- ./scripts/init-db.sql:/docker-entrypoint-initdb.d/01-init.sql:ro
ports:
- "5432:5432"
networks:
- uploader_network
healthcheck:
test: [ "CMD", "healthcheck.sh", "--connect", "--innodb_initialized" ]
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-my_user} -d ${POSTGRES_DB:-my_uploader_db}"]
interval: 10s
timeout: 5s
retries: 3
retries: 5
start_period: 30s
# Redis Cache
redis:
image: redis:7-alpine
container_name: uploader-bot-redis
restart: unless-stopped
command: >
redis-server
--appendonly yes
--maxmemory 512mb
--maxmemory-policy allkeys-lru
--save 900 1
--save 300 10
--save 60 10000
volumes:
- redis_data:/data
ports:
- "6379:6379"
networks:
- uploader_network
healthcheck:
test: ["CMD", "redis-cli", "ping"]
interval: 10s
timeout: 3s
retries: 5
start_period: 10s
# Main Application (MY Uploader Bot)
app:
build:
context: .
dockerfile: Dockerfile
container_name: uploader-bot-app
command: python -m app
env_file:
- .env
restart: always
links:
- maria_db
restart: unless-stopped
depends_on:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
# Database
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
POSTGRES_HOST: postgres
POSTGRES_PORT: 5432
POSTGRES_DB: ${POSTGRES_DB:-my_uploader_db}
POSTGRES_USER: ${POSTGRES_USER:-my_user}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
# Redis
REDIS_URL: redis://redis:6379/0
REDIS_HOST: redis
REDIS_PORT: 6379
# Application
NODE_ENV: ${NODE_ENV:-production}
DEBUG: ${DEBUG:-false}
LOG_LEVEL: ${LOG_LEVEL:-INFO}
# MY Network
MY_NETWORK_NODE_ID: ${MY_NETWORK_NODE_ID}
MY_NETWORK_PORT: ${MY_NETWORK_PORT:-15100}
MY_NETWORK_HOST: ${MY_NETWORK_HOST:-0.0.0.0}
MY_NETWORK_DOMAIN: ${MY_NETWORK_DOMAIN}
MY_NETWORK_SSL_ENABLED: ${MY_NETWORK_SSL_ENABLED:-true}
# API Settings
API_HOST: ${API_HOST:-0.0.0.0}
API_PORT: ${API_PORT:-15100}
API_WORKERS: ${API_WORKERS:-4}
MAX_UPLOAD_SIZE: ${MAX_UPLOAD_SIZE:-100MB}
# Security
SECRET_KEY: ${SECRET_KEY}
JWT_SECRET: ${JWT_SECRET}
ENCRYPTION_KEY: ${ENCRYPTION_KEY}
# Converter (on-demand)
CONVERTER_DOCKER_IMAGE: ${CONVERTER_DOCKER_IMAGE:-my-converter:latest}
CONVERTER_SHARED_PATH: ${CONVERTER_SHARED_PATH:-/shared/converter}
CONVERTER_MAX_PARALLEL: ${CONVERTER_MAX_PARALLEL:-3}
CONVERTER_TIMEOUT: ${CONVERTER_TIMEOUT:-300}
ports:
- "15100:15100"
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
condition: service_healthy
- app_data:/app/data
- app_logs:/app/logs
- /var/run/docker.sock:/var/run/docker.sock # For on-demand converter containers
- converter_shared:/shared/converter
networks:
- uploader_network
healthcheck:
test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:15100/health')"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
indexer: # Sends a notification when a new NFT listing appears. Sets the CID field for all content. Checks the collection item following the last index, searches for new content, and notifies the uploader that their content was found. Assigns onchain_index to encrypted_content.
# Indexer Service
indexer:
build:
context: .
dockerfile: Dockerfile
restart: always
container_name: uploader-bot-indexer
restart: unless-stopped
command: python -m app indexer
env_file:
- .env
links:
- maria_db
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
REDIS_URL: redis://redis:6379/0
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: indexer
volumes:
- app_logs:/app/logs
- app_data:/app/data
networks:
- uploader_network
ton_daemon: # Works with the TON network. Service wallet tasks and contract deployment.
# TON Daemon Service
ton_daemon:
build:
context: .
dockerfile: Dockerfile
container_name: uploader-bot-ton-daemon
command: python -m app ton_daemon
restart: always
env_file:
- .env
links:
- maria_db
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
restart: unless-stopped
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
REDIS_URL: redis://redis:6379/0
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: ton_daemon
volumes:
- app_logs:/app/logs
- app_data:/app/data
networks:
- uploader_network
license_index: # Checks user wallets for new NFTs. Queries those NFTs via a designated GET method to determine that they represent a specific license, and stores the license information.
# License Index Service
license_index:
build:
context: .
dockerfile: Dockerfile
container_name: uploader-bot-license-index
command: python -m app license_index
restart: always
env_file:
- .env
links:
- maria_db
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
restart: unless-stopped
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
REDIS_URL: redis://redis:6379/0
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: license_index
volumes:
- app_logs:/app/logs
- app_data:/app/data
networks:
- uploader_network
# Convert Process Service
convert_process:
build:
context: .
dockerfile: Dockerfile
container_name: uploader-bot-convert-process
command: python -m app convert_process
restart: always
env_file:
- .env
links:
- maria_db
volumes:
- /Storage/logs:/app/logs
- /Storage/storedContent:/app/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped
depends_on:
maria_db:
postgres:
condition: service_healthy
redis:
condition: service_healthy
environment:
DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
REDIS_URL: redis://redis:6379/0
LOG_LEVEL: ${LOG_LEVEL:-INFO}
SERVICE_NAME: convert_process
volumes:
- app_logs:/app/logs
- app_data:/app/data
- /var/run/docker.sock:/var/run/docker.sock
- converter_shared:/shared/converter
networks:
- uploader_network
# Converter Build (for creating converter image)
converter-build:
build:
context: ./modules/converter-module
dockerfile: Dockerfile
image: my-converter:latest
container_name: uploader-bot-converter-build
profiles: ["build-only"] # Only runs during build
volumes:
- converter_shared:/shared/converter
networks:
- uploader_network
volumes:
postgres_data:
driver: local
redis_data:
driver: local
app_data:
driver: local
app_logs:
driver: local
converter_shared:
driver: local
networks:
uploader_network:
driver: bridge
ipam:
config:
- subnet: 172.20.0.0/16
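
All five services receive the same postgresql+asyncpg DSN through DATABASE_URL. A sketch of how such a URL is typically consumed with SQLAlchemy 2.0's async engine (the factory name is illustrative, not taken from the repo):

import os
from sqlalchemy.ext.asyncio import create_async_engine, async_sessionmaker

# Injected by docker-compose, e.g.
# postgresql+asyncpg://my_user:secure_password@postgres:5432/my_uploader_db
engine = create_async_engine(
    os.environ["DATABASE_URL"],
    pool_size=10,        # 5 services x pool_size must stay under max_connections
    pool_pre_ping=True,  # recycle connections dropped by container restarts
)
SessionLocal = async_sessionmaker(engine, expire_on_commit=False)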

View File

@ -1,20 +1,46 @@
sanic==21.9.1
websockets==10.0
sqlalchemy==2.0.23
python-dotenv==1.0.0
pymysql==1.1.0
# Core Framework
sanic==23.12.1
websockets==12.0
# Async Database (PostgreSQL)
sqlalchemy[asyncio]==2.0.23
asyncpg==0.29.0
alembic==1.13.1
# Redis & Caching
redis[hiredis]==5.0.1
aioredis==2.0.1
# Telegram Bot
aiogram==3.13.0
aiohttp==3.9.1
# TON Blockchain
pytonconnect==0.3.0
base58==2.1.1
git+https://github.com/tonfactory/tonsdk.git@3ebbf0b702f48c2519e4c6c425f9514f673b9d48#egg=tonsdk
httpx==0.25.0
docker==7.0.0
# HTTP Client
httpx[http2]==0.25.2
# Cryptography
pycryptodome==3.20.0
pynacl==1.5.0
aiofiles==23.2.1
# File Processing
aiofiles==24.1.0
pydub==0.25.1
pillow==10.2.0
ffmpeg-python==0.2.0
python-magic==0.4.27
# Utilities
python-dotenv==1.0.0
docker==7.0.0
# Monitoring & Observability
prometheus-client==0.19.0
structlog==23.2.0
# Validation
pydantic==2.5.2
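
redis-py 4+ ships its own asyncio client, so the separate aioredis pin is only needed for legacy imports. A minimal sketch of pinging the compose's Redis with the pinned redis[hiredis] client, assuming the default REDIS_URL:

import asyncio
import redis.asyncio as redis  # part of redis-py, not the legacy aioredis package

async def main() -> None:
    client = redis.from_url("redis://localhost:6379/0")  # compose REDIS_URL
    print(await client.ping())  # True once the redis container is healthy
    await client.aclose()

asyncio.run(main())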

View File

@ -1,311 +1,57 @@
-- PostgreSQL initialization script for my-uploader-bot
-- This script sets up the database, users, and extensions
-- PostgreSQL Database Initialization Script for MY Uploader Bot
-- This script sets up the initial database structure and users
-- Create database if it doesn't exist
SELECT 'CREATE DATABASE myuploader'
WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'myuploader')\gexec
-- Create database if not exists (done by PostgreSQL container initialization)
-- Create user and grant privileges are also handled by container environment variables
-- Connect to the database
\c myuploader;
-- Set up database encoding and collation (handled by POSTGRES_INITDB_ARGS)
-- Create extensions
-- Create extensions if needed
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_trgm";
CREATE EXTENSION IF NOT EXISTS "btree_gin";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- Create custom types
DO $$ BEGIN
CREATE TYPE user_role_type AS ENUM ('admin', 'user', 'moderator');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Create schemas for better organization
CREATE SCHEMA IF NOT EXISTS my_network;
CREATE SCHEMA IF NOT EXISTS content;
CREATE SCHEMA IF NOT EXISTS users;
CREATE SCHEMA IF NOT EXISTS analytics;
DO $$ BEGIN
CREATE TYPE content_status_type AS ENUM ('pending', 'uploading', 'processing', 'completed', 'failed', 'deleted');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Grant permissions on schemas
GRANT USAGE, CREATE ON SCHEMA my_network TO my_user;
GRANT USAGE, CREATE ON SCHEMA content TO my_user;
GRANT USAGE, CREATE ON SCHEMA users TO my_user;
GRANT USAGE, CREATE ON SCHEMA analytics TO my_user;
DO $$ BEGIN
CREATE TYPE transaction_status_type AS ENUM ('pending', 'confirmed', 'failed', 'cancelled');
EXCEPTION
WHEN duplicate_object THEN null;
END $$;
-- Set default search path
ALTER DATABASE my_uploader_db SET search_path TO public, my_network, content, users, analytics;
-- Create application user (for connection pooling)
DO $$ BEGIN
CREATE USER app_user WITH PASSWORD 'secure_app_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER app_user WITH PASSWORD 'secure_app_password';
END $$;
-- Create initial tables structure (if not handled by ORM migrations)
-- These will be created by the application's migration system
-- Grant necessary permissions
GRANT CONNECT ON DATABASE myuploader TO app_user;
GRANT USAGE ON SCHEMA public TO app_user;
-- Grant table permissions (will be applied after tables are created)
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO app_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO app_user;
-- Create read-only user for monitoring/analytics
DO $$ BEGIN
CREATE USER readonly_user WITH PASSWORD 'readonly_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER readonly_user WITH PASSWORD 'readonly_password';
END $$;
GRANT CONNECT ON DATABASE myuploader TO readonly_user;
GRANT USAGE ON SCHEMA public TO readonly_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly_user;
-- Create backup user
DO $$ BEGIN
CREATE USER backup_user WITH PASSWORD 'backup_password';
EXCEPTION
WHEN duplicate_object THEN
ALTER USER backup_user WITH PASSWORD 'backup_password';
END $$;
GRANT CONNECT ON DATABASE myuploader TO backup_user;
GRANT USAGE ON SCHEMA public TO backup_user;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO backup_user;
-- Create performance monitoring functions
CREATE OR REPLACE FUNCTION get_table_stats()
RETURNS TABLE (
schema_name TEXT,
table_name TEXT,
row_count BIGINT,
total_size TEXT,
index_size TEXT,
toast_size TEXT
) AS $$
BEGIN
RETURN QUERY
SELECT
schemaname::TEXT,
tablename::TEXT,
n_tup_ins - n_tup_del AS row_count,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
pg_size_pretty(pg_indexes_size(schemaname||'.'||tablename)) AS index_size,
pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) AS toast_size
FROM pg_stat_user_tables
WHERE schemaname = 'public'
ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create index monitoring function
CREATE OR REPLACE FUNCTION get_unused_indexes()
RETURNS TABLE (
schema_name TEXT,
table_name TEXT,
index_name TEXT,
index_size TEXT,
index_scans BIGINT
) AS $$
BEGIN
RETURN QUERY
SELECT
schemaname::TEXT,
tablename::TEXT,
indexname::TEXT,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
idx_scan
FROM pg_stat_user_indexes
WHERE schemaname = 'public'
AND idx_scan < 100 -- Indexes used less than 100 times
ORDER BY pg_relation_size(indexrelid) DESC;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create slow query logging configuration
ALTER SYSTEM SET log_min_duration_statement = 1000; -- Log queries taking more than 1 second
ALTER SYSTEM SET log_statement = 'mod'; -- Log modifications
ALTER SYSTEM SET log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ';
-- Create audit log table for sensitive operations
CREATE TABLE IF NOT EXISTS audit_log (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID,
action VARCHAR(50) NOT NULL,
table_name VARCHAR(50),
record_id UUID,
old_values JSONB,
new_values JSONB,
ip_address INET,
user_agent TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
-- Example: Create a simple health check table
CREATE TABLE IF NOT EXISTS health_check (
id SERIAL PRIMARY KEY,
status VARCHAR(50) NOT NULL DEFAULT 'ok',
timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
details JSONB
);
CREATE INDEX IF NOT EXISTS idx_audit_log_user_id ON audit_log(user_id);
CREATE INDEX IF NOT EXISTS idx_audit_log_action ON audit_log(action);
CREATE INDEX IF NOT EXISTS idx_audit_log_created_at ON audit_log(created_at);
CREATE INDEX IF NOT EXISTS idx_audit_log_table_name ON audit_log(table_name);
-- Insert initial health check record
INSERT INTO health_check (status, details)
VALUES ('initialized', '{"message": "Database initialized successfully", "version": "1.0.0"}')
ON CONFLICT DO NOTHING;
-- Create audit trigger function
CREATE OR REPLACE FUNCTION audit_trigger_function()
RETURNS TRIGGER AS $$
BEGIN
IF TG_OP = 'DELETE' THEN
INSERT INTO audit_log (action, table_name, record_id, old_values)
VALUES (TG_OP, TG_TABLE_NAME, OLD.id, row_to_json(OLD));
RETURN OLD;
ELSIF TG_OP = 'UPDATE' THEN
INSERT INTO audit_log (action, table_name, record_id, old_values, new_values)
VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(OLD), row_to_json(NEW));
RETURN NEW;
ELSIF TG_OP = 'INSERT' THEN
INSERT INTO audit_log (action, table_name, record_id, new_values)
VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(NEW));
RETURN NEW;
END IF;
RETURN NULL;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create indexes for better performance
CREATE INDEX IF NOT EXISTS idx_health_check_timestamp ON health_check(timestamp);
CREATE INDEX IF NOT EXISTS idx_health_check_status ON health_check(status);
-- Create cleanup function for old audit logs
CREATE OR REPLACE FUNCTION cleanup_old_audit_logs(retention_days INTEGER DEFAULT 90)
RETURNS INTEGER AS $$
DECLARE
deleted_count INTEGER;
BEGIN
DELETE FROM audit_log
WHERE created_at < NOW() - INTERVAL '1 day' * retention_days;
-- Log successful initialization
INSERT INTO health_check (status, details)
VALUES ('ready', '{"message": "PostgreSQL initialization completed", "timestamp": "' || NOW() || '"}');
GET DIAGNOSTICS deleted_count = ROW_COUNT;
RETURN deleted_count;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create maintenance function
CREATE OR REPLACE FUNCTION run_maintenance()
RETURNS TEXT AS $$
DECLARE
result TEXT := '';
rec RECORD;
BEGIN
-- Update table statistics
ANALYZE;
result := result || 'Statistics updated. ';
-- Vacuum analyze all tables
FOR rec IN SELECT tablename FROM pg_tables WHERE schemaname = 'public' LOOP
EXECUTE 'VACUUM ANALYZE ' || quote_ident(rec.tablename);
END LOOP;
result := result || 'Vacuum completed. ';
-- Cleanup old audit logs (keep 90 days)
result := result || 'Cleaned up ' || cleanup_old_audit_logs(90) || ' old audit logs. ';
-- Reindex if needed (check for bloat)
FOR rec IN
SELECT schemaname, tablename
FROM pg_stat_user_tables
WHERE n_dead_tup > n_live_tup * 0.1
AND n_live_tup > 1000
LOOP
EXECUTE 'REINDEX TABLE ' || quote_ident(rec.schemaname) || '.' || quote_ident(rec.tablename);
result := result || 'Reindexed ' || rec.tablename || '. ';
END LOOP;
RETURN result;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create backup verification function
CREATE OR REPLACE FUNCTION verify_backup_integrity()
RETURNS TABLE (
table_name TEXT,
row_count BIGINT,
last_modified TIMESTAMP WITH TIME ZONE,
checksum TEXT
) AS $$
BEGIN
RETURN QUERY
SELECT
t.tablename::TEXT,
t.n_live_tup,
GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze),
md5(string_agg(c.column_name, ',' ORDER BY c.ordinal_position))
FROM pg_stat_user_tables t
JOIN information_schema.columns c ON c.table_name = t.tablename
WHERE t.schemaname = 'public'
GROUP BY t.tablename, t.n_live_tup,
GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze)
ORDER BY t.tablename;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Create connection monitoring view
CREATE OR REPLACE VIEW active_connections AS
SELECT
pid,
usename,
application_name,
client_addr,
client_port,
backend_start,
state,
query_start,
LEFT(query, 100) as query_preview
FROM pg_stat_activity
WHERE state != 'idle'
AND pid != pg_backend_pid()
ORDER BY backend_start;
-- Grant permissions for monitoring functions
GRANT EXECUTE ON FUNCTION get_table_stats() TO readonly_user;
GRANT EXECUTE ON FUNCTION get_unused_indexes() TO readonly_user;
GRANT EXECUTE ON FUNCTION verify_backup_integrity() TO backup_user;
GRANT SELECT ON active_connections TO readonly_user;
-- Set up automatic maintenance schedule (requires pg_cron extension)
-- Uncomment if pg_cron is available
-- SELECT cron.schedule('database-maintenance', '0 2 * * 0', 'SELECT run_maintenance();');
-- SELECT cron.schedule('audit-cleanup', '0 3 * * *', 'SELECT cleanup_old_audit_logs(90);');
-- Create performance tuning settings
ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';
ALTER SYSTEM SET track_activity_query_size = 2048;
ALTER SYSTEM SET track_functions = 'all';
ALTER SYSTEM SET track_io_timing = 'on';
-- Connection pooling settings
ALTER SYSTEM SET max_connections = 200;
ALTER SYSTEM SET shared_buffers = '256MB';
ALTER SYSTEM SET effective_cache_size = '1GB';
ALTER SYSTEM SET maintenance_work_mem = '64MB';
ALTER SYSTEM SET checkpoint_completion_target = 0.9;
ALTER SYSTEM SET wal_buffers = '16MB';
ALTER SYSTEM SET default_statistics_target = 100;
ALTER SYSTEM SET random_page_cost = 1.1;
ALTER SYSTEM SET effective_io_concurrency = 200;
-- Security settings
ALTER SYSTEM SET ssl = 'on';
ALTER SYSTEM SET log_connections = 'on';
ALTER SYSTEM SET log_disconnections = 'on';
ALTER SYSTEM SET log_checkpoints = 'on';
ALTER SYSTEM SET log_lock_waits = 'on';
-- Reload configuration
SELECT pg_reload_conf();
-- Create initial admin user (password should be changed immediately)
-- This will be handled by the application during first startup
-- Display completion message
DO $$
BEGIN
RAISE NOTICE 'Database initialization completed successfully!';
RAISE NOTICE 'Remember to:';
RAISE NOTICE '1. Change default passwords for app_user, readonly_user, and backup_user';
RAISE NOTICE '2. Configure SSL certificates';
RAISE NOTICE '3. Set up regular backups';
RAISE NOTICE '4. Run initial migrations with Alembic';
RAISE NOTICE '5. Create your first admin user through the application';
END $$;
COMMENT ON DATABASE my_uploader_db IS 'MY Uploader Bot - Main database for content management and MY Network protocol';
COMMENT ON SCHEMA my_network IS 'MY Network protocol related tables and functions';
COMMENT ON SCHEMA content IS 'Content storage and management tables';
COMMENT ON SCHEMA users IS 'User management and authentication tables';
COMMENT ON SCHEMA analytics IS 'Analytics and metrics tables';
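
The replacement script drops the old monitoring and audit machinery and keeps only extensions, schemas, and the health_check table. A sketch of reading that table at startup, assuming the compose DSN (the helper name is illustrative):

import asyncio
import asyncpg

async def db_status() -> str:
    conn = await asyncpg.connect(
        "postgresql://my_user:secure_password@localhost:5432/my_uploader_db"
    )
    try:
        # The init script inserts an 'initialized' row on first boot
        row = await conn.fetchrow(
            "SELECT status FROM health_check ORDER BY timestamp DESC LIMIT 1"
        )
        return row["status"] if row else "unknown"
    finally:
        await conn.close()

print(asyncio.run(db_status()))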

View File

@ -26,6 +26,7 @@ EMAIL="${3:-admin@${1}}"
PROJECT_DIR="/home/myuploader"
SERVICE_USER="myuploader"
PROGRESS_FILE="/home/myuploader/.installation_progress"
MAIN_PROJECT_DIR="$PROJECT_DIR/uploader-bot"
# Repositories to clone
UPLOADER_REPO="https://git.projscale.dev/my-dev/uploader-bot"
@ -407,16 +408,65 @@ EOF
chown "$SERVICE_USER:$SERVICE_USER" "$MAIN_PROJECT_DIR/.env"
log_success "Файл .env создан"
# Создание симлинков для модулей в uploader-bot
log "Создание симлинков для модулей..."
# Создание папок и симлинков для модулей
log "Создание папок и симлинков для модулей..."
sudo -u "$SERVICE_USER" mkdir -p "$MAIN_PROJECT_DIR/modules"
sudo -u "$SERVICE_USER" mkdir -p "$MAIN_PROJECT_DIR/scripts"
# Create symlinks for modules
sudo -u "$SERVICE_USER" ln -sf "$PROJECT_DIR/converter-module" "$MAIN_PROJECT_DIR/modules/converter-module"
if [ "$NODE_TYPE" = "main" ]; then
sudo -u "$SERVICE_USER" ln -sf "$PROJECT_DIR/web2-client" "$MAIN_PROJECT_DIR/modules/web2-client"
fi
log_success "Симлинки созданы"
# Создание init-db.sql если его нет
if [ ! -f "$MAIN_PROJECT_DIR/scripts/init-db.sql" ]; then
log "Создание init-db.sql..."
cat > "$MAIN_PROJECT_DIR/scripts/init-db.sql" << 'EOF'
-- PostgreSQL Database Initialization Script for MY Uploader Bot
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pg_trgm";
CREATE EXTENSION IF NOT EXISTS "btree_gin";
-- Create schemas for better organization
CREATE SCHEMA IF NOT EXISTS my_network;
CREATE SCHEMA IF NOT EXISTS content;
CREATE SCHEMA IF NOT EXISTS users;
CREATE SCHEMA IF NOT EXISTS analytics;
-- Grant permissions on schemas
GRANT USAGE, CREATE ON SCHEMA my_network TO my_user;
GRANT USAGE, CREATE ON SCHEMA content TO my_user;
GRANT USAGE, CREATE ON SCHEMA users TO my_user;
GRANT USAGE, CREATE ON SCHEMA analytics TO my_user;
-- Set default search path
ALTER DATABASE my_uploader_db SET search_path TO public, my_network, content, users, analytics;
-- Create health check table
CREATE TABLE IF NOT EXISTS health_check (
id SERIAL PRIMARY KEY,
status VARCHAR(50) NOT NULL DEFAULT 'ok',
timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
details JSONB
);
-- Insert initial health check record
INSERT INTO health_check (status, details)
VALUES ('initialized', '{"message": "Database initialized successfully", "version": "1.0.0"}')
ON CONFLICT DO NOTHING;
-- Create indexes for better performance
CREATE INDEX IF NOT EXISTS idx_health_check_timestamp ON health_check(timestamp);
CREATE INDEX IF NOT EXISTS idx_health_check_status ON health_check(status);
COMMENT ON DATABASE my_uploader_db IS 'MY Uploader Bot - Main database for content management and MY Network protocol';
EOF
chown "$SERVICE_USER:$SERVICE_USER" "$MAIN_PROJECT_DIR/scripts/init-db.sql"
fi
log_success "Папки и симлинки созданы"
log_progress "environment_setup"
fi
@ -427,6 +477,9 @@ echo -e "${BLUE}==========================================${NC}"
echo ""
if ! check_and_skip "nginx_config" "Настройка Nginx"; then
# Определяем MAIN_PROJECT_DIR если не определена
MAIN_PROJECT_DIR="$PROJECT_DIR/uploader-bot"
log "Настройка Nginx конфигурации..."
# Сначала добавляем rate limiting в основной конфиг nginx
@ -447,27 +500,28 @@ if ! check_and_skip "nginx_config" "Nginx configuration"; then
mkdir -p /etc/nginx/sites-enabled
if [ "$NODE_TYPE" = "main" ]; then
# Configuration for the main node (with web2-client)
# Configuration for the main node (with web2-client) - HTTP only at first
cat > "/etc/nginx/sites-available/$DOMAIN" << EOF
server {
listen 80;
server_name $DOMAIN;
# Redirect all HTTP requests to HTTPS
return 301 https://\$server_name\$request_uri;
}
# File upload limits
client_max_body_size 100M;
client_body_timeout 600s;
proxy_read_timeout 600s;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
server {
listen 443 ssl http2;
server_name $DOMAIN;
# Hide server version
server_tokens off;
# SSL configuration will be added by certbot
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload";
# Deny access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
# Web2 Client (main page for main nodes)
location / {
@ -513,6 +567,8 @@ server {
proxy_read_timeout 600s;
proxy_send_timeout 600s;
client_max_body_size 100M;
proxy_buffering off;
proxy_request_buffering off;
}
# Converter API (on-demand, via uploader-bot)
@ -533,6 +589,7 @@ server {
# Health check (no rate limiting)
location /health {
proxy_pass http://127.0.0.1:15100/health;
proxy_set_header Host \$host;
access_log off;
}
@ -542,37 +599,31 @@ server {
expires 30d;
add_header Cache-Control "public, immutable";
}
# Disable access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
}
EOF
else
# Configuration for a regular node (API only)
# Configuration for a regular node (API only) - HTTP only at first
cat > "/etc/nginx/sites-available/$DOMAIN" << EOF
server {
listen 80;
server_name $DOMAIN;
# Redirect all HTTP requests to HTTPS
return 301 https://\$server_name\$request_uri;
}
# File upload limits
client_max_body_size 100M;
client_body_timeout 600s;
proxy_read_timeout 600s;
proxy_connect_timeout 600s;
proxy_send_timeout 600s;
server {
listen 443 ssl http2;
server_name $DOMAIN;
# Hide server version
server_tokens off;
# SSL configuration will be added by certbot
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload";
# Deny access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
# Main API (uploader-bot)
location / {
@ -604,6 +655,8 @@ server {
proxy_read_timeout 600s;
proxy_send_timeout 600s;
client_max_body_size 100M;
proxy_buffering off;
proxy_request_buffering off;
}
# Converter API (on-demand, via uploader-bot)
@ -624,6 +677,7 @@ server {
# Health check (no rate limiting)
location /health {
proxy_pass http://127.0.0.1:15100/health;
proxy_set_header Host \$host;
access_log off;
}
@ -633,13 +687,6 @@ server {
expires 30d;
add_header Cache-Control "public, immutable";
}
# Disable access to hidden files
location ~ /\. {
deny all;
access_log off;
log_not_found off;
}
}
EOF
fi
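
Both server blocks proxy /health without rate limiting, which makes it a cheap smoke test once certbot has installed certificates. A sketch using httpx (already pinned); the domain below is a placeholder for the $DOMAIN passed to the installer:

import httpx

# First URL hits the app directly, the second goes through nginx.
for url in ("http://127.0.0.1:15100/health", "https://example.com/health"):
    try:
        r = httpx.get(url, timeout=5.0)
        print(url, r.status_code)
    except httpx.HTTPError as exc:
        print(url, "unreachable:", exc)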
@ -788,13 +835,20 @@ if ! check_and_skip "docker_build" "Build and launch"; then
log "Starting the application..."
if [ "$NODE_TYPE" = "main" ]; then
# For the main node, start all services
# For the main node, start all services, including web2-client
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis web2-client nginx prometheus grafana loki promtail
else
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d
fi
log "🚀 Запущены все сервисы для основной ноды"
else
# Для обычной ноды запускаем только необходимые сервисы
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d uploader-bot converter-builder watchtower 2>/dev/null || \
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d
# Для обычной ноды запускаем только основные сервисы без web2-client
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis
else
sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis indexer ton_daemon license_index convert_process
fi
log "🚀 Запущены основные сервисы для обычной ноды"
fi
@ -858,9 +912,17 @@ if ! check_and_skip "final_check" "Final check"; then
#!/bin/bash
cd $MAIN_PROJECT_DIR
if [ "$NODE_TYPE" = "main" ]; then
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail
else
docker-compose -f $COMPOSE_FILE up -d
fi
else
docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower 2>/dev/null || docker-compose -f $COMPOSE_FILE up -d
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
docker-compose -f $COMPOSE_FILE up -d app postgres redis
else
docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process
fi
fi
echo "✅ MY Uploader Bot запущен (тип ноды: $NODE_TYPE)"
docker-compose -f $COMPOSE_FILE ps
@ -882,6 +944,12 @@ echo ""
echo "🌐 API статус:"
curl -s http://localhost:15100/health | jq . 2>/dev/null || echo "API недоступен"
echo ""
echo "🗄️ PostgreSQL статус:"
docker-compose -f $COMPOSE_FILE exec postgres pg_isready -U my_user -d my_uploader_db 2>/dev/null || echo "PostgreSQL недоступен"
echo ""
echo "🔴 Redis статус:"
docker-compose -f $COMPOSE_FILE exec redis redis-cli ping 2>/dev/null || echo "Redis недоступен"
echo ""
echo "🔒 SSL сертификат:"
sudo certbot certificates | grep -A2 -B2 $DOMAIN || echo "Нет SSL сертификата"
echo ""
@ -902,9 +970,17 @@ echo "🔄 Rebuilding and restarting..."
docker-compose -f $COMPOSE_FILE down
docker-compose -f $COMPOSE_FILE build
if [ "$NODE_TYPE" = "main" ]; then
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail
else
docker-compose -f $COMPOSE_FILE up -d
fi
else
docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower 2>/dev/null || docker-compose -f $COMPOSE_FILE up -d
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
docker-compose -f $COMPOSE_FILE up -d app postgres redis
else
docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process
fi
fi
echo "✅ Пересборка завершена"
docker-compose -f $COMPOSE_FILE ps
@ -917,10 +993,18 @@ EOF
log "Настройка автозапуска..."
if [ "$NODE_TYPE" = "main" ]; then
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail"
else
SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d"
fi
SERVICE_DESCRIPTION="MY Uploader Bot (Main Node)"
else
SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower"
if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis"
else
SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process"
fi
SERVICE_DESCRIPTION="MY Uploader Bot (Regular Node)"
fi
@ -1010,7 +1094,15 @@ echo "sudo systemctl start my-uploader-bot"
echo "sudo systemctl stop my-uploader-bot"
echo "sudo systemctl status my-uploader-bot"
echo ""
echo -e "${YELLOW}🔄 Повторная установка:${NC}"
echo -e "${YELLOW}🗄️ PostgreSQL управление:${NC}"
echo "# Подключение к БД:"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres psql -U my_user -d my_uploader_db"
echo "# Бэкап БД:"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres pg_dump -U my_user my_uploader_db > backup_\$(date +%Y%m%d).sql"
echo "# Проверка статуса БД:"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres pg_isready -U my_user -d my_uploader_db"
echo ""
echo -e "${YELLOW}<EFBFBD> Повторная установка:${NC}"
echo "# Для полной переустановки:"
echo "sudo rm -f $PROGRESS_FILE"
echo "# Для сброса определенного этапа отредактируйте:"
@ -1022,6 +1114,10 @@ echo ""
echo -e "${YELLOW}📊 Мониторинг:${NC}"
echo "docker stats"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs -f"
echo "# Логи PostgreSQL:"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs postgres"
echo "# Логи Redis:"
echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs redis"
echo ""
echo -e "${GREEN}✅ MY Uploader Bot готов к работе!${NC}"
echo ""