mariadb -> postgres

user 2025-07-04 12:33:03 +03:00
parent 84acc64ad3
commit 2b9fbb6c7d
5 changed files with 461 additions and 441 deletions

View File

@@ -1,21 +1,21 @@
 version: '3'
 services:
-  maria_db:
-    image: mariadb:11.2
+  postgres:
+    image: postgres:15-alpine
     ports:
-      - "3307:3306"
+      - "5432:5432"
     env_file:
       - .env
     environment:
-      - MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD:-password}
-      - MYSQL_DATABASE=${MYSQL_DATABASE:-myuploader}
-      - MYSQL_USER=${MYSQL_USER:-myuploader}
-      - MYSQL_PASSWORD=${MYSQL_PASSWORD:-password}
+      - POSTGRES_DB=${POSTGRES_DB:-my_uploader_db}
+      - POSTGRES_USER=${POSTGRES_USER:-my_user}
+      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD:-secure_password}
+      - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
     volumes:
-      - /Storage/sqlStorage:/var/lib/mysql
+      - postgres_data:/var/lib/postgresql/data
     restart: always
     healthcheck:
-      test: [ "CMD", "healthcheck.sh", "--connect", "--innodb_initialized" ]
+      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-my_user} -d ${POSTGRES_DB:-my_uploader_db}"]
       interval: 10s
       timeout: 5s
       retries: 3
@@ -42,7 +42,7 @@ services:
       - .env
     restart: always
     links:
-      - maria_db
+      - postgres
       - redis
     ports:
       - "15100:15100"
@@ -50,7 +50,7 @@ services:
       - /Storage/logs:/app/logs
       - /Storage/storedContent:/app/data
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
       redis:
         condition: service_healthy
@@ -64,13 +64,13 @@ services:
     env_file:
       - .env
     links:
-      - maria_db
+      - postgres
       - redis
     volumes:
       - /Storage/logs:/app/logs
       - /Storage/storedContent:/app/data
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
       redis:
         condition: service_healthy
@@ -84,13 +84,13 @@ services:
     env_file:
       - .env
     links:
-      - maria_db
+      - postgres
       - redis
     volumes:
       - /Storage/logs:/app/logs
       - /Storage/storedContent:/app/data
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
       redis:
         condition: service_healthy
@@ -104,13 +104,13 @@ services:
     env_file:
       - .env
     links:
-      - maria_db
+      - postgres
      - redis
     volumes:
       - /Storage/logs:/app/logs
       - /Storage/storedContent:/app/data
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
       redis:
         condition: service_healthy
@@ -124,17 +124,20 @@ services:
     env_file:
       - .env
     links:
-      - maria_db
+      - postgres
       - redis
     volumes:
       - /Storage/logs:/app/logs
       - /Storage/storedContent:/app/data
       - /var/run/docker.sock:/var/run/docker.sock
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
       redis:
         condition: service_healthy
 volumes:
+  postgres_data:
+    driver: local
   redis_data:
+    driver: local
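
Note on this file: the database now lives in the named volume postgres_data rather than the old /Storage/sqlStorage bind mount, so the MariaDB data directory is simply left behind. A quick smoke test of the new service after this change (a sketch; the user and database names are the compose defaults above):

docker compose up -d postgres
docker compose ps postgres    # wait until the healthcheck reports "healthy"
docker compose exec postgres pg_isready -U my_user -d my_uploader_db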

View File

@@ -1,105 +1,254 @@
-version: '3'
+version: '3.8'
 services:
-  maria_db:
-    image: mariadb:11.2
-    ports:
-      - "3307:3306"
-    env_file:
-      - .env
+  # PostgreSQL Database
+  postgres:
+    image: postgres:15-alpine
+    container_name: uploader-bot-postgres
+    restart: unless-stopped
+    environment:
+      POSTGRES_DB: ${POSTGRES_DB:-my_uploader_db}
+      POSTGRES_USER: ${POSTGRES_USER:-my_user}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
+      POSTGRES_INITDB_ARGS: "--encoding=UTF-8 --lc-collate=C --lc-ctype=C"
     volumes:
-      - /Storage/sqlStorage:/var/lib/mysql
-    restart: always
+      - postgres_data:/var/lib/postgresql/data
+      - ./scripts/init-db.sql:/docker-entrypoint-initdb.d/01-init.sql:ro
+    ports:
+      - "5432:5432"
+    networks:
+      - uploader_network
     healthcheck:
-      test: [ "CMD", "healthcheck.sh", "--connect", "--innodb_initialized" ]
+      test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-my_user} -d ${POSTGRES_DB:-my_uploader_db}"]
       interval: 10s
       timeout: 5s
-      retries: 3
+      retries: 5
+      start_period: 30s
+
+  # Redis Cache
+  redis:
+    image: redis:7-alpine
+    container_name: uploader-bot-redis
+    restart: unless-stopped
+    command: >
+      redis-server
+      --appendonly yes
+      --maxmemory 512mb
+      --maxmemory-policy allkeys-lru
+      --save 900 1
+      --save 300 10
+      --save 60 10000
+    volumes:
+      - redis_data:/data
+    ports:
+      - "6379:6379"
+    networks:
+      - uploader_network
+    healthcheck:
+      test: ["CMD", "redis-cli", "ping"]
+      interval: 10s
+      timeout: 3s
+      retries: 5
+      start_period: 10s
+
+  # Main Application (MY Uploader Bot)
   app:
     build:
       context: .
       dockerfile: Dockerfile
+    container_name: uploader-bot-app
     command: python -m app
-    env_file:
-      - .env
-    restart: always
-    links:
-      - maria_db
+    restart: unless-stopped
+    depends_on:
+      postgres:
+        condition: service_healthy
+      redis:
+        condition: service_healthy
+    environment:
+      # Database
+      DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
+      POSTGRES_HOST: postgres
+      POSTGRES_PORT: 5432
+      POSTGRES_DB: ${POSTGRES_DB:-my_uploader_db}
+      POSTGRES_USER: ${POSTGRES_USER:-my_user}
+      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-secure_password}
+      # Redis
+      REDIS_URL: redis://redis:6379/0
+      REDIS_HOST: redis
+      REDIS_PORT: 6379
+      # Application
+      NODE_ENV: ${NODE_ENV:-production}
+      DEBUG: ${DEBUG:-false}
+      LOG_LEVEL: ${LOG_LEVEL:-INFO}
+      # MY Network
+      MY_NETWORK_NODE_ID: ${MY_NETWORK_NODE_ID}
+      MY_NETWORK_PORT: ${MY_NETWORK_PORT:-15100}
+      MY_NETWORK_HOST: ${MY_NETWORK_HOST:-0.0.0.0}
+      MY_NETWORK_DOMAIN: ${MY_NETWORK_DOMAIN}
+      MY_NETWORK_SSL_ENABLED: ${MY_NETWORK_SSL_ENABLED:-true}
+      # API Settings
+      API_HOST: ${API_HOST:-0.0.0.0}
+      API_PORT: ${API_PORT:-15100}
+      API_WORKERS: ${API_WORKERS:-4}
+      MAX_UPLOAD_SIZE: ${MAX_UPLOAD_SIZE:-100MB}
+      # Security
+      SECRET_KEY: ${SECRET_KEY}
+      JWT_SECRET: ${JWT_SECRET}
+      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
+      # Converter (on-demand)
+      CONVERTER_DOCKER_IMAGE: ${CONVERTER_DOCKER_IMAGE:-my-converter:latest}
+      CONVERTER_SHARED_PATH: ${CONVERTER_SHARED_PATH:-/shared/converter}
+      CONVERTER_MAX_PARALLEL: ${CONVERTER_MAX_PARALLEL:-3}
+      CONVERTER_TIMEOUT: ${CONVERTER_TIMEOUT:-300}
     ports:
       - "15100:15100"
     volumes:
-      - /Storage/logs:/app/logs
-      - /Storage/storedContent:/app/data
-    depends_on:
-      maria_db:
-        condition: service_healthy
+      - app_data:/app/data
+      - app_logs:/app/logs
+      - /var/run/docker.sock:/var/run/docker.sock  # For on-demand converter containers
+      - converter_shared:/shared/converter
+    networks:
+      - uploader_network
+    healthcheck:
+      test: ["CMD", "python", "-c", "import requests; requests.get('http://localhost:15100/health')"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
+      start_period: 60s
+
-  indexer: # Sends a notification when a new NFT listing appears. Sets the CID field on all content. Checks the collection item following the last index and looks for new content, notifying the uploader when content is found. Assigns encrypted_content an onchain_index
+  # Indexer Service
+  indexer:
     build:
       context: .
       dockerfile: Dockerfile
-    restart: always
+    container_name: uploader-bot-indexer
+    restart: unless-stopped
     command: python -m app indexer
-    env_file:
-      - .env
-    links:
-      - maria_db
-    volumes:
-      - /Storage/logs:/app/logs
-      - /Storage/storedContent:/app/data
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
+      redis:
+        condition: service_healthy
+    environment:
+      DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
+      REDIS_URL: redis://redis:6379/0
+      LOG_LEVEL: ${LOG_LEVEL:-INFO}
+      SERVICE_NAME: indexer
+    volumes:
+      - app_logs:/app/logs
+      - app_data:/app/data
+    networks:
+      - uploader_network
+
-  ton_daemon: # Works with the TON network: service wallet tasks and contract deployment
+  # TON Daemon Service
+  ton_daemon:
     build:
       context: .
       dockerfile: Dockerfile
+    container_name: uploader-bot-ton-daemon
     command: python -m app ton_daemon
-    restart: always
-    env_file:
-      - .env
-    links:
-      - maria_db
-    volumes:
-      - /Storage/logs:/app/logs
-      - /Storage/storedContent:/app/data
+    restart: unless-stopped
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
+      redis:
+        condition: service_healthy
+    environment:
+      DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
+      REDIS_URL: redis://redis:6379/0
+      LOG_LEVEL: ${LOG_LEVEL:-INFO}
+      SERVICE_NAME: ton_daemon
+    volumes:
+      - app_logs:/app/logs
+      - app_data:/app/data
+    networks:
+      - uploader_network
+
-  license_index: # Checks user wallets for new NFTs. Polls those NFTs via a defined GET method, by which we determine that it is a specific license, and stores its information
+  # License Index Service
+  license_index:
     build:
       context: .
       dockerfile: Dockerfile
+    container_name: uploader-bot-license-index
     command: python -m app license_index
-    restart: always
-    env_file:
-      - .env
-    links:
-      - maria_db
-    volumes:
-      - /Storage/logs:/app/logs
-      - /Storage/storedContent:/app/data
+    restart: unless-stopped
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
+      redis:
+        condition: service_healthy
+    environment:
+      DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
+      REDIS_URL: redis://redis:6379/0
+      LOG_LEVEL: ${LOG_LEVEL:-INFO}
+      SERVICE_NAME: license_index
+    volumes:
+      - app_logs:/app/logs
+      - app_data:/app/data
+    networks:
+      - uploader_network
+
+  # Convert Process Service
   convert_process:
     build:
       context: .
       dockerfile: Dockerfile
+    container_name: uploader-bot-convert-process
     command: python -m app convert_process
-    restart: always
-    env_file:
-      - .env
-    links:
-      - maria_db
-    volumes:
-      - /Storage/logs:/app/logs
-      - /Storage/storedContent:/app/data
-      - /var/run/docker.sock:/var/run/docker.sock
+    restart: unless-stopped
     depends_on:
-      maria_db:
+      postgres:
         condition: service_healthy
+      redis:
+        condition: service_healthy
+    environment:
+      DATABASE_URL: postgresql+asyncpg://${POSTGRES_USER:-my_user}:${POSTGRES_PASSWORD:-secure_password}@postgres:5432/${POSTGRES_DB:-my_uploader_db}
+      REDIS_URL: redis://redis:6379/0
+      LOG_LEVEL: ${LOG_LEVEL:-INFO}
+      SERVICE_NAME: convert_process
+    volumes:
+      - app_logs:/app/logs
+      - app_data:/app/data
+      - /var/run/docker.sock:/var/run/docker.sock
+      - converter_shared:/shared/converter
+    networks:
+      - uploader_network
+
+  # Converter Build (for creating converter image)
+  converter-build:
+    build:
+      context: ./modules/converter-module
+      dockerfile: Dockerfile
+    image: my-converter:latest
+    container_name: uploader-bot-converter-build
+    profiles: ["build-only"]  # Only runs during build
+    volumes:
+      - converter_shared:/shared/converter
+    networks:
+      - uploader_network
+
+volumes:
+  postgres_data:
+    driver: local
+  redis_data:
+    driver: local
+  app_data:
+    driver: local
+  app_logs:
+    driver: local
+  converter_shared:
+    driver: local
+
+networks:
+  uploader_network:
+    driver: bridge
+    ipam:
+      config:
+        - subnet: 172.20.0.0/16
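
Neither compose file migrates existing data: the old MariaDB volume is simply no longer mounted anywhere. If rows from the old maria_db service need to carry over, a one-off pgloader run is a common route (a sketch, assuming the old container is still reachable on its published port 3307 with the old default credentials from the previous compose file):

pgloader \
  mysql://myuploader:password@127.0.0.1:3307/myuploader \
  postgresql://my_user:secure_password@127.0.0.1:5432/my_uploader_db

pgloader copies schema and data and maps MySQL types to PostgreSQL equivalents; columns the SQLAlchemy models now expect as BOOLEAN (MySQL TINYINT(1)) are worth spot-checking afterwards.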

View File

@ -1,20 +1,46 @@
sanic==21.9.1 # Core Framework
websockets==10.0 sanic==23.12.1
sqlalchemy==2.0.23 websockets==12.0
python-dotenv==1.0.0
pymysql==1.1.0 # Async Database (PostgreSQL)
sqlalchemy[asyncio]==2.0.23
asyncpg==0.29.0
alembic==1.13.1
# Redis & Caching
redis[hiredis]==5.0.1
aioredis==2.0.1
# Telegram Bot
aiogram==3.13.0 aiogram==3.13.0
aiohttp==3.9.1
# TON Blockchain
pytonconnect==0.3.0 pytonconnect==0.3.0
base58==2.1.1 base58==2.1.1
git+https://github.com/tonfactory/tonsdk.git@3ebbf0b702f48c2519e4c6c425f9514f673b9d48#egg=tonsdk git+https://github.com/tonfactory/tonsdk.git@3ebbf0b702f48c2519e4c6c425f9514f673b9d48#egg=tonsdk
httpx==0.25.0
docker==7.0.0 # HTTP Client
httpx[http2]==0.25.2
# Cryptography
pycryptodome==3.20.0 pycryptodome==3.20.0
pynacl==1.5.0 pynacl==1.5.0
aiofiles==23.2.1
# File Processing
aiofiles==24.1.0
pydub==0.25.1 pydub==0.25.1
pillow==10.2.0 pillow==10.2.0
ffmpeg-python==0.2.0 ffmpeg-python==0.2.0
python-magic==0.4.27 python-magic==0.4.27
# Utilities
python-dotenv==1.0.0
docker==7.0.0
# Monitoring & Observability
prometheus-client==0.19.0
structlog==23.2.0
# Validation
pydantic==2.5.2
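
One pin worth a second look: aioredis==2.0.1 alongside redis[hiredis]==5.0.1 is likely redundant, since redis-py has shipped redis.asyncio since 4.2, and aioredis 2.0.1 is known to fail to import on Python 3.11+ (duplicate TimeoutError base class). A quick check that the new async stack resolves and imports together (a sketch, run inside the built image):

pip install -r requirements.txt
python -c "import asyncpg, sqlalchemy, redis.asyncio, aiogram; print('deps ok')"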

View File

@@ -1,311 +1,57 @@
--- PostgreSQL initialization script for my-uploader-bot
--- This script sets up the database, users, and extensions
-
--- Create database if it doesn't exist
-SELECT 'CREATE DATABASE myuploader'
-WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = 'myuploader')\gexec
-
--- Connect to the database
-\c myuploader;
-
--- Create extensions
+-- PostgreSQL Database Initialization Script for MY Uploader Bot
+-- This script sets up the initial database structure and users
+
+-- Create database if not exists (done by PostgreSQL container initialization)
+-- Create user and grant privileges are also handled by container environment variables
+
+-- Set up database encoding and collation (handled by POSTGRES_INITDB_ARGS)
+
+-- Create extensions if needed
 CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
 CREATE EXTENSION IF NOT EXISTS "pg_trgm";
 CREATE EXTENSION IF NOT EXISTS "btree_gin";
-CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-
--- Create custom types
-DO $$ BEGIN
-    CREATE TYPE user_role_type AS ENUM ('admin', 'user', 'moderator');
-EXCEPTION
-    WHEN duplicate_object THEN null;
-END $$;
-DO $$ BEGIN
-    CREATE TYPE content_status_type AS ENUM ('pending', 'uploading', 'processing', 'completed', 'failed', 'deleted');
-EXCEPTION
-    WHEN duplicate_object THEN null;
-END $$;
-DO $$ BEGIN
-    CREATE TYPE transaction_status_type AS ENUM ('pending', 'confirmed', 'failed', 'cancelled');
-EXCEPTION
-    WHEN duplicate_object THEN null;
-END $$;
-
--- Create application user (for connection pooling)
-DO $$ BEGIN
-    CREATE USER app_user WITH PASSWORD 'secure_app_password';
-EXCEPTION
-    WHEN duplicate_object THEN
-        ALTER USER app_user WITH PASSWORD 'secure_app_password';
-END $$;
-
--- Grant necessary permissions
-GRANT CONNECT ON DATABASE myuploader TO app_user;
-GRANT USAGE ON SCHEMA public TO app_user;
-
--- Grant table permissions (will be applied after tables are created)
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO app_user;
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE, SELECT ON SEQUENCES TO app_user;
-
--- Create read-only user for monitoring/analytics
-DO $$ BEGIN
-    CREATE USER readonly_user WITH PASSWORD 'readonly_password';
-EXCEPTION
-    WHEN duplicate_object THEN
-        ALTER USER readonly_user WITH PASSWORD 'readonly_password';
-END $$;
-GRANT CONNECT ON DATABASE myuploader TO readonly_user;
-GRANT USAGE ON SCHEMA public TO readonly_user;
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO readonly_user;
-
--- Create backup user
-DO $$ BEGIN
-    CREATE USER backup_user WITH PASSWORD 'backup_password';
-EXCEPTION
-    WHEN duplicate_object THEN
-        ALTER USER backup_user WITH PASSWORD 'backup_password';
-END $$;
-GRANT CONNECT ON DATABASE myuploader TO backup_user;
-GRANT USAGE ON SCHEMA public TO backup_user;
-ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO backup_user;
-
--- Create performance monitoring functions
-CREATE OR REPLACE FUNCTION get_table_stats()
-RETURNS TABLE (
-    schema_name TEXT,
-    table_name TEXT,
-    row_count BIGINT,
-    total_size TEXT,
-    index_size TEXT,
-    toast_size TEXT
-) AS $$
-BEGIN
-    RETURN QUERY
-    SELECT
-        schemaname::TEXT,
-        tablename::TEXT,
-        n_tup_ins - n_tup_del AS row_count,
-        pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size,
-        pg_size_pretty(pg_indexes_size(schemaname||'.'||tablename)) AS index_size,
-        pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) AS toast_size
-    FROM pg_stat_user_tables
-    WHERE schemaname = 'public'
-    ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create index monitoring function
-CREATE OR REPLACE FUNCTION get_unused_indexes()
-RETURNS TABLE (
-    schema_name TEXT,
-    table_name TEXT,
-    index_name TEXT,
-    index_size TEXT,
-    index_scans BIGINT
-) AS $$
-BEGIN
-    RETURN QUERY
-    SELECT
-        schemaname::TEXT,
-        tablename::TEXT,
-        indexname::TEXT,
-        pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
-        idx_scan
-    FROM pg_stat_user_indexes
-    WHERE schemaname = 'public'
-      AND idx_scan < 100 -- Indexes used less than 100 times
-    ORDER BY pg_relation_size(indexrelid) DESC;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create slow query logging configuration
-ALTER SYSTEM SET log_min_duration_statement = 1000; -- Log queries taking more than 1 second
-ALTER SYSTEM SET log_statement = 'mod'; -- Log modifications
-ALTER SYSTEM SET log_line_prefix = '%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h ';
-
--- Create audit log table for sensitive operations
-CREATE TABLE IF NOT EXISTS audit_log (
-    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
-    user_id UUID,
-    action VARCHAR(50) NOT NULL,
-    table_name VARCHAR(50),
-    record_id UUID,
-    old_values JSONB,
-    new_values JSONB,
-    ip_address INET,
-    user_agent TEXT,
-    created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
-);
-CREATE INDEX IF NOT EXISTS idx_audit_log_user_id ON audit_log(user_id);
-CREATE INDEX IF NOT EXISTS idx_audit_log_action ON audit_log(action);
-CREATE INDEX IF NOT EXISTS idx_audit_log_created_at ON audit_log(created_at);
-CREATE INDEX IF NOT EXISTS idx_audit_log_table_name ON audit_log(table_name);
-
--- Create audit trigger function
-CREATE OR REPLACE FUNCTION audit_trigger_function()
-RETURNS TRIGGER AS $$
-BEGIN
-    IF TG_OP = 'DELETE' THEN
-        INSERT INTO audit_log (action, table_name, record_id, old_values)
-        VALUES (TG_OP, TG_TABLE_NAME, OLD.id, row_to_json(OLD));
-        RETURN OLD;
-    ELSIF TG_OP = 'UPDATE' THEN
-        INSERT INTO audit_log (action, table_name, record_id, old_values, new_values)
-        VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(OLD), row_to_json(NEW));
-        RETURN NEW;
-    ELSIF TG_OP = 'INSERT' THEN
-        INSERT INTO audit_log (action, table_name, record_id, new_values)
-        VALUES (TG_OP, TG_TABLE_NAME, NEW.id, row_to_json(NEW));
-        RETURN NEW;
-    END IF;
-    RETURN NULL;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create cleanup function for old audit logs
-CREATE OR REPLACE FUNCTION cleanup_old_audit_logs(retention_days INTEGER DEFAULT 90)
-RETURNS INTEGER AS $$
-DECLARE
-    deleted_count INTEGER;
-BEGIN
-    DELETE FROM audit_log
-    WHERE created_at < NOW() - INTERVAL '1 day' * retention_days;
-    GET DIAGNOSTICS deleted_count = ROW_COUNT;
-    RETURN deleted_count;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create maintenance function
-CREATE OR REPLACE FUNCTION run_maintenance()
-RETURNS TEXT AS $$
-DECLARE
-    result TEXT := '';
-    rec RECORD;
-BEGIN
-    -- Update table statistics
-    ANALYZE;
-    result := result || 'Statistics updated. ';
-    -- Vacuum analyze all tables
-    FOR rec IN SELECT tablename FROM pg_tables WHERE schemaname = 'public' LOOP
-        EXECUTE 'VACUUM ANALYZE ' || quote_ident(rec.tablename);
-    END LOOP;
-    result := result || 'Vacuum completed. ';
-    -- Cleanup old audit logs (keep 90 days)
-    result := result || 'Cleaned up ' || cleanup_old_audit_logs(90) || ' old audit logs. ';
-    -- Reindex if needed (check for bloat)
-    FOR rec IN
-        SELECT schemaname, tablename
-        FROM pg_stat_user_tables
-        WHERE n_dead_tup > n_live_tup * 0.1
-          AND n_live_tup > 1000
-    LOOP
-        EXECUTE 'REINDEX TABLE ' || quote_ident(rec.schemaname) || '.' || quote_ident(rec.tablename);
-        result := result || 'Reindexed ' || rec.tablename || '. ';
-    END LOOP;
-    RETURN result;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create backup verification function
-CREATE OR REPLACE FUNCTION verify_backup_integrity()
-RETURNS TABLE (
-    table_name TEXT,
-    row_count BIGINT,
-    last_modified TIMESTAMP WITH TIME ZONE,
-    checksum TEXT
-) AS $$
-BEGIN
-    RETURN QUERY
-    SELECT
-        t.tablename::TEXT,
-        t.n_live_tup,
-        GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze),
-        md5(string_agg(c.column_name, ',' ORDER BY c.ordinal_position))
-    FROM pg_stat_user_tables t
-    JOIN information_schema.columns c ON c.table_name = t.tablename
-    WHERE t.schemaname = 'public'
-    GROUP BY t.tablename, t.n_live_tup,
-        GREATEST(t.last_vacuum, t.last_autovacuum, t.last_analyze, t.last_autoanalyze)
-    ORDER BY t.tablename;
-END;
-$$ LANGUAGE plpgsql SECURITY DEFINER;
-
--- Create connection monitoring view
-CREATE OR REPLACE VIEW active_connections AS
-SELECT
-    pid,
-    usename,
-    application_name,
-    client_addr,
-    client_port,
-    backend_start,
-    state,
-    query_start,
-    LEFT(query, 100) as query_preview
-FROM pg_stat_activity
-WHERE state != 'idle'
-  AND pid != pg_backend_pid()
-ORDER BY backend_start;
-
--- Grant permissions for monitoring functions
-GRANT EXECUTE ON FUNCTION get_table_stats() TO readonly_user;
-GRANT EXECUTE ON FUNCTION get_unused_indexes() TO readonly_user;
-GRANT EXECUTE ON FUNCTION verify_backup_integrity() TO backup_user;
-GRANT SELECT ON active_connections TO readonly_user;
-
--- Set up automatic maintenance schedule (requires pg_cron extension)
--- Uncomment if pg_cron is available
--- SELECT cron.schedule('database-maintenance', '0 2 * * 0', 'SELECT run_maintenance();');
--- SELECT cron.schedule('audit-cleanup', '0 3 * * *', 'SELECT cleanup_old_audit_logs(90);');
-
--- Create performance tuning settings
-ALTER SYSTEM SET shared_preload_libraries = 'pg_stat_statements';
-ALTER SYSTEM SET track_activity_query_size = 2048;
-ALTER SYSTEM SET track_functions = 'all';
-ALTER SYSTEM SET track_io_timing = 'on';
-
--- Connection pooling settings
-ALTER SYSTEM SET max_connections = 200;
-ALTER SYSTEM SET shared_buffers = '256MB';
-ALTER SYSTEM SET effective_cache_size = '1GB';
-ALTER SYSTEM SET maintenance_work_mem = '64MB';
-ALTER SYSTEM SET checkpoint_completion_target = 0.9;
-ALTER SYSTEM SET wal_buffers = '16MB';
-ALTER SYSTEM SET default_statistics_target = 100;
-ALTER SYSTEM SET random_page_cost = 1.1;
-ALTER SYSTEM SET effective_io_concurrency = 200;
-
--- Security settings
-ALTER SYSTEM SET ssl = 'on';
-ALTER SYSTEM SET log_connections = 'on';
-ALTER SYSTEM SET log_disconnections = 'on';
-ALTER SYSTEM SET log_checkpoints = 'on';
-ALTER SYSTEM SET log_lock_waits = 'on';
-
--- Reload configuration
-SELECT pg_reload_conf();
-
--- Create initial admin user (password should be changed immediately)
--- This will be handled by the application during first startup
-
--- Display completion message
-DO $$
-BEGIN
-    RAISE NOTICE 'Database initialization completed successfully!';
-    RAISE NOTICE 'Remember to:';
-    RAISE NOTICE '1. Change default passwords for app_user, readonly_user, and backup_user';
-    RAISE NOTICE '2. Configure SSL certificates';
-    RAISE NOTICE '3. Set up regular backups';
-    RAISE NOTICE '4. Run initial migrations with Alembic';
-    RAISE NOTICE '5. Create your first admin user through the application';
-END $$;
+
+-- Create schemas for better organization
+CREATE SCHEMA IF NOT EXISTS my_network;
+CREATE SCHEMA IF NOT EXISTS content;
+CREATE SCHEMA IF NOT EXISTS users;
+CREATE SCHEMA IF NOT EXISTS analytics;
+
+-- Grant permissions on schemas
+GRANT USAGE, CREATE ON SCHEMA my_network TO my_user;
+GRANT USAGE, CREATE ON SCHEMA content TO my_user;
+GRANT USAGE, CREATE ON SCHEMA users TO my_user;
+GRANT USAGE, CREATE ON SCHEMA analytics TO my_user;
+
+-- Set default search path
+ALTER DATABASE my_uploader_db SET search_path TO public, my_network, content, users, analytics;
+
+-- Create initial tables structure (if not handled by ORM migrations)
+-- These will be created by the application's migration system
+
+-- Example: Create a simple health check table
+CREATE TABLE IF NOT EXISTS health_check (
+    id SERIAL PRIMARY KEY,
+    status VARCHAR(50) NOT NULL DEFAULT 'ok',
+    timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
+    details JSONB
+);
+
+-- Insert initial health check record
+INSERT INTO health_check (status, details)
+VALUES ('initialized', '{"message": "Database initialized successfully", "version": "1.0.0"}')
+ON CONFLICT DO NOTHING;
+
+-- Create indexes for better performance
+CREATE INDEX IF NOT EXISTS idx_health_check_timestamp ON health_check(timestamp);
+CREATE INDEX IF NOT EXISTS idx_health_check_status ON health_check(status);
+
+-- Log successful initialization
+INSERT INTO health_check (status, details)
+VALUES ('ready', '{"message": "PostgreSQL initialization completed", "timestamp": "' || NOW() || '"}');
+
+COMMENT ON DATABASE my_uploader_db IS 'MY Uploader Bot - Main database for content management and MY Network protocol';
+COMMENT ON SCHEMA my_network IS 'MY Network protocol related tables and functions';
+COMMENT ON SCHEMA content IS 'Content storage and management tables';
+COMMENT ON SCHEMA users IS 'User management and authentication tables';
+COMMENT ON SCHEMA analytics IS 'Analytics and metrics tables';
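
Keep in mind that scripts mounted into /docker-entrypoint-initdb.d/ run only when the container initializes an empty data directory; an existing postgres_data volume skips them silently. To apply the script to an already-initialized database, or to verify the schemas it creates (a sketch using the compose defaults):

docker compose exec postgres psql -U my_user -d my_uploader_db -f /docker-entrypoint-initdb.d/01-init.sql
docker compose exec postgres psql -U my_user -d my_uploader_db -c '\dn'

Dropping the old script's ALTER SYSTEM tuning and the hard-coded app_user/readonly_user/backup_user passwords also fits the container model better: credentials now come from environment variables, and server tuning belongs in postgresql.conf or the container configuration rather than an init script.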

View File

@@ -26,6 +26,7 @@ EMAIL="${3:-admin@${1}}"
 PROJECT_DIR="/home/myuploader"
 SERVICE_USER="myuploader"
 PROGRESS_FILE="/home/myuploader/.installation_progress"
+MAIN_PROJECT_DIR="$PROJECT_DIR/uploader-bot"
 
 # Repositories to clone
 UPLOADER_REPO="https://git.projscale.dev/my-dev/uploader-bot"
@@ -407,16 +408,65 @@ EOF
     chown "$SERVICE_USER:$SERVICE_USER" "$MAIN_PROJECT_DIR/.env"
     log_success ".env file created"
 
-    # Create symlinks for modules in uploader-bot
-    log "Creating symlinks for modules..."
+    # Create directories and symlinks for modules
+    log "Creating directories and symlinks for modules..."
     sudo -u "$SERVICE_USER" mkdir -p "$MAIN_PROJECT_DIR/modules"
+    sudo -u "$SERVICE_USER" mkdir -p "$MAIN_PROJECT_DIR/scripts"
+
+    # Create symlinks for modules
     sudo -u "$SERVICE_USER" ln -sf "$PROJECT_DIR/converter-module" "$MAIN_PROJECT_DIR/modules/converter-module"
     if [ "$NODE_TYPE" = "main" ]; then
         sudo -u "$SERVICE_USER" ln -sf "$PROJECT_DIR/web2-client" "$MAIN_PROJECT_DIR/modules/web2-client"
     fi
-    log_success "Symlinks created"
+
+    # Create init-db.sql if it does not exist yet
+    if [ ! -f "$MAIN_PROJECT_DIR/scripts/init-db.sql" ]; then
+        log "Creating init-db.sql..."
+        cat > "$MAIN_PROJECT_DIR/scripts/init-db.sql" << 'EOF'
+-- PostgreSQL Database Initialization Script for MY Uploader Bot
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+CREATE EXTENSION IF NOT EXISTS "pg_trgm";
+CREATE EXTENSION IF NOT EXISTS "btree_gin";
+
+-- Create schemas for better organization
+CREATE SCHEMA IF NOT EXISTS my_network;
+CREATE SCHEMA IF NOT EXISTS content;
+CREATE SCHEMA IF NOT EXISTS users;
+CREATE SCHEMA IF NOT EXISTS analytics;
+
+-- Grant permissions on schemas
+GRANT USAGE, CREATE ON SCHEMA my_network TO my_user;
+GRANT USAGE, CREATE ON SCHEMA content TO my_user;
+GRANT USAGE, CREATE ON SCHEMA users TO my_user;
+GRANT USAGE, CREATE ON SCHEMA analytics TO my_user;
+
+-- Set default search path
+ALTER DATABASE my_uploader_db SET search_path TO public, my_network, content, users, analytics;
+
+-- Create health check table
+CREATE TABLE IF NOT EXISTS health_check (
+    id SERIAL PRIMARY KEY,
+    status VARCHAR(50) NOT NULL DEFAULT 'ok',
+    timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
+    details JSONB
+);
+
+-- Insert initial health check record
+INSERT INTO health_check (status, details)
+VALUES ('initialized', '{"message": "Database initialized successfully", "version": "1.0.0"}')
+ON CONFLICT DO NOTHING;
+
+-- Create indexes for better performance
+CREATE INDEX IF NOT EXISTS idx_health_check_timestamp ON health_check(timestamp);
+CREATE INDEX IF NOT EXISTS idx_health_check_status ON health_check(status);
+
+COMMENT ON DATABASE my_uploader_db IS 'MY Uploader Bot - Main database for content management and MY Network protocol';
+EOF
+        chown "$SERVICE_USER:$SERVICE_USER" "$MAIN_PROJECT_DIR/scripts/init-db.sql"
+    fi
+    log_success "Directories and symlinks created"
 
     log_progress "environment_setup"
 fi
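
Since this heredoc duplicates scripts/init-db.sql from the repository (the [ ! -f ... ] guard only writes it when the checkout lacks one), the two copies can drift apart. A cheap drift check, assuming the repo copy is tracked in git (a sketch):

diff -u "$MAIN_PROJECT_DIR/scripts/init-db.sql" \
        <(git -C "$MAIN_PROJECT_DIR" show HEAD:scripts/init-db.sql) \
  && echo "init-db.sql matches the repo copy"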
@@ -427,6 +477,9 @@ echo -e "${BLUE}==========================================${NC}"
 echo ""
 
 if ! check_and_skip "nginx_config" "Nginx setup"; then
+    # Define MAIN_PROJECT_DIR in case it is not set
+    MAIN_PROJECT_DIR="$PROJECT_DIR/uploader-bot"
+
     log "Configuring Nginx..."
 
     # First add rate limiting to the main nginx config
@@ -447,27 +500,28 @@ if ! check_and_skip "nginx_config" "Nginx setup"; then
     mkdir -p /etc/nginx/sites-enabled
 
     if [ "$NODE_TYPE" = "main" ]; then
-        # Configuration for the main node (with web2-client)
+        # Configuration for the main node (with web2-client) - HTTP only at first
         cat > "/etc/nginx/sites-available/$DOMAIN" << EOF
 server {
     listen 80;
     server_name $DOMAIN;
 
-    # Redirect all HTTP requests to HTTPS
-    return 301 https://\$server_name\$request_uri;
-}
+    # File upload limits
+    client_max_body_size 100M;
+    client_body_timeout 600s;
+    proxy_read_timeout 600s;
+    proxy_connect_timeout 600s;
+    proxy_send_timeout 600s;
 
-server {
-    listen 443 ssl http2;
-    server_name $DOMAIN;
+    # Hide server version
+    server_tokens off;
 
-    # SSL configuration will be added by certbot
-
-    # Security headers
-    add_header X-Frame-Options DENY;
-    add_header X-Content-Type-Options nosniff;
-    add_header X-XSS-Protection "1; mode=block";
-    add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload";
+    # Deny access to hidden files
+    location ~ /\. {
+        deny all;
+        access_log off;
+        log_not_found off;
+    }
 
     # Web2 Client (main page for main nodes)
     location / {
@@ -513,6 +567,8 @@ server {
         proxy_read_timeout 600s;
         proxy_send_timeout 600s;
         client_max_body_size 100M;
+        proxy_buffering off;
+        proxy_request_buffering off;
     }
 
     # Converter API (on-demand, via uploader-bot)
@@ -533,6 +589,7 @@ server {
     # Health check (no rate limiting)
     location /health {
         proxy_pass http://127.0.0.1:15100/health;
+        proxy_set_header Host \$host;
         access_log off;
     }
@@ -542,37 +599,31 @@ server {
         expires 30d;
         add_header Cache-Control "public, immutable";
     }
-
-    # Disable access to hidden files
-    location ~ /\. {
-        deny all;
-        access_log off;
-        log_not_found off;
-    }
 }
 EOF
     else
-        # Configuration for a regular node (API only)
+        # Configuration for a regular node (API only) - HTTP only at first
         cat > "/etc/nginx/sites-available/$DOMAIN" << EOF
 server {
     listen 80;
     server_name $DOMAIN;
 
-    # Redirect all HTTP requests to HTTPS
-    return 301 https://\$server_name\$request_uri;
-}
+    # File upload limits
+    client_max_body_size 100M;
+    client_body_timeout 600s;
+    proxy_read_timeout 600s;
+    proxy_connect_timeout 600s;
+    proxy_send_timeout 600s;
 
-server {
-    listen 443 ssl http2;
-    server_name $DOMAIN;
+    # Hide server version
+    server_tokens off;
 
-    # SSL configuration will be added by certbot
-
-    # Security headers
-    add_header X-Frame-Options DENY;
-    add_header X-Content-Type-Options nosniff;
-    add_header X-XSS-Protection "1; mode=block";
-    add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload";
+    # Deny access to hidden files
+    location ~ /\. {
+        deny all;
+        access_log off;
+        log_not_found off;
+    }
 
     # Main API (uploader-bot)
     location / {
@@ -604,6 +655,8 @@ server {
         proxy_read_timeout 600s;
         proxy_send_timeout 600s;
         client_max_body_size 100M;
+        proxy_buffering off;
+        proxy_request_buffering off;
     }
 
     # Converter API (on-demand, via uploader-bot)
@@ -624,6 +677,7 @@ server {
     # Health check (no rate limiting)
     location /health {
         proxy_pass http://127.0.0.1:15100/health;
+        proxy_set_header Host \$host;
         access_log off;
     }
@@ -633,13 +687,6 @@ server {
         expires 30d;
         add_header Cache-Control "public, immutable";
     }
-
-    # Disable access to hidden files
-    location ~ /\. {
-        deny all;
-        access_log off;
-        log_not_found off;
-    }
 }
 EOF
     fi
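
Generating HTTP-only vhosts first sidesteps the chicken-and-egg failure where nginx refuses to start on a listen 443 ssl block whose certificate does not exist yet; certbot's nginx plugin later rewrites the config, installs the certificate, and re-adds the HTTPS redirect. The usual sequence once this block has written the config (a sketch; the exact certbot flags depend on how the script requests certificates further down):

ln -sf /etc/nginx/sites-available/$DOMAIN /etc/nginx/sites-enabled/$DOMAIN
nginx -t && systemctl reload nginx
certbot --nginx -d "$DOMAIN" --non-interactive --agree-tos -m "$EMAIL"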
@@ -788,13 +835,20 @@ if ! check_and_skip "docker_build" "Build and launch"; then
     log "Starting the application..."
 
     if [ "$NODE_TYPE" = "main" ]; then
-        # For the main node, start all services
+        # For the main node, start all services including web2-client
+        if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+            sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis web2-client nginx prometheus grafana loki promtail
+        else
             sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d
+        fi
         log "🚀 Started all services for the main node"
     else
-        # For a regular node, start only the necessary services
-        sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d uploader-bot converter-builder watchtower 2>/dev/null || \
-            sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d
+        # For a regular node, start only the core services without web2-client
+        if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+            sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis
+        else
+            sudo -u "$SERVICE_USER" docker-compose -f "$COMPOSE_FILE" up -d app postgres redis indexer ton_daemon license_index convert_process
+        fi
         log "🚀 Started core services for the regular node"
     fi
@@ -858,9 +912,17 @@ if ! check_and_skip "final_check" "Final check"; then
 #!/bin/bash
 cd $MAIN_PROJECT_DIR
 if [ "$NODE_TYPE" = "main" ]; then
+    if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail
+    else
         docker-compose -f $COMPOSE_FILE up -d
+    fi
 else
-    docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower 2>/dev/null || docker-compose -f $COMPOSE_FILE up -d
+    if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis
+    else
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process
+    fi
 fi
 echo "✅ MY Uploader Bot started (node type: $NODE_TYPE)"
 docker-compose -f $COMPOSE_FILE ps
@@ -882,6 +944,12 @@ echo ""
 echo "🌐 API status:"
 curl -s http://localhost:15100/health | jq . 2>/dev/null || echo "API is not available"
 echo ""
+echo "🗄️ PostgreSQL status:"
+docker-compose -f $COMPOSE_FILE exec postgres pg_isready -U my_user -d my_uploader_db 2>/dev/null || echo "PostgreSQL is not available"
+echo ""
+echo "🔴 Redis status:"
+docker-compose -f $COMPOSE_FILE exec redis redis-cli ping 2>/dev/null || echo "Redis is not available"
+echo ""
 echo "🔒 SSL certificate:"
 sudo certbot certificates | grep -A2 -B2 $DOMAIN || echo "No SSL certificate"
 echo ""
@@ -902,9 +970,17 @@ echo "🔄 Rebuilding and restarting..."
 docker-compose -f $COMPOSE_FILE down
 docker-compose -f $COMPOSE_FILE build
 if [ "$NODE_TYPE" = "main" ]; then
+    if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail
+    else
         docker-compose -f $COMPOSE_FILE up -d
+    fi
 else
-    docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower 2>/dev/null || docker-compose -f $COMPOSE_FILE up -d
+    if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis
+    else
+        docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process
+    fi
 fi
 echo "✅ Rebuild completed"
 docker-compose -f $COMPOSE_FILE ps
@@ -917,10 +993,18 @@ EOF
     log "Configuring autostart..."
 
     if [ "$NODE_TYPE" = "main" ]; then
+        if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+            SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis web2-client nginx prometheus grafana loki promtail"
+        else
             SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d"
+        fi
         SERVICE_DESCRIPTION="MY Uploader Bot (Main Node)"
     else
-        SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d uploader-bot converter-builder watchtower"
+        if [ "$COMPOSE_FILE" = "docker-compose.production.yml" ]; then
+            SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis"
+        else
+            SERVICE_EXEC_START="/usr/local/bin/docker-compose -f $COMPOSE_FILE up -d app postgres redis indexer ton_daemon license_index convert_process"
+        fi
         SERVICE_DESCRIPTION="MY Uploader Bot (Regular Node)"
     fi
@@ -1010,7 +1094,15 @@ echo "sudo systemctl start my-uploader-bot"
 echo "sudo systemctl stop my-uploader-bot"
 echo "sudo systemctl status my-uploader-bot"
 echo ""
-echo -e "${YELLOW}🔄 Reinstallation:${NC}"
+echo -e "${YELLOW}🗄️ PostgreSQL management:${NC}"
+echo "# Connect to the database:"
+echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres psql -U my_user -d my_uploader_db"
+echo "# Back up the database:"
+echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres pg_dump -U my_user my_uploader_db > backup_\$(date +%Y%m%d).sql"
+echo "# Check database status:"
+echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE exec postgres pg_isready -U my_user -d my_uploader_db"
+echo ""
+echo -e "${YELLOW}🔄 Reinstallation:${NC}"
 echo "# For a full reinstall:"
 echo "sudo rm -f $PROGRESS_FILE"
 echo "# To reset a specific stage, edit:"
@@ -1022,6 +1114,10 @@ echo ""
 echo -e "${YELLOW}📊 Monitoring:${NC}"
 echo "docker stats"
 echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs -f"
+echo "# PostgreSQL logs:"
+echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs postgres"
+echo "# Redis logs:"
+echo "sudo docker-compose -f $MAIN_PROJECT_DIR/$COMPOSE_FILE logs redis"
 echo ""
 echo -e "${GREEN}✅ MY Uploader Bot is ready!${NC}"
 echo ""