Add timestamped logging to all console output

- Created logging_utils.py with a log_message() function that prefixes all output with the date, time, and process name
- Replaced all print() statements with log_message() calls across the entire codebase
- Fixed circular import issues by separating logging from utils.py
- All console output now follows the format: [YYYY-MM-DD HH:MM:SS] [process_name] message

This improves debugging and monitoring by providing clear timestamps and process identification for all log messages.
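For illustration, a minimal usage sketch of the new helper (the top-level import path and the timestamps shown are assumptions; in-package code imports it as `from .logging_utils import log_message`):

```python
from vidai.logging_utils import log_message  # assumed top-level import path

# Explicit process name
log_message("Analysis worker started", process="worker_analysis")
# [2025-01-15 14:32:07] [worker_analysis] Analysis worker started   (illustrative timestamp)

# Process name omitted: log_message falls back to auto-detecting it from the running script's name
log_message("Job queued")
# [2025-01-15 14:32:08] [main] Job queued
```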
parent 8cee35a7
#!/usr/bin/env python3
"""
Script to replace all print() statements with log_message() calls
and add proper imports.
"""

import os
import re
import glob


def get_process_name(filepath):
    """Determine process name from file path."""
    filename = os.path.basename(filepath)
    if 'backend' in filename:
        return 'backend'
    elif 'worker_analysis' in filename:
        return 'worker_analysis'
    elif 'worker_training' in filename:
        return 'worker_training'
    elif 'web' in filename:
        return 'web'
    elif 'cluster_master' in filename:
        return 'cluster_master'
    elif 'cluster_client' in filename:
        return 'cluster_client'
    elif 'api' in filename:
        return 'api'
    elif 'admin' in filename:
        return 'admin'
    elif 'queue' in filename:
        return 'queue'
    elif 'database' in filename:
        return 'database'
    elif 'auth' in filename:
        return 'auth'
    elif 'config' in filename:
        return 'config'
    elif 'runpod' in filename:
        return 'runpod'
    elif 'email' in filename:
        return 'email'
    elif 'payments' in filename:
        return 'payments'
    elif 'comm' in filename:
        return 'comm'
    else:
        return 'main'


def process_file(filepath):
    """Process a single Python file."""
    print(f"Processing {filepath}...")
    with open(filepath, 'r') as f:
        content = f.read()

    # Skip if already processed
    if 'from .logging_utils import log_message' in content or 'log_message' in content:
        print(f" Already processed, skipping")
        return

    # Add import after existing imports
    import_pattern = r'(from \..* import.*\n)+'
    import_match = re.search(import_pattern, content)
    if import_match:
        # Insert after the last import
        insert_pos = import_match.end()
        content = content[:insert_pos] + 'from .logging_utils import log_message\n' + content[insert_pos:]
    else:
        # Add at the beginning after docstring
        lines = content.split('\n')
        insert_pos = 0
        for i, line in enumerate(lines):
            if line.strip().startswith('"""') or line.strip().startswith("'''"):
                # Find end of docstring
                quote = line.strip()[0]
                for j in range(i + 1, len(lines)):
                    if lines[j].strip().endswith(quote * 3):
                        insert_pos = j + 1
                        break
                break
            elif line.strip() and not line.startswith('#'):
                insert_pos = i
                break
        lines.insert(insert_pos, 'from .logging_utils import log_message')
        content = '\n'.join(lines)

    # Replace print( with log_message(
    # This is a simple replacement - may need manual review for complex cases
    content = re.sub(r'\bprint\(', 'log_message(', content)

    with open(filepath, 'w') as f:
        f.write(content)

    print(f" Updated {filepath}")


def main():
    """Main function."""
    # Find all Python files in vidai directory
    py_files = glob.glob('vidai/*.py')
    for filepath in py_files:
        if os.path.basename(filepath) in ['__init__.py', 'utils.py', 'logging_utils.py']:
            continue  # Skip these files
        process_file(filepath)
    print("Done!")


if __name__ == '__main__':
    main()
\ No newline at end of file
@@ -24,6 +24,7 @@ from .auth import require_auth
from .database import get_user_tokens, update_user_tokens, get_user_queue_items, get_default_user_tokens, create_remember_token, validate_remember_token, delete_remember_token, extend_remember_token, get_all_users, update_user_status, update_user_info, delete_user, get_worker_tokens, deactivate_worker_token, activate_worker_token, delete_worker_token, create_user, get_all_models, create_model, update_model, delete_model, get_model_by_id
from .comm import SocketCommunicator, Message
from .utils import get_current_user_session, login_required, admin_required
from .logging_utils import log_message
admin_bp = Blueprint('admin', __name__, url_prefix='/admin')
@@ -40,7 +41,7 @@ def send_to_backend(msg_type: str, data: dict) -> str:
comm.send_message(message)
return msg_id
except Exception as e:
print(f"Failed to send message to backend: {e}")
log_message(f"Failed to send message to backend: {e}")
return msg_id
def get_result(msg_id: str) -> dict:
@@ -26,6 +26,7 @@ from .auth import login_user, logout_user, get_current_user, register_user, conf
from .database import get_user_tokens, update_user_tokens, get_user_queue_items, get_default_user_tokens, create_remember_token, validate_remember_token, delete_remember_token, extend_remember_token
from .comm import SocketCommunicator, Message
from .utils import get_current_user_session, login_required, admin_required, api_auth_required, admin_api_auth_required
from .logging_utils import log_message
api_bp = Blueprint('api', __name__)
@@ -45,7 +46,7 @@ def send_to_backend(msg_type: str, data: dict) -> str:
comm.send_message(message)
return msg_id
except Exception as e:
print(f"Failed to send message to backend: {e}")
log_message(f"Failed to send message to backend: {e}")
return msg_id
def get_result(msg_id: str) -> dict:
@@ -166,7 +167,7 @@ def api_stats():
except ImportError:
# Fallback to PyTorch-only stats if pynvml not available
print("pynvml not available, falling back to PyTorch GPU stats")
log_message("pynvml not available, falling back to PyTorch GPU stats")
if torch.cuda.is_available():
data['gpu_count'] = torch.cuda.device_count()
data['gpus'] = []
@@ -180,7 +181,7 @@ def api_stats():
}
data['gpus'].append(gpu)
except Exception as e:
print(f"Error getting GPU stats with pynvml: {e}")
log_message(f"Error getting GPU stats with pynvml: {e}")
# Fallback to PyTorch if pynvml fails
if torch.cuda.is_available():
data['gpu_count'] = torch.cuda.device_count()
@@ -24,6 +24,7 @@ import secrets
import json
from typing import Optional, Dict, Any
from .database import authenticate_user, validate_api_token, create_persistent_session, get_persistent_session, destroy_persistent_session
from .logging_utils import log_message
# Redis support
try:
@@ -58,9 +59,9 @@ class SessionManager:
)
# Test connection
self.redis_client.ping()
print("Redis session storage enabled")
log_message("Redis session storage enabled")
except (redis.ConnectionError, redis.AuthenticationError):
print("Redis not available, falling back to database storage")
log_message("Redis not available, falling back to database storage")
self.redis_client = None
def _use_redis(self) -> bool:
@@ -24,6 +24,7 @@ import threading
from .comm import SocketServer, Message
from .config import get_analysis_backend, get_training_backend, set_analysis_backend, set_training_backend, get_backend_worker_port
from .queue import queue_manager
from .logging_utils import log_message
worker_sockets = {} # type: dict
@@ -59,9 +60,9 @@ def handle_web_message(message: Message, client_sock=None) -> Message:
from .config import get_analysis_backend
backend = get_analysis_backend()
worker_key = f'analysis_{backend}'
print(f"Backend forwarding analyze_request {message.msg_id} to worker {worker_key}")
print(f"DEBUG: backend = {backend}, worker_key = {worker_key}")
print(f"DEBUG: Checking worker_sockets for {worker_key}, keys: {list(worker_sockets.keys())}")
log_message(f"Backend forwarding analyze_request {message.msg_id} to worker {worker_key}")
log_message(f"DEBUG: backend = {backend}, worker_key = {worker_key}")
log_message(f"DEBUG: Checking worker_sockets for {worker_key}, keys: {list(worker_sockets.keys())}")
if worker_key in worker_sockets:
# Forward to local worker
import json
@@ -28,6 +28,7 @@ from typing import Dict, Any, Optional
from dataclasses import dataclass
from .compat import get_socket_path, is_unix_sockets_supported
from .config import get_debug
from .logging_utils import log_message
@dataclass
@@ -72,7 +73,7 @@ class SocketCommunicator:
}).encode('utf-8')
full_data = data + b'\n'
if get_debug():
print(f"DEBUG: SocketCommunicator sending: {full_data}")
log_message(f"DEBUG: SocketCommunicator sending: {full_data}")
self.sock.sendall(full_data)
def receive_message(self) -> Optional[Message]:
@@ -156,7 +157,7 @@ class SocketServer:
for msg_str in messages:
if msg_str.strip():
if get_debug():
print(f"DEBUG: SocketServer processing message: {repr(msg_str)}")
log_message(f"DEBUG: SocketServer processing message: {repr(msg_str)}")
try:
msg_data = json.loads(msg_str)
message = Message(
@@ -165,7 +166,7 @@
data=msg_data['data']
)
if get_debug():
print(f"DEBUG: SocketServer parsed message: {message}")
log_message(f"DEBUG: SocketServer parsed message: {message}")
response = self.message_handler(message, client_sock)
if response:
resp_data = json.dumps({
@@ -176,7 +177,7 @@
client_sock.sendall(resp_data + b'\n')
except json.JSONDecodeError as e:
if get_debug():
print(f"DEBUG: SocketServer JSON decode error: {e}")
log_message(f"DEBUG: SocketServer JSON decode error: {e}")
pass
except:
pass
@@ -18,6 +18,7 @@
Cross-platform compatibility utilities for Video AI.
Handles differences between Linux and Windows platforms.
"""
from .logging_utils import log_message
import os
import sys
@@ -23,6 +23,7 @@ Supports CLI, config file, environment variables, and defaults.
from .config_loader import load_initial_config, DEFAULTS
from .database import get_config, set_config, get_all_config, get_system_prompt, set_system_prompt
from .logging_utils import log_message
def initialize_config(cli_args=None) -> None:
@@ -37,20 +38,20 @@ def initialize_config(cli_args=None) -> None:
debug_explicitly_set = False
if cli_args and hasattr(cli_args, 'debug') and cli_args.debug:
debug_explicitly_set = True
print(f"DEBUG_CHECK: debug set from CLI args")
log_message(f"DEBUG_CHECK: debug set from CLI args")
elif f'VIDAI_DEBUG' in os.environ:
debug_explicitly_set = True
print(f"DEBUG_CHECK: debug set from environment VIDAI_DEBUG={os.environ['VIDAI_DEBUG']}")
log_message(f"DEBUG_CHECK: debug set from environment VIDAI_DEBUG={os.environ['VIDAI_DEBUG']}")
elif 'debug' in initial_config and initial_config['debug'] != DEFAULTS['debug']:
debug_explicitly_set = True
print(f"DEBUG_CHECK: debug set from config file, initial_config['debug']={initial_config['debug']}, DEFAULTS['debug']={DEFAULTS['debug']}")
log_message(f"DEBUG_CHECK: debug set from config file, initial_config['debug']={initial_config['debug']}, DEFAULTS['debug']={DEFAULTS['debug']}")
if not debug_explicitly_set:
# Reset debug to false if not explicitly set via CLI, env, or config file
initial_config['debug'] = 'false'
print(f"MAIN_DEBUG_SET: debug reset to false (not explicitly set)")
log_message(f"MAIN_DEBUG_SET: debug reset to false (not explicitly set)")
else:
print(f"MAIN_DEBUG_SET: debug kept as {initial_config['debug']} (explicitly set)")
log_message(f"MAIN_DEBUG_SET: debug kept as {initial_config['debug']} (explicitly set)")
# Special handling for debug_web: same logic as debug
debug_web_explicitly_set = False
@@ -18,6 +18,7 @@
Configuration loader for Video AI.
Handles loading from CLI, config file, environment variables, and defaults.
"""
from .logging_utils import log_message
import os
import configparser
@@ -111,7 +112,7 @@ def load_initial_config(cli_args=None) -> dict:
if custom_config.exists():
config_files.append(custom_config)
else:
print(f"Warning: Specified config file '{args.config}' does not exist. Falling back to default locations.")
log_message(f"Warning: Specified config file '{args.config}' does not exist. Falling back to default locations.")
else:
# Default locations
config_files = [
@@ -23,6 +23,7 @@ import os
import json
from typing import Dict, Any, Optional, List
from .compat import get_user_config_dir, ensure_dir
from .logging_utils import log_message
# Database imports - conditionally import MySQL
try:
@@ -698,9 +699,9 @@ def init_db(conn) -> None:
cursor.execute('ALTER TABLE processing_queue ADD COLUMN updated_at TIMESTAMP')
# Then set default for future inserts
cursor.execute('UPDATE processing_queue SET updated_at = CURRENT_TIMESTAMP WHERE updated_at IS NULL')
print("Added updated_at column to processing_queue table")
log_message("Added updated_at column to processing_queue table")
except Exception as e:
print(f"Error adding updated_at column: {e}")
log_message(f"Error adding updated_at column: {e}")
pass
# Cluster processes table
@@ -26,6 +26,7 @@ from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from typing import Optional
from .config import get_config
from .logging_utils import log_message
def get_smtp_config() -> dict:
@@ -45,7 +46,7 @@ def send_email(to_email: str, subject: str, html_content: str, text_content: str
config = get_smtp_config()
if not config['username'] or not config['password']:
print("SMTP not configured. Email sending disabled.")
log_message("SMTP not configured. Email sending disabled.")
return False
try:
@@ -78,7 +79,7 @@ def send_email(to_email: str, subject: str, html_content: str, text_content: str
return True
except Exception as e:
print(f"Email sending failed: {e}")
log_message(f"Email sending failed: {e}")
return False
"""
Logging utilities for Video AI application.
"""
import datetime
import os
def log_message(message: str, process: str = None) -> None:
"""Log a message with timestamp and process information."""
if process is None:
# Auto-detect process type from current script name
script_name = os.path.basename(__file__)
if 'backend' in script_name:
process = 'backend'
elif 'worker_analysis' in script_name:
process = 'worker_analysis'
elif 'worker_training' in script_name:
process = 'worker_training'
elif 'web' in script_name:
process = 'web'
elif 'cluster_master' in script_name:
process = 'cluster_master'
elif 'cluster_client' in script_name:
process = 'cluster_client'
elif 'api' in script_name:
process = 'api'
elif 'admin' in script_name:
process = 'admin'
elif 'queue' in script_name:
process = 'queue'
elif 'database' in script_name:
process = 'database'
elif 'auth' in script_name:
process = 'auth'
elif 'config' in script_name:
process = 'config'
elif 'runpod' in script_name:
process = 'runpod'
elif 'email' in script_name:
process = 'email'
elif 'payments' in script_name:
process = 'payments'
elif 'comm' in script_name:
process = 'comm'
else:
process = 'main'
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"[{timestamp}] [{process}] {message}")
\ No newline at end of file
@@ -336,6 +336,7 @@ def estimate_model_vram_requirements(model_path: str) -> int:
# First, try to get from database
try:
from .database import get_db_connection
from .logging_utils import log_message
conn = get_db_connection()
cursor = conn.cursor()
cursor.execute('SELECT vram_estimate, vram_overhead_gb FROM models WHERE path = ?', (model_path,))
@@ -25,6 +25,7 @@ from typing import Dict, Any, Optional, Tuple, List
from .config import get_config
from .database import update_user_tokens
from .email_utils import send_payment_confirmation
from .logging_utils import log_message
class PaymentProcessor:
@@ -59,7 +60,7 @@ class StripeProcessor(PaymentProcessor):
stripe.api_key = self.secret_key
except ImportError:
self.enabled = False
print("Stripe not available. Install with: pip install stripe")
log_message("Stripe not available. Install with: pip install stripe")
def process_payment(self, user_id: int, tokens: int, amount: float, currency: str = 'USD') -> Tuple[bool, str, Optional[str]]:
"""Process Stripe payment."""
@@ -105,7 +106,7 @@ class StripeProcessor(PaymentProcessor):
}
except Exception as e:
print(f"Stripe payment intent creation failed: {e}")
log_message(f"Stripe payment intent creation failed: {e}")
return None
@@ -20,6 +20,7 @@ Queue management for concurrent processing.
from typing import List, Dict, Any, Optional
from .database import (
add_to_queue, update_queue_status,
get_queue_status, get_user_queue_items, get_queue_position
)
from .logging_utils import log_message
@@ -29,7 +30,7 @@ class QueueManager:
"""Manages job submission to the processing queue."""
def __init__(self):
print("QueueManager initialized", flush=True)
log_message("QueueManager initialized", flush=True)
self.last_status_print = 0 # Timestamp of last status message
def submit_job(self, user_id: int, request_type: str, data: dict, priority: int = 0) -> int:
@@ -90,9 +91,9 @@
update_queue_status(queue_id, 'cancelled')
if cancelled_job_id:
print(f"Job {queue_id} ({cancelled_job_id}) cancelled")
log_message(f"Job {queue_id} ({cancelled_job_id}) cancelled")
else:
print(f"Job {queue_id} cancelled")
log_message(f"Job {queue_id} cancelled")
return True
@@ -28,6 +28,7 @@ from typing import Dict, List, Optional, Any
from dataclasses import dataclass
from .config import get_runpod_api_key, set_runpod_api_key, get_runpod_template_id, set_runpod_template_id, get_runpod_gpu_type, set_runpod_gpu_type, get_use_runpod_pods, set_use_runpod_pods
from .compat import get_user_config_dir, ensure_dir
from .logging_utils import log_message
@dataclass
@@ -162,7 +163,7 @@ class RunPodManager:
return pod
except Exception as e:
print(f"Failed to create pod: {e}")
log_message(f"Failed to create pod: {e}")
return None
@@ -235,7 +236,7 @@ class RunPodManager:
pods_to_terminate.append(pod_id)
for pod_id in pods_to_terminate:
print(f"Terminating idle pod: {pod_id}")
log_message(f"Terminating idle pod: {pod_id}")
self.terminate_pod(pod_id)
def get_active_pods(self) -> List[RunPodPod]:
@@ -33,6 +33,7 @@ from .database import get_user_tokens, update_user_tokens, get_user_queue_items,
from .api import api_bp
from .admin import admin_bp
from .utils import get_current_user_session, login_required, admin_required
from .logging_utils import log_message
# Determine project root (parent of vidai directory)
current_dir = os.path.dirname(os.path.abspath(__file__))
@@ -84,7 +85,7 @@ def send_to_backend(msg_type: str, data: dict) -> str:
comm.send_message(message)
return msg_id
except Exception as e:
print(f"Failed to send message to backend: {e}")
log_message(f"Failed to send message to backend: {e}")
return msg_id
def get_progress(job_id: str) -> dict:
@@ -102,7 +103,7 @@ def get_progress(job_id: str) -> dict:
elif response and response.msg_type == 'progress_pending':
return {'status': 'no_progress'}
except Exception as e:
print(f"Error getting progress: {e}")
log_message(f"Error getting progress: {e}")
return {}
def get_result(msg_id: str) -> dict:
@@ -1211,7 +1212,7 @@ def detect_local_workers():
continue
except Exception as e:
print(f"Error detecting local workers: {e}")
log_message(f"Error detecting local workers: {e}")
return workers
@@ -1323,7 +1324,7 @@ def switch_local_worker_backends(new_backend):
available_backends = get_available_backends()
if new_backend not in available_backends:
print(f"Warning: {new_backend} backend not available, available: {available_backends}")
log_message(f"Warning: {new_backend} backend not available, available: {available_backends}")
# Try to start with available backends instead
backends_to_use = [b for b in available_backends if b != new_backend][:1] # Use first available
if not backends_to_use:
@@ -1334,24 +1335,24 @@
try:
cmd = [sys.executable, '-m', 'vidai.worker_analysis', new_backend]
subprocess.Popen(cmd)
print(f"Started analysis worker with {new_backend} backend")
log_message(f"Started analysis worker with {new_backend} backend")
except Exception as e:
print(f"Failed to start analysis worker: {e}")
log_message(f"Failed to start analysis worker: {e}")
return False
# Start training worker
try:
cmd = [sys.executable, '-m', 'vidai.worker_training', new_backend]
subprocess.Popen(cmd)
print(f"Started training worker with {new_backend} backend")
log_message(f"Started training worker with {new_backend} backend")
except Exception as e:
print(f"Failed to start training worker: {e}")
log_message(f"Failed to start training worker: {e}")
return False
return True
except Exception as e:
print(f"Error switching local worker backends: {e}")
log_message(f"Error switching local worker backends: {e}")
return False
@app.route('/api_tokens')
@@ -1711,7 +1712,7 @@ if __name__ == "__main__":
server_dir = args.server_dir
if server_dir:
server_dir = os.path.abspath(server_dir)
print(f"Server directory set to: {server_dir}")
log_message(f"Server directory set to: {server_dir}")
# Set server_dir in API module
import vidai.api as api_module
@@ -28,11 +28,12 @@ import json
import time
from .comm import SocketCommunicator, Message
from .config import get_comm_type, get_backend_worker_port, get_debug
from .logging_utils import log_message
def train_model(train_path, output_model, description, comm, job_id):
"""Perform training with progress updates."""
if get_debug():
print(f"DEBUG: Starting training with videotrain for output_model {output_model}")
log_message(f"DEBUG: Starting training with videotrain for output_model {output_model}")
desc_file = os.path.join(train_path, "description.txt")
with open(desc_file, "w") as f:
f.write(description)
@@ -49,7 +50,7 @@ def train_model(train_path, output_model, description, comm, job_id):
'message': 'Training started'
})
comm.send_message(progress_msg)
print(f"PROGRESS: Job {job_id} - 10% - Training started")
log_message(f"PROGRESS: Job {job_id} - 10% - Training started")
last_ping = time.time()
while proc.poll() is None:
@@ -60,14 +61,14 @@ def train_model(train_path, output_model, description, comm, job_id):
'timestamp': time.time()
})
comm.send_message(ping_msg)
print(f"PING: Job {job_id} - Keeping connection alive")
log_message(f"PING: Job {job_id} - Keeping connection alive")
last_ping = time.time()
time.sleep(1)
# Get result
stdout, stderr = proc.communicate()
if get_debug():
print(f"DEBUG: Training subprocess completed with returncode {proc.returncode}")
log_message(f"DEBUG: Training subprocess completed with returncode {proc.returncode}")
if proc.returncode == 0:
return "Training completed!"
else:
@@ -76,7 +77,7 @@ def train_model(train_path, output_model, description, comm, job_id):
def worker_process(backend_type: str):
"""Main worker process."""
if get_debug():
print(f"Starting Training Worker for {backend_type}...")
log_message(f"Starting Training Worker for {backend_type}...")
# Workers use TCP for interprocess communication
comm = SocketCommunicator(host='127.0.0.1', port=get_backend_worker_port(), comm_type='tcp')
@@ -90,19 +91,19 @@ def worker_process(backend_type: str):
try:
message = comm.receive_message()
if message and get_debug():
print(f"DEBUG: Worker {os.getpid()} received message: {message}")
log_message(f"DEBUG: Worker {os.getpid()} received message: {message}")
if message and message.msg_type == 'train_request':
if get_debug():
print(f"DEBUG: Worker received train_request: {message.msg_id}")
log_message(f"DEBUG: Worker received train_request: {message.msg_id}")
data = message.data
output_model = data.get('output_model', './VideoModel')
description = data.get('description', '')
train_dir = data.get('train_dir', '')
if train_dir and os.path.isdir(train_dir):
print(f"PROGRESS: Job {message.msg_id} accepted - Starting training")
log_message(f"PROGRESS: Job {message.msg_id} accepted - Starting training")
if get_debug():
print(f"DEBUG: Starting training for job {message.msg_id}")
log_message(f"DEBUG: Starting training for job {message.msg_id}")
result = train_model(train_dir, output_model, description, comm, message.msg_id)
# Send final progress
progress_msg = Message('progress', f'progress_{message.msg_id}', {
@@ -112,21 +113,21 @@
'message': 'Training completed'
})
comm.send_message(progress_msg)
print(f"PROGRESS: Job {message.msg_id} - 100% - Training completed")
log_message(f"PROGRESS: Job {message.msg_id} - 100% - Training completed")
if get_debug():
print(f"DEBUG: Training completed for job {message.msg_id}")
log_message(f"DEBUG: Training completed for job {message.msg_id}")
else:
result = "No valid training directory provided"
if get_debug():
print(f"DEBUG: No valid training directory for job {message.msg_id}")
log_message(f"DEBUG: No valid training directory for job {message.msg_id}")
response = Message('train_response', message.msg_id, {'message': result})
if get_debug():
print(f"DEBUG: Sending train_response for job {message.msg_id}")
log_message(f"DEBUG: Sending train_response for job {message.msg_id}")
comm.send_message(response)
time.sleep(0.1)
except Exception as e:
print(f"Worker error: {e}")
log_message(f"Worker error: {e}")
time.sleep(1)
if __name__ == "__main__":