Add client list

parent 4f9f12bc
......@@ -571,6 +571,7 @@ def api_get_updates():
user = None
auth_method = None
api_token = None
# Try JWT authentication first (short-lived session tokens)
try:
......@@ -584,6 +585,7 @@ def api_get_updates():
# If JWT fails, try API token authentication (long-lived tokens)
try:
from app.auth.jwt_utils import validate_api_token, extract_token_from_request
token = extract_token_from_request()
if not token:
return jsonify({
......@@ -606,6 +608,12 @@ def api_get_updates():
logger.info(f"API updates accessed via {auth_method} by user {user.username} (ID: {user.id})")
# Track client activity if using API token
if api_token:
data = request.get_json() or {}
rustdesk_id = data.get('rustdesk_id')
track_client_activity(api_token, rustdesk_id)
# Get 'from' parameter (unix timestamp) - now optional
# Support both GET (query params) and POST (JSON body)
if request.method == 'GET':
......@@ -793,3 +801,77 @@ def api_download_zip(match_id):
except Exception as e:
logger.error(f"API ZIP download error: {str(e)}")
return jsonify({'error': 'ZIP download failed'}), 500
def track_client_activity(api_token, rustdesk_id=None):
    """Record a heartbeat for an API client so it shows as online.

    Upserts a ClientActivity row keyed on (api_token_id, rustdesk_id),
    refreshing last_seen, ip_address and user_agent on every call.

    Args:
        api_token: APIToken instance the client authenticated with; ignored
            when missing or no longer valid.
        rustdesk_id: Optional client identifier; stored as 'unknown' when absent.

    Must be called inside a Flask request context (reads `request`).
    Tracking is best-effort: any failure is logged and swallowed so it can
    never break the API call that triggered it.
    """
    try:
        if not api_token or not api_token.is_valid():
            return

        from app.models import ClientActivity
        from flask import request

        # X-Forwarded-For may carry a comma-separated proxy chain; the first
        # entry is the originating client, not the last hop.
        forwarded = request.headers.get('X-Forwarded-For')
        ip_address = forwarded.split(',')[0].strip() if forwarded else request.remote_addr
        user_agent = request.headers.get('User-Agent')

        # Upsert: one row per (token, rustdesk_id) pair.
        client = ClientActivity.query.filter_by(
            api_token_id=api_token.id,
            rustdesk_id=rustdesk_id or 'unknown'
        ).first()

        if client:
            client.last_seen = datetime.utcnow()
            client.ip_address = ip_address
            client.user_agent = user_agent
        else:
            client = ClientActivity(
                api_token_id=api_token.id,
                rustdesk_id=rustdesk_id or 'unknown',
                ip_address=ip_address,
                user_agent=user_agent
            )
            db.session.add(client)

        db.session.commit()
    except Exception as e:
        logger.error(f"Failed to track client activity: {str(e)}")
@bp.route('/track', methods=['POST'])
@csrf.exempt
def api_track_client():
    """Explicit heartbeat endpoint: records a client's rustdesk_id as active.

    Expects an API token (header or query) and a JSON body containing
    'rustdesk_id'. Returns 401/404/400 on auth or input problems, 200 with
    an acknowledgement on success, 500 on unexpected failure.
    """
    try:
        from app.auth.jwt_utils import validate_api_token, extract_token_from_request

        # Guard clauses: token present, token valid, user still active.
        raw_token = extract_token_from_request()
        if not raw_token:
            return jsonify({'error': 'API token required'}), 401

        user, api_token = validate_api_token(raw_token)
        if not user or not user.is_active:
            return jsonify({'error': 'User not found or inactive'}), 404

        # The client reports its RustDesk identifier in the JSON body.
        payload = request.get_json() or {}
        rustdesk_id = payload.get('rustdesk_id')
        if not rustdesk_id:
            return jsonify({'error': 'rustdesk_id is required'}), 400

        track_client_activity(api_token, rustdesk_id)

        return jsonify({
            'message': 'Client activity tracked successfully',
            'rustdesk_id': rustdesk_id,
            'last_seen': datetime.utcnow().isoformat()
        }), 200
    except Exception as e:
        logger.error(f"API track client error: {str(e)}")
        return jsonify({'error': 'Failed to track client'}), 500
\ No newline at end of file
......@@ -457,6 +457,114 @@ class Migration_007_AddDoneToStatusEnum(Migration):
def can_rollback(self) -> bool:
return True
class Migration_008_AddRemoteDomainSetting(Migration):
    """Add remote_domain setting to system_settings table"""

    def __init__(self):
        super().__init__("008", "Add remote_domain setting for client remote connections")

    def up(self):
        """Insert the remote_domain system setting (idempotent)."""
        try:
            from app.models import SystemSettings

            # Nothing to do when a previous run already created the row.
            if SystemSettings.query.filter_by(key='remote_domain').first() is not None:
                logger.info("remote_domain setting already exists, skipping creation")
                return True

            db.session.add(SystemSettings(
                key='remote_domain',
                value='townshipscombatleague.com',
                value_type='string',
                description='Domain for remote client connections'
            ))
            db.session.commit()
            logger.info("Added remote_domain setting successfully")
            return True
        except Exception as e:
            logger.error(f"Migration 008 failed: {str(e)}")
            raise

    def down(self):
        """Delete the remote_domain system setting if present."""
        try:
            from app.models import SystemSettings

            row = SystemSettings.query.filter_by(key='remote_domain').first()
            if row:
                db.session.delete(row)
                db.session.commit()
                logger.info("Removed remote_domain setting")
            return True
        except Exception as e:
            logger.error(f"Rollback of migration 008 failed: {str(e)}")
            raise

    def can_rollback(self) -> bool:
        return True
class Migration_009_CreateClientActivityTable(Migration):
    """Create client activity table for tracking online clients"""

    def __init__(self):
        super().__init__("009", "Create client activity table for tracking online clients")

    def up(self):
        """Create the client_activity table (idempotent)."""
        try:
            # Skip when a previous run already created the table.
            if 'client_activity' in inspect(db.engine).get_table_names():
                logger.info("client_activity table already exists, skipping creation")
                return True

            # Raw DDL keeps explicit control over engine, charset and indexes.
            ddl = '''
            CREATE TABLE client_activity (
                id INT AUTO_INCREMENT PRIMARY KEY,
                api_token_id INT NOT NULL,
                rustdesk_id VARCHAR(255) NOT NULL,
                last_seen DATETIME DEFAULT CURRENT_TIMESTAMP,
                ip_address VARCHAR(45),
                user_agent TEXT,
                INDEX idx_client_activity_api_token_id (api_token_id),
                INDEX idx_client_activity_rustdesk_id (rustdesk_id),
                INDEX idx_client_activity_last_seen (last_seen),
                FOREIGN KEY (api_token_id) REFERENCES api_tokens(id) ON DELETE CASCADE
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
            '''
            with db.engine.connect() as conn:
                conn.execute(text(ddl))
                conn.commit()

            logger.info("Created client_activity table successfully")
            return True
        except Exception as e:
            logger.error(f"Migration 009 failed: {str(e)}")
            raise

    def down(self):
        """Drop the client_activity table."""
        try:
            with db.engine.connect() as conn:
                conn.execute(text("DROP TABLE IF EXISTS client_activity"))
                conn.commit()
            logger.info("Dropped client_activity table")
            return True
        except Exception as e:
            logger.error(f"Rollback of migration 009 failed: {str(e)}")
            raise

    def can_rollback(self) -> bool:
        return True
class MigrationManager:
"""Manages database migrations and versioning"""
......@@ -469,6 +577,8 @@ class MigrationManager:
Migration_005_AddFixtureActiveTime(),
Migration_006_AddStatusColumn(),
Migration_007_AddDoneToStatusEnum(),
Migration_008_AddRemoteDomainSetting(),
Migration_009_CreateClientActivityTable(),
]
def ensure_version_table(self):
......
......@@ -1413,3 +1413,77 @@ def download_zip(match_id):
logger.error(f"ZIP download error: {str(e)}")
flash('Error downloading ZIP file', 'error')
abort(500)
@csrf.exempt
@bp.route('/clients')
@login_required
@require_active_user
def clients():
    """Clients page showing connected clients with online/offline status.

    A client counts as online when its last heartbeat is within 30 minutes.
    Renders main/clients.html with a list of dicts (one per ClientActivity
    row on an active token); on any error renders the page with an empty list.
    """
    try:
        from app.models import ClientActivity, SystemSettings, APIToken, User
        from datetime import datetime

        # Domain used to build the per-client remote-connection URL.
        remote_domain = SystemSettings.get_setting('remote_domain', 'townshipscombatleague.com')

        # Single query: join token and owning user up front instead of issuing
        # one User.query.get() per row (avoids an N+1 query pattern).
        rows = db.session.query(
            ClientActivity,
            APIToken.name.label('token_name'),
            User.username.label('username')
        ).join(
            APIToken, ClientActivity.api_token_id == APIToken.id
        ).outerjoin(
            User, APIToken.user_id == User.id
        ).filter(
            APIToken.is_active == True
        ).order_by(
            ClientActivity.last_seen.desc()
        ).all()

        now = datetime.utcnow()
        clients_data = []
        for activity, token_name, username in rows:
            elapsed = (now - activity.last_seen).total_seconds()
            is_online = elapsed <= 1800  # 30 minutes

            clients_data.append({
                'rustdesk_id': activity.rustdesk_id,
                'token_name': token_name,
                'username': username if username else 'Unknown',
                'is_online': is_online,
                'last_seen': activity.last_seen,
                'last_seen_formatted': activity.last_seen.strftime('%Y-%m-%d %H:%M:%S'),
                'last_seen_ago': _format_time_ago(elapsed),
                'ip_address': activity.ip_address,
                'user_agent': activity.user_agent,
                'remote_domain': remote_domain
            })

        # Online clients first; within each group, most recently seen first.
        clients_data.sort(key=lambda c: (not c['is_online'], c['last_seen']), reverse=True)

        return render_template('main/clients.html', clients=clients_data)
    except Exception as e:
        logger.error(f"Clients page error: {str(e)}")
        flash('Error loading clients', 'error')
        return render_template('main/clients.html', clients=[])


def _format_time_ago(seconds):
    """Return a coarse human-readable '<N> <unit> ago' string for a duration in seconds."""
    if seconds < 60:
        return f"{int(seconds)} seconds ago"
    if seconds < 3600:
        return f"{int(seconds / 60)} minutes ago"
    if seconds < 86400:
        return f"{int(seconds / 3600)} hours ago"
    return f"{int(seconds / 86400)} days ago"
\ No newline at end of file
......@@ -794,6 +794,7 @@ class SystemSettings(db.Model):
('session_timeout_hours', 24, 'integer', 'User session timeout in hours'),
('api_rate_limit_per_minute', 60, 'integer', 'API rate limit per minute per IP'),
('api_updates_default_count', 10, 'integer', 'Default number of fixtures returned by /api/updates when no from parameter is provided'),
('remote_domain', 'townshipscombatleague.com', 'string', 'Domain for remote client connections'),
]
for key, default_value, value_type, description in defaults:
......@@ -819,3 +820,20 @@ class SystemSettings(db.Model):
def __repr__(self):
return f'<SystemSettings {self.key}: {self.value}>'
class ClientActivity(db.Model):
    """Track client activity for online/offline status"""
    __tablename__ = 'client_activity'

    id = db.Column(db.Integer, primary_key=True)
    # Token the client authenticated with.
    # NOTE(review): migration 009 creates this FK with ON DELETE CASCADE at the
    # DB level, but no ondelete= is declared here — confirm they stay in sync.
    api_token_id = db.Column(db.Integer, db.ForeignKey('api_tokens.id'), nullable=False, index=True)
    # Client-reported identifier; 'unknown' when the client sends none.
    rustdesk_id = db.Column(db.String(255), nullable=False, index=True)
    # Last heartbeat; drives the online/offline decision in the clients view.
    last_seen = db.Column(db.DateTime, default=datetime.utcnow, index=True)
    # 45 chars fits the longest textual IPv6 form (including mapped IPv4).
    ip_address = db.Column(db.String(45))
    user_agent = db.Column(db.Text)

    # Relationships
    api_token = db.relationship('APIToken', backref='client_activity', lazy='select')

    def __repr__(self):
        return f'<ClientActivity {self.rustdesk_id} via token {self.api_token_id}>'
\ No newline at end of file
......@@ -178,6 +178,7 @@
<div class="nav">
<a href="{{ url_for('main.dashboard') }}">Dashboard</a>
<a href="{{ url_for('main.fixtures') }}">Fixtures</a>
<a href="{{ url_for('main.clients') }}">Clients</a>
<a href="{{ url_for('main.uploads') }}">Uploads</a>
<a href="{{ url_for('main.statistics') }}">Statistics</a>
<a href="{{ url_for('main.user_tokens') }}">API Tokens</a>
......
{% extends "base.html" %}
{% block title %}Clients - Fixture Manager{% endblock %}
{% block content %}
<div class="d-flex justify-content-between align-items-center mb-2">
<h1>Connected Clients</h1>
<div class="d-flex gap-1">
<span class="badge badge-success" style="background-color: #28a745; color: white;">Online</span>
<span class="badge badge-secondary" style="background-color: #6c757d; color: white;">Offline</span>
</div>
</div>
<div class="alert alert-info">
<strong>Client Status:</strong> Clients are considered online if they have sent a request to the API within the last 30 minutes.
Online clients are listed first, followed by offline clients ordered by when they were last seen.
</div>
<div class="table-responsive">
<table class="table table-striped">
<thead>
<tr>
<th>Client Name</th>
<th>RustDesk ID</th>
<th>Status</th>
<th>Last Seen</th>
<th>Remote Link</th>
<th>IP Address</th>
<th>User Agent</th>
</tr>
</thead>
<tbody>
{% if clients %}
{% for client in clients %}
<tr class="{% if client.is_online %}table-success{% else %}table-secondary{% endif %}">
<td>
<strong>{{ client.token_name }}</strong>
<br><small class="text-muted">{{ client.username }}</small>
</td>
<td>
<code>{{ client.rustdesk_id }}</code>
</td>
<td>
{% if client.is_online %}
<span class="badge badge-success" style="background-color: #28a745; color: white;">Online</span>
{% else %}
<span class="badge badge-secondary" style="background-color: #6c757d; color: white;">Offline</span>
{% endif %}
</td>
<td>
{{ client.last_seen_formatted }}
<br><small class="text-muted">{{ client.last_seen_ago }}</small>
</td>
<td>
{% if client.is_online %}
<a href="https://{{ client.rustdesk_id }}.remote.{{ client.remote_domain }}"
target="_blank"
class="btn btn-sm btn-primary">
Connect
</a>
{% else %}
<span class="text-muted">Not available</span>
{% endif %}
</td>
<td>
{% if client.ip_address %}
<code>{{ client.ip_address }}</code>
{% else %}
<span class="text-muted">Unknown</span>
{% endif %}
</td>
<td>
{% if client.user_agent %}
<small class="text-muted">{{ client.user_agent[:50] }}{% if client.user_agent|length > 50 %}...{% endif %}</small>
{% else %}
<span class="text-muted">Unknown</span>
{% endif %}
</td>
</tr>
{% endfor %}
{% else %}
<tr>
<td colspan="7" class="text-center text-muted">
No clients found. Clients will appear here when they connect to the API.
</td>
</tr>
{% endif %}
</tbody>
</table>
</div>
<div class="mt-2 text-muted">
<small>
<strong>Note:</strong> The remote link format is: https://{rustdesk_id}.remote.{remote_domain}
<br>Default remote domain: townshipscombatleague.com (configurable in admin settings)
</small>
</div>
{% endblock %}
{% block extra_css %}
<style>
.badge {
padding: 4px 8px;
border-radius: 4px;
font-size: 0.8rem;
font-weight: bold;
}
.table-success {
background-color: rgba(40, 167, 69, 0.1) !important;
}
.table-secondary {
background-color: rgba(108, 117, 125, 0.1) !important;
}
code {
background-color: #f8f9fa;
padding: 2px 4px;
border-radius: 3px;
font-size: 0.9em;
}
</style>
{% endblock %}
\ No newline at end of file
......@@ -358,7 +358,7 @@
<span class="status-inactive">Inactive</span>
{% endif %}
</td>
<td>
<td id="zip_status_{{ match.id }}">
{% if match.zip_upload_status == 'completed' %}
<span class="status-active">Completed</span>
{% elif match.zip_upload_status == 'pending' %}
......@@ -369,12 +369,13 @@
<span style="color: #6c757d;">{{ match.zip_upload_status.title() }}</span>
{% endif %}
</td>
<td>
<td id="zip_actions_td_{{ match.id }}">
<div class="zip-actions" id="zip_actions_{{ match.id }}">
<a href="{{ url_for('main.match_detail', id=match.id) }}?fixture_id={{ fixture_info.fixture_id }}" class="btn btn-sm">View Details</a>
{% if match.zip_upload_status == 'completed' %}
<!-- ZIP file exists - show replace and delete options -->
<!-- ZIP file exists - show download, replace and delete options -->
<a href="{{ url_for('main.download_zip', match_id=match.id) }}" class="btn btn-success btn-sm" title="Download ZIP file">Download ZIP</a>
<form class="upload-form" id="upload_form_{{ match.id }}" method="POST" action="{{ url_for('upload.upload_zip') }}" enctype="multipart/form-data">
<input type="hidden" name="match_id" value="{{ match.id }}">
<input type="file" id="zip_file_{{ match.id }}" name="zip_file" accept=".zip">
......@@ -646,13 +647,20 @@
updateProgress(matchId, 100);
statusDiv.className = 'upload-status success';
statusDiv.textContent = 'Upload successful!';
// Reload page after a short delay to show updated status
// Update status and actions without page reload
setTimeout(() => {
window.location.reload();
}, 1500);
updateMatchZipStatus(matchId);
}, 500);
} else {
statusDiv.className = 'upload-status error';
statusDiv.textContent = 'Finalization failed: ' + (data.error || 'Unknown error');
// Update status to failed
const statusCell = document.getElementById(`zip_status_${matchId}`);
if (statusCell) {
statusCell.innerHTML = '<span class="status-inactive">Failed</span>';
}
}
})
.catch(error => {
......@@ -661,6 +669,62 @@
});
}
function updateMatchZipStatus(matchId) {
    // Mark the match's ZIP status cell as completed.
    const statusCell = document.getElementById(`zip_status_${matchId}`);
    if (statusCell) {
        statusCell.innerHTML = '<span class="status-active">Completed</span>';
    }

    // The download/replace/delete action-cell handling was a byte-for-byte
    // duplicate of updateMatchActions; delegate instead of duplicating.
    updateMatchActions(matchId);

    // Hide the per-match progress bar and status line once the UI settles.
    setTimeout(() => {
        const progressContainer = document.getElementById(`progress_${matchId}`);
        const statusDiv = document.getElementById(`status_${matchId}`);
        if (progressContainer) progressContainer.style.display = 'none';
        if (statusDiv) statusDiv.style.display = 'none';
    }, 1000);
}
function startUpload(matchId) {
const fileInput = document.getElementById(`zip_file_${matchId}`);
const file = fileInput.files[0];
......@@ -669,6 +733,12 @@
return;
}
// Update status to uploading
const statusCell = document.getElementById(`zip_status_${matchId}`);
if (statusCell) {
statusCell.innerHTML = '<span style="color: #17a2b8;">Uploading...</span>';
}
uploadFileInChunks(file, matchId);
}
......@@ -835,13 +905,22 @@
updateFixtureProgress(100);
statusDiv.className = 'upload-status success';
statusDiv.textContent = 'Upload successful! All matches are now active.';
// Reload page after a short delay to show updated status
// Update all match statuses that were not completed
setTimeout(() => {
window.location.reload();
}, 2000);
updateFixtureMatchesStatus();
}, 500);
} else {
statusDiv.className = 'upload-status error';
statusDiv.textContent = 'Finalization failed: ' + (data.error || 'Unknown error');
// Reset match statuses that were uploading
const statusCells = document.querySelectorAll('[id^="zip_status_"]');
statusCells.forEach(cell => {
if (cell.innerHTML.includes('Uploading...')) {
cell.innerHTML = '<span style="color: #6c757d;">Pending</span>';
}
});
}
fixtureUploadActive = false;
})
......@@ -852,6 +931,67 @@
});
}
function updateFixtureMatchesStatus() {
    // After a fixture-wide upload, every match that was not already marked
    // "Completed" now has a ZIP: flip its status and refresh its action cell.
    for (const cell of document.querySelectorAll('[id^="zip_status_"]')) {
        if (cell.querySelector('.status-active')) continue;
        const matchId = cell.id.slice('zip_status_'.length);
        cell.innerHTML = '<span class="status-active">Completed</span>';
        updateMatchActions(matchId);
    }

    // Tidy the fixture-level progress indicators shortly afterwards.
    setTimeout(() => {
        const progress = document.getElementById('fixture_progress_container');
        const status = document.getElementById('fixture_upload_status');
        if (progress) progress.style.display = 'none';
        if (status) status.style.display = 'none';
    }, 1000);
}
function updateMatchActions(matchId) {
    // Rebuild the action cell for a match whose ZIP upload just completed.
    const actions = document.getElementById(`zip_actions_${matchId}`);
    if (!actions) return;

    // Ensure a "Download ZIP" link exists, placed before the upload form.
    if (!document.getElementById(`download_btn_${matchId}`)) {
        const link = document.createElement('a');
        link.id = `download_btn_${matchId}`;
        link.href = `/download/zip/${matchId}`;
        link.className = 'btn btn-success btn-sm';
        link.title = 'Download ZIP file';
        link.textContent = 'Download ZIP';

        const uploadForm = actions.querySelector('.upload-form');
        if (uploadForm) {
            actions.insertBefore(link, uploadForm);
        } else {
            actions.appendChild(link);
        }
    }

    // Flip the upload control into "replace" mode.
    const label = actions.querySelector('.upload-label');
    if (label) {
        label.textContent = 'Replace ZIP';
        label.className = 'upload-label btn-warning';
        label.title = 'Replace existing ZIP file';
    }

    // Ensure a delete form exists for removing the uploaded ZIP.
    if (!document.getElementById(`delete_form_${matchId}`)) {
        const removal = document.createElement('form');
        removal.id = `delete_form_${matchId}`;
        removal.className = 'upload-form';
        removal.method = 'POST';
        removal.action = `/upload/zip/${matchId}/delete`;
        removal.style.display = 'inline';
        removal.innerHTML = `<button type="submit" class="btn btn-danger btn-sm" title="Delete ZIP file" onclick="return confirm('Are you sure you want to delete the ZIP file for Match #${matchId}?')">Delete ZIP</button>`;
        actions.appendChild(removal);
    }
}
function startFixtureUpload() {
const fileInput = document.getElementById('fixture_zip_file');
const file = fileInput.files[0];
......@@ -865,6 +1005,15 @@
}
fixtureUploadActive = true;
// Update all non-completed match statuses to uploading
const statusCells = document.querySelectorAll('[id^="zip_status_"]');
statusCells.forEach(cell => {
if (!cell.querySelector('.status-active')) {
cell.innerHTML = '<span style="color: #17a2b8;">Uploading...</span>';
}
});
uploadFixtureFileInChunks(file);
}
......
......@@ -11,16 +11,20 @@ import shutil
import platform
from pathlib import Path
def run_command(cmd, cwd=None):
def run_command(cmd, cwd=None, capture_output=True):
"""Run a command and return the result"""
print(f"Running: {' '.join(cmd)}")
try:
if capture_output:
result = subprocess.run(cmd, cwd=cwd, check=True, capture_output=True, text=True)
if result.stdout:
print(result.stdout)
else:
result = subprocess.run(cmd, cwd=cwd, check=True)
return True
except subprocess.CalledProcessError as e:
print(f"Error: {e}")
if capture_output:
if e.stdout:
print(f"STDOUT: {e.stdout}")
if e.stderr:
......@@ -35,17 +39,47 @@ def check_python_version():
print(f"Python version: {sys.version}")
return True
def check_dependencies_installed(requirements_file):
    """Return True when every package named in *requirements_file* is importable.

    Parses each non-empty, non-comment line, dropping pip option lines
    (e.g. ``-r other.txt``, ``-e .``), extras markers (``pkg[extra]``) and
    any version specifier (``==``, ``>=``, ``<=``, ``!=``, ``~=``, ``<``,
    ``>``) or environment marker (``;``). Returns False when the file is
    missing or any listed package cannot be found.

    Note: this is a heuristic — the import name is guessed by replacing
    '-' with '_', which is the common but not universal convention.
    """
    import importlib.util
    import re

    try:
        names = []
        with open(requirements_file, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip blanks, comments, and pip options like -r/-e/--index-url.
                if not line or line.startswith('#') or line.startswith('-'):
                    continue
                # Cut at the first extras bracket, version operator or marker.
                name = re.split(r'[\[<>=!~;]', line, 1)[0].strip()
                if name:
                    names.append(name)
    except FileNotFoundError:
        return False

    for name in names:
        # find_spec probes availability without importing (executing) the module.
        if importlib.util.find_spec(name.replace('-', '_')) is None:
            return False
    return True
def install_build_dependencies():
"""Install PyInstaller and other build dependencies"""
print("Installing build dependencies...")
print("Checking build dependencies...")
# Install build requirements
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements-build.txt", "--break-system-packages"]):
# Check if build dependencies are already installed
if check_dependencies_installed("requirements-build.txt"):
print("Build dependencies already installed, skipping...")
else:
print("Installing build dependencies...")
# Try without --break-system-packages first
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements-build.txt"], capture_output=False):
print("Trying with --break-system-packages...")
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements-build.txt", "--break-system-packages"], capture_output=False):
print("Failed to install build dependencies")
return False
# Install runtime requirements
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements.txt", "--break-system-packages"]):
# Check if runtime dependencies are already installed
if check_dependencies_installed("requirements.txt"):
print("Runtime dependencies already installed, skipping...")
else:
print("Installing runtime dependencies...")
# Try without --break-system-packages first
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements.txt"], capture_output=False):
print("Trying with --break-system-packages...")
if not run_command([sys.executable, "-m", "pip", "install", "-r", "requirements.txt", "--break-system-packages"], capture_output=False):
print("Failed to install runtime dependencies")
return False
......
# Database Configuration
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_USER=fixture_user
MYSQL_PASSWORD=secure_password_here
MYSQL_DATABASE=fixture_manager
# Security Configuration
SECRET_KEY=your-secret-key-here-change-in-production
JWT_SECRET_KEY=your-jwt-secret-key-here
BCRYPT_LOG_ROUNDS=12
# File Upload Configuration
UPLOAD_FOLDER=/var/lib/fixture-daemon/uploads
MAX_CONTENT_LENGTH=524288000
CHUNK_SIZE=8192
MAX_CONCURRENT_UPLOADS=5
# Daemon Configuration
DAEMON_PID_FILE=/var/run/fixture-daemon.pid
DAEMON_LOG_FILE=/var/log/fixture-daemon.log
DAEMON_WORKING_DIR=/var/lib/fixture-daemon
# Web Server Configuration
HOST=0.0.0.0
PORT=5000
DEBUG=false
# Logging Configuration
LOG_LEVEL=INFO
# JWT Configuration
JWT_ACCESS_TOKEN_EXPIRES=3600
\ No newline at end of file
# Fixture Manager - Comprehensive Python Daemon System
A sophisticated Python daemon system for Linux servers with internet exposure, implementing a secure web dashboard and RESTful API with robust authentication mechanisms. The system provides advanced file upload capabilities with real-time progress tracking and a comprehensive fixture management system.
## Features
### Core Functionality
- **Secure Web Dashboard**: Modern web interface with authentication and authorization
- **RESTful API**: Comprehensive API with JWT authentication
- **MySQL Database Integration**: Robust database connectivity with connection pooling
- **Advanced File Upload System**: Real-time progress tracking with SHA1 checksum verification
- **Dual-Format Support**: Intelligent parsing of CSV/XLSX fixture files
- **Two-Stage Upload Workflow**: Fixture files followed by mandatory ZIP uploads
- **Daemon Process Management**: Full Linux daemon with systemd integration
### Security Features
- **Multi-layer Authentication**: Session-based and JWT token authentication
- **API Token Management**: User-generated tokens for external application access
- **Rate Limiting**: Protection against brute force attacks
- **File Validation**: Comprehensive security checks and malicious content detection
- **SQL Injection Protection**: Parameterized queries and ORM usage
- **CSRF Protection**: Cross-site request forgery prevention
- **Security Headers**: Comprehensive HTTP security headers
- **Input Sanitization**: All user inputs are validated and sanitized
### Database Schema
- **Normalized Design**: Optimized relational database structure
- **Primary Matches Table**: Core fixture data with system fields and status tracking
- **Match Status System**: Comprehensive status tracking with 8 predefined states
- **Secondary Outcomes Table**: Dynamic result columns with foreign key relationships
- **API Token Management**: Secure token storage with usage tracking
- **File Upload Tracking**: Complete upload lifecycle management
- **System Logging**: Comprehensive audit trail
- **Session Management**: Secure user session handling
## Installation
### Prerequisites
- Linux server (Ubuntu 18.04+, CentOS 7+, or similar)
- Python 3.8+
- MySQL 5.7+ or MariaDB 10.3+
- Root or sudo access
### Quick Installation
```bash
# Clone the repository
git clone <repository-url>
cd fixture-manager
# Make installation script executable
chmod +x install.sh
# Run installation (as root)
sudo ./install.sh
```
### Manual Installation
1. **Install System Dependencies**:
```bash
# Ubuntu/Debian
apt-get update
apt-get install python3 python3-pip python3-venv mysql-server nginx supervisor
# CentOS/RHEL
yum install python3 python3-pip mysql-server nginx supervisor
```
2. **Create System User**:
```bash
useradd --system --home-dir /var/lib/fixture-daemon fixture
```
3. **Install Python Dependencies**:
```bash
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```
4. **Configure Database**:
```bash
mysql -u root -p < database/schema.sql
```
5. **Configure Environment**:
```bash
cp .env.example .env
# Edit .env with your configuration
# Note: For PyInstaller deployments, configuration will migrate to mbetterd.conf automatically
```
## Configuration
### Configuration File (mbetterd.conf)
The system automatically migrates from `.env` to `mbetterd.conf` stored in persistent directories for PyInstaller compatibility. Configuration settings include:
```bash
# Database Configuration
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_USER=fixture_user
MYSQL_PASSWORD=secure_password
MYSQL_DATABASE=fixture_manager
# Security Configuration
SECRET_KEY=your-secret-key-here
JWT_SECRET_KEY=your-jwt-secret-key
BCRYPT_LOG_ROUNDS=12
# File Upload Configuration
UPLOAD_FOLDER=/var/lib/fixture-daemon/uploads
MAX_CONTENT_LENGTH=524288000 # 500MB
MAX_CONCURRENT_UPLOADS=5
# Server Configuration
HOST=0.0.0.0
PORT=5000
DEBUG=false
```
### Database Schema
The system automatically creates the following tables:
- `users` - User authentication and management
- `matches` - Core fixture data with system fields and status tracking
- `match_outcomes` - Dynamic outcome results
- `api_tokens` - User-generated API tokens for external access
- `file_uploads` - Upload tracking and progress
- `system_logs` - Comprehensive logging
- `user_sessions` - Session management
### Match Status System
The matches table includes a comprehensive status tracking system with 8 predefined states:
**Status Values:**
- `pending` - Initial state, match created but not processed
- `scheduled` - Match scheduled for future processing
- `bet` - Match available for betting
- `ingame` - Match currently in progress
- `cancelled` - Match cancelled
- `failed` - Match processing failed
- `paused` - Match temporarily paused
- `done` - Match completed successfully
**Active Status Criteria:**
A match is considered "active" when ALL of these conditions are met:
1. **ZIP Upload Status**: `zip_upload_status == 'completed'`
2. **ZIP File Checksum**: `zip_sha1sum` is present (not null/empty)
3. **Match Status**: `status == 'pending'` (new requirement)
**Status Features:**
- **Automatic Migration**: New column added via database migration system
- **Default Value**: All new matches default to 'pending' status
- **Web Interface**: Status displayed in match detail pages
- **API Integration**: Status available via REST API endpoints
- **Backward Compatibility**: Existing matches retain their current state
## Usage
### Daemon Management
```bash
# Start the daemon
sudo systemctl start fixture-daemon
# Stop the daemon
sudo systemctl stop fixture-daemon
# Restart the daemon
sudo systemctl restart fixture-daemon
# Check status
sudo systemctl status fixture-daemon
# View logs
journalctl -u fixture-daemon -f
```
### Direct Daemon Control
```bash
# Start in foreground (for debugging)
python daemon.py start --foreground
# Start as daemon
python daemon.py start
# Stop daemon
python daemon.py stop
# Restart daemon
python daemon.py restart
# Check status
python daemon.py status
# Reload configuration
python daemon.py reload
```
### Web Interface
Access the web dashboard at `http://your-server-ip/`
**Default Credentials**:
- Username: `admin`
- Password: `admin123`
**⚠️ Important**: Change the default password immediately after installation!
### API Usage
#### Authentication Methods
**1. Session-Based Authentication (Web Interface)**
```bash
# Login via web interface
curl -X POST http://your-server/auth/login \
-H "Content-Type: application/json" \
-d '{"username": "admin", "password": "admin123"}'
```
**2. API Token Authentication (Recommended for External Apps)**
```bash
# Use API token in Authorization header (recommended)
curl -H "Authorization: Bearer YOUR_API_TOKEN" \
http://your-server/api/fixtures
# Alternative: Use X-API-Token header
curl -H "X-API-Token: YOUR_API_TOKEN" \
http://your-server/api/matches
# Alternative: Use query parameter (less secure)
curl "http://your-server/api/match/123?token=YOUR_API_TOKEN"
```
**3. JWT Token Authentication (Legacy)**
```bash
# Login and get JWT token
curl -X POST http://your-server/auth/api/login \
-H "Content-Type: application/json" \
-d '{"username": "admin", "password": "admin123"}'
```
#### Upload Fixture File
```bash
# Upload CSV/XLSX fixture file
curl -X POST http://your-server/upload/api/fixture \
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
-F "file=@fixtures.csv"
```
#### Upload ZIP File
```bash
# Upload ZIP file for specific match
curl -X POST http://your-server/upload/api/zip/123 \
-H "Authorization: Bearer YOUR_JWT_TOKEN" \
-F "file=@match_data.zip"
```
#### Get Matches
```bash
# Get all matches with pagination
curl -X GET "http://your-server/api/matches?page=1&per_page=20" \
-H "Authorization: Bearer YOUR_API_TOKEN"
```
#### Get Fixtures
```bash
# Get all fixtures
curl -X GET "http://your-server/api/fixtures" \
-H "Authorization: Bearer YOUR_API_TOKEN"
```
#### Get Match Details
```bash
# Get specific match with outcomes
curl -X GET "http://your-server/api/match/123" \
-H "Authorization: Bearer YOUR_API_TOKEN"
```
#### Get Fixture Updates (New!)
The `/api/updates` endpoint provides incremental synchronization for fixture data:
```bash
# Get last N fixtures (default behavior, N configured in system settings)
curl -X GET "http://your-server/api/updates" \
-H "Authorization: Bearer YOUR_API_TOKEN"
# Get fixtures updated after specific unix timestamp
curl -X GET "http://your-server/api/updates?from=1704067200" \
-H "Authorization: Bearer YOUR_API_TOKEN"
# POST method also supported with JSON body
curl -X POST "http://your-server/api/updates" \
-H "Authorization: Bearer YOUR_API_TOKEN" \
-H "Content-Type: application/json" \
-d '{"from": 1704067200}'
# Get recent fixtures without timestamp filter
curl -X POST "http://your-server/api/updates" \
-H "Authorization: Bearer YOUR_API_TOKEN" \
-H "Content-Type: application/json" \
-d '{}'
```
**Features:**
- **Incremental Updates**: Use `from` parameter for efficient data synchronization
- **Flexible Methods**: Supports both GET (query params) and POST (JSON body)
- **Configurable Limits**: Respects system setting for maximum fixtures returned
- **Authenticated ZIP Downloads**: Secure direct download URLs with token authentication
- **Hybrid Authentication**: Works with both JWT and API tokens automatically
- **Smart Fallback**: Gracefully handles existing data without active timestamps
#### Download ZIP Files (Authenticated)
```bash
# Download ZIP file for specific match (requires authentication)
curl -X GET "http://your-server/api/download/zip/123" \
-H "Authorization: Bearer YOUR_API_TOKEN" \
-o "match_123.zip"
```
## File Format Requirements
### Fixture Files (CSV/XLSX)
**Required Columns**:
- `Match #` (integer) - Unique match identifier
- `Fighter1 (Township)` (varchar255) - First fighter details
- `Fighter2 (Township)` (varchar255) - Second fighter details
- `Venue (Kampala Township)` (varchar255) - Match venue
**Optional Columns**:
- Any numeric columns will be automatically detected as outcome results
- Values must be numeric (float with 2-decimal precision)
**Example CSV**:
```csv
Match #,Fighter1 (Township),Fighter2 (Township),Venue (Kampala Township),Score1,Score2,Duration
1,John Doe (Central),Jane Smith (North),Stadium A (Kampala),85.5,92.3,12.5
2,Mike Johnson (East),Sarah Wilson (West),Arena B (Kampala),78.2,81.7,15.2
```
### ZIP Files
- Must be uploaded after fixture file processing
- Associated with specific match records
- Triggers match activation upon successful upload
- SHA1 checksum verification for integrity
## Architecture
### System Components
1. **Flask Web Application**: Core web framework with blueprints
2. **SQLAlchemy ORM**: Database abstraction and management
3. **JWT Authentication**: Stateless API authentication
4. **File Upload Handler**: Chunked uploads with progress tracking
5. **Fixture Parser**: Intelligent CSV/XLSX parsing
6. **Security Layer**: Multi-layer security implementation
7. **Logging System**: Comprehensive audit and monitoring
8. **Daemon Manager**: Linux daemon process management
### Security Architecture
- **Authentication**: Multi-factor with session and JWT support
- **Authorization**: Role-based access control (RBAC)
- **Input Validation**: Comprehensive sanitization and validation
- **File Security**: Malicious content detection and quarantine
- **Network Security**: Rate limiting and DDoS protection
- **Data Protection**: Encryption at rest and in transit
### Database Design
- **Normalized Schema**: Third normal form compliance
- **Foreign Key Constraints**: Referential integrity
- **Indexing Strategy**: Optimized query performance
- **Transaction Management**: ACID compliance
- **Connection Pooling**: Efficient resource utilization
## Monitoring and Maintenance
### Log Files
- **Application Logs**: `/var/log/fixture-daemon.log`
- **System Logs**: `journalctl -u fixture-daemon`
- **Database Logs**: MySQL error logs
- **Web Server Logs**: Nginx access/error logs
### Health Monitoring
```bash
# Check system health
curl http://your-server/health
# Get system statistics
curl -H "Authorization: Bearer TOKEN" http://your-server/api/statistics
```
### Backup and Recovery
```bash
# Manual backup
/opt/fixture-manager/backup.sh
# Restore from backup
mysql -u fixture_user -p fixture_manager < backup.sql
```
### Maintenance Tasks
The daemon automatically performs:
- **Session Cleanup**: Expired sessions removed hourly
- **Log Rotation**: Old logs archived daily
- **File Cleanup**: Failed uploads cleaned every 6 hours
- **Database Optimization**: Statistics updated nightly
## Troubleshooting
### Common Issues
1. **Database Connection Failed**
```bash
# Check MySQL service
systemctl status mysql
# Verify credentials
mysql -u fixture_user -p
```
2. **File Upload Errors**
```bash
# Check permissions
ls -la /var/lib/fixture-daemon/uploads
# Check disk space
df -h
```
3. **Daemon Won't Start**
```bash
# Check logs
journalctl -u fixture-daemon -n 50
# Test configuration
python daemon.py start --foreground
```
4. **Permission Denied**
```bash
# Fix ownership
chown -R fixture:fixture /var/lib/fixture-daemon
# Fix permissions
chmod 755 /opt/fixture-manager
```
### Debug Mode
```bash
# Run in debug mode
export DEBUG=true
python daemon.py start --foreground --config development
```
## API Token Management
### Creating API Tokens
**Via Web Interface:**
1. Login to the web dashboard
2. Navigate to "API Tokens" from the main navigation
3. Click "Create New Token"
4. Provide a descriptive name (e.g., "Mobile App", "Dashboard Integration")
5. Copy the generated token immediately (it's only shown once)
6. Use the token in your external applications
**Token Features:**
- **Secure Generation**: Cryptographically secure random tokens
- **Named Tokens**: Descriptive names for easy identification
- **Expiration Management**: Default 1-year expiration, extendable
- **Usage Tracking**: Last used timestamp and IP address
- **Lifecycle Management**: Revoke, extend, or delete tokens
- **Security**: SHA256 hashed storage, one-time display
### Token Management Operations
**Create Token:**
```bash
# Via API (requires session authentication)
curl -X POST http://your-server/profile/tokens/create \
-H "Content-Type: application/json" \
-H "Cookie: session=YOUR_SESSION_COOKIE" \
-d '{"name": "My API Integration"}'
```
**List User Tokens:**
```bash
# Via web interface at /profile/tokens
# Shows all tokens with status, creation date, expiration, and usage info
```
**Revoke Token:**
```bash
# Via API (requires session authentication)
curl -X POST http://your-server/profile/tokens/123/revoke \
-H "Cookie: session=YOUR_SESSION_COOKIE"
```
**Extend Token Expiration:**
```bash
# Via API (requires session authentication)
curl -X POST http://your-server/profile/tokens/123/extend \
-H "Content-Type: application/json" \
-H "Cookie: session=YOUR_SESSION_COOKIE" \
-d '{"days": 365}'
```
**Delete Token:**
```bash
# Via API (requires session authentication)
curl -X DELETE http://your-server/profile/tokens/123/delete \
-H "Cookie: session=YOUR_SESSION_COOKIE"
```
## API Documentation
### Authentication Endpoints
- `POST /auth/api/login` - User login
- `POST /auth/api/logout` - User logout
- `POST /auth/api/refresh` - Refresh JWT token
- `GET /auth/api/profile` - Get user profile
### Token Management Endpoints
- `GET /profile/tokens` - Token management page (web interface)
- `POST /profile/tokens/create` - Create new API token
- `POST /profile/tokens/{id}/revoke` - Revoke API token
- `POST /profile/tokens/{id}/extend` - Extend token expiration
- `DELETE /profile/tokens/{id}/delete` - Delete API token
### Protected API Endpoints (Require API Token)
- `GET /api/fixtures` - List all fixtures with match counts
- `GET /api/matches` - List matches with pagination and filtering
- `GET /api/match/{id}` - Get match details with outcomes
- `GET|POST /api/updates` - **New!** Get fixture updates with incremental sync support
### Upload Endpoints
- `POST /upload/api/fixture` - Upload fixture file
- `POST /upload/api/zip/{match_id}` - Upload ZIP file
- `GET /upload/api/progress/{upload_id}` - Get upload progress
- `GET /upload/api/uploads` - List user uploads
### Administration
- `GET /api/admin/users` - List users (admin)
- `PUT /api/admin/users/{id}` - Update user (admin)
- `GET /api/admin/logs` - System logs (admin)
- `GET /api/admin/system-info` - System information (admin)
## Performance Optimization
### Database Optimization
- Connection pooling with 10 connections
- Query optimization with proper indexing
- Prepared statements for security
- Transaction batching for bulk operations
### File Upload Optimization
- Chunked uploads for large files
- Concurrent upload support (configurable)
- Progress tracking with minimal overhead
- Automatic cleanup of failed uploads
### Caching Strategy
- Session caching with Redis (optional)
- Static file caching with Nginx
- Database query result caching
- API response caching for read-heavy endpoints
## Security Considerations
### Production Deployment
1. **Change Default Credentials**: Update admin password immediately
2. **SSL/TLS Configuration**: Enable HTTPS with valid certificates
3. **Firewall Configuration**: Restrict access to necessary ports only
4. **Regular Updates**: Keep system and dependencies updated
5. **Backup Strategy**: Implement regular automated backups
6. **Monitoring**: Set up comprehensive monitoring and alerting
### Security Best Practices
- Regular security audits
- Penetration testing
- Vulnerability scanning
- Access log monitoring
- Incident response procedures
## Building Single Executable
The project can be packaged as a single executable file for easy distribution with **cross-platform persistent directories**:
### Quick Build
```bash
# Run the automated build script
python build.py
```
### Manual Build
```bash
# Install build dependencies
pip install -r requirements-build.txt
# Build with PyInstaller
pyinstaller --clean fixture-manager.spec
```
The executable will be created in the `dist/` directory and includes:
- All Python dependencies
- Complete Flask application
- Database utilities and models
- Web dashboard and API
- Configuration templates
- **Cross-platform persistent directory support**
**Executable Size**: ~80-120MB
**No Python Installation Required** on target systems
**Cross-Platform Compatibility**: Windows, macOS, and Linux
### PyInstaller Features
- **Persistent Data Storage**: Files persist between application restarts
- **Cross-Platform Directories**: Uses OS-appropriate locations (AppData, Library, /opt)
- **Configuration Migration**: Automatic .env to mbetterd.conf migration
- **Upload Directory Persistence**: ZIP files and fixtures stored outside temp directories
- **Platform Detection**: Automatic PyInstaller environment detection
See [BUILD.md](BUILD.md) for detailed build instructions and troubleshooting.
## Contributing
1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Add tests for new functionality
5. Submit a pull request
## License
This project is licensed under the MIT License - see the LICENSE file for details.
## Support
For support and questions:
- Check the troubleshooting section
- Review system logs
- See BUILD.md for executable build issues
- Contact system administrator
## API Token Security Best Practices
### For Developers
1. **Store Tokens Securely**: Never commit tokens to version control
2. **Use Environment Variables**: Store tokens in environment variables or secure config files
3. **Rotate Tokens Regularly**: Generate new tokens periodically and revoke old ones
4. **Monitor Usage**: Check token usage logs for suspicious activity
5. **Use Descriptive Names**: Name tokens clearly to identify their purpose
6. **Minimum Permissions**: Only use tokens for their intended purpose
### For System Administrators
1. **Monitor Token Activity**: Review token usage logs regularly
2. **Set Expiration Policies**: Enforce reasonable token expiration periods
3. **Audit Token Access**: Regular audits of active tokens and their usage
4. **Revoke Unused Tokens**: Remove tokens that haven't been used recently
5. **Secure Database**: Ensure API token table is properly secured
6. **Backup Considerations**: Include token management in backup/recovery procedures
### Example Integration
**Python Example:**
```python
import requests
# Store token securely (environment variable recommended)
API_TOKEN = "your-api-token-here"
BASE_URL = "http://your-server"
headers = {
"Authorization": f"Bearer {API_TOKEN}",
"Content-Type": "application/json"
}
# Get all fixtures
response = requests.get(f"{BASE_URL}/api/fixtures", headers=headers)
fixtures = response.json()
# Get specific match details
match_id = 123
response = requests.get(f"{BASE_URL}/api/match/{match_id}", headers=headers)
match_details = response.json()
```
**JavaScript Example:**
```javascript
const API_TOKEN = process.env.API_TOKEN; // Store in environment variable
const BASE_URL = 'http://your-server';
const headers = {
'Authorization': `Bearer ${API_TOKEN}`,
'Content-Type': 'application/json'
};
// Get all matches
fetch(`${BASE_URL}/api/matches?page=1&per_page=20`, { headers })
.then(response => response.json())
.then(data => console.log(data));
// Get fixtures
fetch(`${BASE_URL}/api/fixtures`, { headers })
.then(response => response.json())
.then(data => console.log(data));
```
**cURL Examples:**
```bash
# Set token as environment variable
export API_TOKEN="your-api-token-here"
# Get fixtures
curl -H "Authorization: Bearer $API_TOKEN" \
http://your-server/api/fixtures
# Get matches with filtering
curl -H "Authorization: Bearer $API_TOKEN" \
"http://your-server/api/matches?fixture_id=abc123&active_only=true"
# Get specific match
curl -H "Authorization: Bearer $API_TOKEN" \
http://your-server/api/match/123
```
---
**Version**: 1.2.3
**Last Updated**: 2025-08-26
**Minimum Requirements**: Python 3.8+, MySQL 5.7+, Linux/Windows/macOS
### Recent Updates (v1.2.3) - Match Status System
- **Match Status Column**: Added comprehensive status tracking system to matches table
- 8 predefined status values: pending, scheduled, bet, ingame, cancelled, failed, paused, done
- Automatic database migration with default 'pending' status for new matches
- Web interface integration with status display in match detail pages
- API endpoints include status information in responses
- Backward compatibility maintained for existing matches
- **New Migration_007**: Ensures 'done' status is available in existing ENUM columns
- Safely adds 'done' to existing status ENUM if not present
- Handles databases created before the 'done' status was added
- Includes proper rollback support for migration reversal
- **Configuration Auto-Migration**: Enhanced persistent directory configuration system
- Automatic copying of .env to persistent config directory when mbetterd.conf doesn't exist
- Improved cross-platform directory detection (Linux /opt, macOS ~/Library, Windows %APPDATA%)
- Better error handling and fallback mechanisms for configuration loading
### Previous Updates (v1.2.2) - Bug Fix
- **Fixture Parser Fighter Column Fix**: Fixed critical bug where both fighter1 and fighter2 were incorrectly mapped to fighter1 column during XLSX upload
- Enhanced [`FixtureParser.detect_required_columns()`](app/upload/fixture_parser.py:179) with specific fighter number matching logic
- Prevents cross-mapping of fighter columns during partial column name matching
- Ensures accurate fighter data separation in database records
- Maintains compatibility with all existing column naming conventions
### Updates (v1.2.1) - PyInstaller Enhancement
- **Cross-Platform Persistent Directories**: Windows (%APPDATA%), macOS (~/Library/Application Support), Linux (/opt/MBetter)
- **Configuration Migration**: Automatic .env to mbetterd.conf migration for PyInstaller deployments
- **Authenticated ZIP Downloads**: Secure API endpoint for ZIP file downloads with token authentication
- **PyInstaller Detection**: Automatic detection and optimization for PyInstaller environments
- **Persistent Upload Storage**: Uploads stored outside PyInstaller temp directories
- **Migration Utility**: migrate_config.py script for environment transition
- **Platform-Specific Paths**: OS-appropriate directory structures for all platforms
### Updates (v1.2.0) - API Enhancement
- **New `/api/updates` Endpoint**: Incremental fixture synchronization with timestamp-based filtering
- **Hybrid Authentication**: JWT and API token support with automatic fallback
- **Fixture Active Time Tracking**: Automatic timestamp management for fixture activation
- **SHA1-based ZIP Naming**: Consistent file naming across all upload methods
- **Configurable API Limits**: System setting for controlling API response sizes
- **Data Backfill Utility**: Migration tool for existing fixture data
- **Enhanced Database Schema**: New indexed columns and optimized queries
- **Flexible HTTP Methods**: Both GET and POST support for API endpoints
- **Fallback Mechanisms**: Graceful degradation for legacy data compatibility
### Previous Updates (v1.1.0)
- **API Token Management**: Complete user-generated token system
- **Enhanced Security**: SHA256 token hashing with usage tracking
- **Web Interface**: Professional token management UI
- **Multiple Auth Methods**: Bearer tokens, headers, and query parameters
- **Token Lifecycle**: Create, revoke, extend, and delete operations
- **Usage Monitoring**: Last used timestamps and IP tracking
- **Database Migration**: Automatic schema updates with versioning
- **REST API Endpoints**: Protected fixture and match data access
- **Documentation**: Comprehensive API and security guidelines
\ No newline at end of file
#!/bin/bash
# Fixture Manager Installation Script
# Comprehensive installation script for Linux servers
set -e # Exit on any error
# Colors for output
# ANSI escape sequences consumed by the log_* helpers below via `echo -e`.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Configuration
# Global install layout; referenced by every function below and baked into
# the generated systemd unit, nginx site and logrotate config.
PROJECT_NAME="fixture-manager"
SERVICE_NAME="fixture-daemon"
INSTALL_DIR="/opt/fixture-manager"
DATA_DIR="/var/lib/fixture-daemon"
LOG_DIR="/var/log"
CONFIG_DIR="/etc/fixture-manager"
# Dedicated system account the daemon runs as.
USER="fixture"
GROUP="fixture"
# Functions
# Shared emitter: prints "<color>[LABEL]<reset> message" to stdout.
_log_msg() {
    echo -e "${1}[${2}]${NC} ${3}"
}

# Informational progress message (blue).
log_info()    { _log_msg "$BLUE" "INFO" "$1"; }

# Completed-step confirmation (green).
log_success() { _log_msg "$GREEN" "SUCCESS" "$1"; }

# Non-fatal warning (yellow).
log_warning() { _log_msg "$YELLOW" "WARNING" "$1"; }

# Error message (red); callers decide whether to exit.
log_error()   { _log_msg "$RED" "ERROR" "$1"; }
# Abort unless the script runs with root privileges (EUID 0).
check_root() {
    [[ $EUID -eq 0 ]] && return
    log_error "This script must be run as root"
    exit 1
}
# Identify the host distribution from /etc/os-release.
# Sets the globals OS (distro name) and VER (version id) consumed by
# install_dependencies; aborts when the file is missing.
detect_os() {
    if [[ ! -f /etc/os-release ]]; then
        log_error "Cannot detect operating system"
        exit 1
    fi
    . /etc/os-release
    OS=$NAME
    VER=$VERSION_ID
    log_info "Detected OS: $OS $VER"
}
# Install OS packages (Python toolchain, MySQL, nginx, build tools).
# Relies on $OS being set by detect_os; aborts on unsupported distros.
install_dependencies() {
log_info "Installing system dependencies..."
# Substring match on the distro NAME from /etc/os-release.
if [[ "$OS" == *"Ubuntu"* ]] || [[ "$OS" == *"Debian"* ]]; then
apt-get update
apt-get install -y \
python3 \
python3-pip \
python3-venv \
python3-dev \
mysql-server \
mysql-client \
libmysqlclient-dev \
nginx \
supervisor \
git \
curl \
wget \
unzip \
build-essential \
pkg-config
elif [[ "$OS" == *"CentOS"* ]] || [[ "$OS" == *"Red Hat"* ]] || [[ "$OS" == *"Rocky"* ]]; then
yum update -y
yum install -y \
python3 \
python3-pip \
python3-devel \
mysql-server \
mysql-devel \
nginx \
supervisor \
git \
curl \
wget \
unzip \
gcc \
gcc-c++ \
make \
pkgconfig
else
# Any other distro is unsupported; fail early before touching the system.
log_error "Unsupported operating system: $OS"
exit 1
fi
log_success "System dependencies installed"
}
# Provision the dedicated service account ($USER:$GROUP).
# Idempotent: each getent probe skips creation when the entry already exists.
create_user() {
    log_info "Creating system user and group..."

    # Group first — useradd below references it via --gid.
    getent group $GROUP > /dev/null 2>&1 || {
        groupadd --system $GROUP
        log_success "Created group: $GROUP"
    }

    # Non-login system user homed in the data directory.
    getent passwd $USER > /dev/null 2>&1 || {
        useradd --system --gid $GROUP --home-dir $DATA_DIR --shell /bin/false $USER
        log_success "Created user: $USER"
    }
}
# Lay out the install/data/config/log directory tree and hand it to the
# service account with restrictive group permissions.
create_directories() {
    log_info "Creating directories..."

    local dir
    for dir in $INSTALL_DIR $CONFIG_DIR $LOG_DIR; do
        mkdir -p $dir
    done
    mkdir -p $DATA_DIR/{uploads,backups,logs}

    # Everything under these roots belongs to the daemon user.
    for dir in $INSTALL_DIR $DATA_DIR $CONFIG_DIR; do
        chown -R $USER:$GROUP $dir
    done

    chmod 755 $INSTALL_DIR
    chmod 750 $DATA_DIR
    chmod 750 $CONFIG_DIR
    # Uploads stay world-traversable so nginx can serve from it.
    chmod 755 $DATA_DIR/uploads

    log_success "Directories created and configured"
}
# Copy the project into $INSTALL_DIR and build its Python virtualenv.
# NOTE(review): `cp -r .` copies the *current working directory* — the script
# must be run from the project root; TODO confirm there is no guard elsewhere.
install_application() {
log_info "Installing application files..."
# Copy application files
cp -r . $INSTALL_DIR/
# Create Python virtual environment
# NOTE: this cd persists for the remainder of the installer run.
cd $INSTALL_DIR
python3 -m venv venv
# Activating makes the following pip calls target the venv, not system Python.
source venv/bin/activate
# Upgrade pip
pip install --upgrade pip
# Install Python dependencies
pip install -r requirements.txt
# Set ownership
chown -R $USER:$GROUP $INSTALL_DIR
# Make daemon script executable
chmod +x $INSTALL_DIR/daemon.py
log_success "Application installed"
}
# Create the MySQL database, application user and schema, and persist the
# generated credentials to $CONFIG_DIR/database.conf (mode 600).
# Sets the global DB_PASSWORD (read back later by create_config).
configure_database() {
    log_info "Configuring MySQL database..."

    # Start MySQL service (unit name differs between Debian and RHEL families).
    systemctl start mysql || systemctl start mysqld
    systemctl enable mysql || systemctl enable mysqld

    # Generate random password. base64 output is [A-Za-z0-9+/=] only, so it is
    # safe to interpolate into the SQL heredoc below without quoting issues.
    DB_PASSWORD=$(openssl rand -base64 32)

    # Create database and user.
    mysql -u root <<EOF
CREATE DATABASE IF NOT EXISTS fixture_manager CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;
CREATE USER IF NOT EXISTS 'fixture_user'@'localhost' IDENTIFIED BY '$DB_PASSWORD';
GRANT ALL PRIVILEGES ON fixture_manager.* TO 'fixture_user'@'localhost';
FLUSH PRIVILEGES;
EOF

    # Execute schema. The password is passed via the MYSQL_PWD environment
    # variable instead of the original `-p$DB_PASSWORD` so it is not exposed
    # in the process list (`ps`) while the import runs.
    MYSQL_PWD="$DB_PASSWORD" mysql -u fixture_user fixture_manager < "$INSTALL_DIR/database/schema.sql"

    # Save database credentials for the application and the backup script.
    cat > "$CONFIG_DIR/database.conf" <<EOF
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_USER=fixture_user
MYSQL_PASSWORD=$DB_PASSWORD
MYSQL_DATABASE=fixture_manager
EOF

    chmod 600 "$CONFIG_DIR/database.conf"
    chown "$USER:$GROUP" "$CONFIG_DIR/database.conf"

    log_success "Database configured"
}
# Write the main application configuration ($CONFIG_DIR/config.env) and
# symlink it into the install dir as .env for the Flask app.
# Fix: the original extracted the DB password with `cut -d'=' -f2`, which
# truncates base64 passwords at their '=' padding; `-f2-` keeps everything
# after the first '='.
create_config() {
    log_info "Creating configuration files..."

    # Generate secret keys.
    SECRET_KEY=$(openssl rand -base64 32)
    JWT_SECRET_KEY=$(openssl rand -base64 32)

    # Reuse the password saved by configure_database.
    local db_password
    db_password=$(grep MYSQL_PASSWORD "$CONFIG_DIR/database.conf" | cut -d'=' -f2-)

    # Create main configuration.
    cat > "$CONFIG_DIR/config.env" <<EOF
# Database Configuration
MYSQL_HOST=localhost
MYSQL_PORT=3306
MYSQL_USER=fixture_user
MYSQL_PASSWORD=$db_password
MYSQL_DATABASE=fixture_manager
# Security Configuration
SECRET_KEY=$SECRET_KEY
JWT_SECRET_KEY=$JWT_SECRET_KEY
BCRYPT_LOG_ROUNDS=12
# File Upload Configuration
UPLOAD_FOLDER=$DATA_DIR/uploads
MAX_CONTENT_LENGTH=524288000
CHUNK_SIZE=8192
MAX_CONCURRENT_UPLOADS=5
# Daemon Configuration
DAEMON_PID_FILE=/var/run/fixture-daemon.pid
DAEMON_LOG_FILE=$LOG_DIR/fixture-daemon.log
DAEMON_WORKING_DIR=$DATA_DIR
# Web Server Configuration
HOST=0.0.0.0
PORT=5000
DEBUG=false
# Logging Configuration
LOG_LEVEL=INFO
# JWT Configuration
JWT_ACCESS_TOKEN_EXPIRES=3600
EOF

    chmod 600 "$CONFIG_DIR/config.env"
    chown "$USER:$GROUP" "$CONFIG_DIR/config.env"

    # Create symlink for application (.env is what the app loads).
    ln -sf "$CONFIG_DIR/config.env" "$INSTALL_DIR/.env"

    log_success "Configuration files created"
}
# Generate /etc/systemd/system/$SERVICE_NAME.service and enable it.
# The unquoted heredoc expands $USER/$GROUP/$INSTALL_DIR into the unit file.
# NOTE(review): Type=forking + PIDFile assumes daemon.py daemonizes itself and
# writes /var/run/fixture-daemon.pid — TODO confirm against daemon.py.
create_systemd_service() {
log_info "Creating systemd service..."
cat > /etc/systemd/system/$SERVICE_NAME.service <<EOF
[Unit]
Description=Fixture Manager Daemon
After=network.target mysql.service
Requires=mysql.service
[Service]
Type=forking
User=$USER
Group=$GROUP
WorkingDirectory=$INSTALL_DIR
Environment=PATH=$INSTALL_DIR/venv/bin
ExecStart=$INSTALL_DIR/venv/bin/python $INSTALL_DIR/daemon.py start --config production
ExecStop=$INSTALL_DIR/venv/bin/python $INSTALL_DIR/daemon.py stop --config production
ExecReload=$INSTALL_DIR/venv/bin/python $INSTALL_DIR/daemon.py reload --config production
PIDFile=/var/run/fixture-daemon.pid
Restart=always
RestartSec=10
# Security settings
NoNewPrivileges=true
PrivateTmp=true
ProtectSystem=strict
ProtectHome=true
ReadWritePaths=$DATA_DIR $LOG_DIR /var/run
[Install]
WantedBy=multi-user.target
EOF
# Reload systemd and enable service
systemctl daemon-reload
systemctl enable $SERVICE_NAME
log_success "Systemd service created"
}
# Write the nginx reverse-proxy site and activate it.
# The unquoted heredoc expands shell vars ($PROJECT_NAME, $INSTALL_DIR);
# nginx runtime variables are protected with \$ so they survive expansion.
# NOTE(review): sites-available/sites-enabled is a Debian/Ubuntu convention —
# on the RHEL-family distros this script also supports, nginx uses conf.d and
# this cat will fail on the missing directory; TODO confirm/handle.
configure_nginx() {
log_info "Configuring Nginx reverse proxy..."
cat > /etc/nginx/sites-available/$PROJECT_NAME <<EOF
server {
listen 80;
server_name _;
# Security headers
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header X-XSS-Protection "1; mode=block";
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
# File upload size limit
client_max_body_size 500M;
location / {
proxy_pass http://127.0.0.1:5000;
proxy_set_header Host \$host;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto \$scheme;
# Timeout settings for large file uploads
proxy_connect_timeout 300s;
proxy_send_timeout 300s;
proxy_read_timeout 300s;
}
# Static files (if any)
location /static {
alias $INSTALL_DIR/app/static;
expires 1y;
add_header Cache-Control "public, immutable";
}
# Health check endpoint
location /health {
proxy_pass http://127.0.0.1:5000/health;
access_log off;
}
}
EOF
# Enable site
ln -sf /etc/nginx/sites-available/$PROJECT_NAME /etc/nginx/sites-enabled/
# Remove default site
rm -f /etc/nginx/sites-enabled/default
# Test and reload nginx
nginx -t
systemctl enable nginx
systemctl restart nginx
log_success "Nginx configured"
}
# Install a logrotate policy for the daemon log: daily rotation, 30 kept,
# compressed one cycle late; the daemon is reloaded after each rotation.
setup_logrotate() {
log_info "Setting up log rotation..."
cat > /etc/logrotate.d/$SERVICE_NAME <<EOF
$LOG_DIR/fixture-daemon.log {
daily
missingok
rotate 30
compress
delaycompress
notifempty
create 644 $USER $GROUP
postrotate
systemctl reload $SERVICE_NAME > /dev/null 2>&1 || true
endscript
}
EOF
log_success "Log rotation configured"
}
# Open SSH/HTTP/HTTPS on whichever firewall front-end is present.
# Falls through with a warning if neither ufw nor firewalld is installed.
setup_firewall() {
log_info "Configuring firewall..."
if command -v ufw &> /dev/null; then
# Ubuntu/Debian UFW
ufw allow 22/tcp
ufw allow 80/tcp
ufw allow 443/tcp
ufw --force enable
log_success "UFW firewall configured"
elif command -v firewall-cmd &> /dev/null; then
# CentOS/RHEL firewalld
firewall-cmd --permanent --add-service=ssh
firewall-cmd --permanent --add-service=http
firewall-cmd --permanent --add-service=https
firewall-cmd --reload
log_success "Firewalld configured"
else
log_warning "No firewall detected. Please configure manually."
fi
}
# Install the nightly backup script and register it in the service account's
# crontab (daily at 02:00). Fix: the original appended the cron line
# unconditionally, so every re-run of the installer created a duplicate entry;
# the entry is now added only if it is not already present.
create_backup_script() {
    log_info "Creating backup script..."

    # Quoted 'EOF' delimiter: the script body is written verbatim, with all
    # $-expansions deferred to backup-script runtime.
    cat > $INSTALL_DIR/backup.sh <<'EOF'
#!/bin/bash
# Fixture Manager Backup Script
BACKUP_DIR="/var/lib/fixture-daemon/backups"
DATE=$(date +%Y%m%d_%H%M%S)
BACKUP_FILE="fixture_manager_backup_$DATE.tar.gz"
# Load database configuration
source /etc/fixture-manager/database.conf
# Create backup directory
mkdir -p $BACKUP_DIR
# Backup database
mysqldump -u $MYSQL_USER -p$MYSQL_PASSWORD $MYSQL_DATABASE > $BACKUP_DIR/database_$DATE.sql
# Backup uploads
tar -czf $BACKUP_DIR/$BACKUP_FILE \
--exclude='*.log' \
--exclude='backups' \
/var/lib/fixture-daemon/uploads \
/etc/fixture-manager \
$BACKUP_DIR/database_$DATE.sql
# Remove temporary database dump
rm $BACKUP_DIR/database_$DATE.sql
# Keep only last 7 backups
find $BACKUP_DIR -name "fixture_manager_backup_*.tar.gz" -mtime +7 -delete
echo "Backup completed: $BACKUP_DIR/$BACKUP_FILE"
EOF

    chmod +x $INSTALL_DIR/backup.sh
    chown $USER:$GROUP $INSTALL_DIR/backup.sh

    # Add to crontab for daily backups — idempotently. grep -F matches the
    # script path literally; only append when no entry references it yet.
    if ! crontab -u $USER -l 2>/dev/null | grep -qF "$INSTALL_DIR/backup.sh"; then
        (crontab -u $USER -l 2>/dev/null; echo "0 2 * * * $INSTALL_DIR/backup.sh") | crontab -u $USER -
    fi

    log_success "Backup script created and scheduled"
}
# Start the daemon service and print its status (the service was already
# enabled in create_systemd_service; `set -e` aborts if startup fails).
start_services() {
log_info "Starting services..."
# Start and enable services
systemctl start $SERVICE_NAME
systemctl status $SERVICE_NAME --no-pager
log_success "Services started"
}
# Print a human-readable post-install summary: paths, service commands,
# web UI URL, default credentials and backup details. Output only.
print_summary() {
log_success "Installation completed successfully!"
echo
echo "=== Installation Summary ==="
echo "Application Directory: $INSTALL_DIR"
echo "Data Directory: $DATA_DIR"
echo "Configuration Directory: $CONFIG_DIR"
echo "Log File: $LOG_DIR/fixture-daemon.log"
echo "Service Name: $SERVICE_NAME"
echo "User/Group: $USER:$GROUP"
echo
echo "=== Service Management ==="
echo "Start service: systemctl start $SERVICE_NAME"
echo "Stop service: systemctl stop $SERVICE_NAME"
echo "Restart service: systemctl restart $SERVICE_NAME"
echo "View logs: journalctl -u $SERVICE_NAME -f"
echo "View app logs: tail -f $LOG_DIR/fixture-daemon.log"
echo
echo "=== Web Interface ==="
echo "URL: http://$(hostname -I | awk '{print $1}')"
echo "Default admin credentials:"
echo " Username: admin"
echo " Password: admin123"
echo
log_warning "IMPORTANT: Change the default admin password immediately!"
echo
echo "=== Configuration Files ==="
echo "Main config: $CONFIG_DIR/config.env"
echo "Database config: $CONFIG_DIR/database.conf"
echo "Nginx config: /etc/nginx/sites-available/$PROJECT_NAME"
echo
echo "=== Backup ==="
echo "Backup script: $INSTALL_DIR/backup.sh"
echo "Backup directory: $DATA_DIR/backups"
echo "Automatic daily backups at 2:00 AM"
}
# Main installation process
# Orchestrates the full install. Order matters: detect_os must precede
# install_dependencies, and configure_database must precede create_config
# (which reads the saved database.conf password). `set -e` aborts on any
# failing step.
main() {
log_info "Starting Fixture Manager installation..."
check_root
detect_os
install_dependencies
create_user
create_directories
install_application
configure_database
create_config
create_systemd_service
configure_nginx
setup_logrotate
setup_firewall
create_backup_script
start_services
print_summary
}
# Run installation
main "$@"
\ No newline at end of file
#!/bin/bash
# Fixture Manager Daemon Runner
# Thin wrapper: ensures the bundled executable is runnable, then forwards all
# arguments to it. Assumes ./fixture-manager exists in the current working
# directory — TODO confirm invocation context.
# Set executable permissions
chmod +x ./fixture-manager
# Run the daemon
./fixture-manager "$@"
-- Fixture Manager Database Schema
-- MySQL DDL Script for automated database creation
-- Create database if it doesn't exist
-- utf8mb4 gives full Unicode coverage (incl. 4-byte characters).
CREATE DATABASE IF NOT EXISTS fixture_manager
CHARACTER SET utf8mb4
COLLATE utf8mb4_unicode_ci;
USE fixture_manager;
-- Users table for authentication
-- password_hash: hashed password; presumably bcrypt (the installer writes
-- BCRYPT_LOG_ROUNDS=12 to config.env) — verify against the app's auth layer.
CREATE TABLE IF NOT EXISTS users (
id INT AUTO_INCREMENT PRIMARY KEY,
username VARCHAR(80) NOT NULL UNIQUE,
email VARCHAR(120) NOT NULL UNIQUE,
password_hash VARCHAR(255) NOT NULL,
is_active BOOLEAN DEFAULT TRUE,
is_admin BOOLEAN DEFAULT FALSE,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
last_login TIMESTAMP NULL,
INDEX idx_username (username),
INDEX idx_email (email),
INDEX idx_active (is_active)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Primary matches table storing core fixture data
-- Per the project README, a match is considered "active" when
-- zip_upload_status = 'completed', zip_sha1sum is set, and status = 'pending'.
CREATE TABLE IF NOT EXISTS matches (
id INT AUTO_INCREMENT PRIMARY KEY,
match_number INT NOT NULL UNIQUE COMMENT 'Match # from fixture file',
fighter1_township VARCHAR(255) NOT NULL COMMENT 'Fighter1 (Township)',
fighter2_township VARCHAR(255) NOT NULL COMMENT 'Fighter2 (Township)',
venue_kampala_township VARCHAR(255) NOT NULL COMMENT 'Venue (Kampala Township)',
-- System fields
start_time DATETIME NULL COMMENT 'Match start time',
end_time DATETIME NULL COMMENT 'Match end time',
result VARCHAR(255) NULL COMMENT 'Match result/outcome',
filename VARCHAR(1024) NOT NULL COMMENT 'Original fixture filename',
file_sha1sum VARCHAR(255) NOT NULL COMMENT 'SHA1 checksum of fixture file',
fixture_id VARCHAR(255) NOT NULL UNIQUE COMMENT 'Unique fixture identifier',
active_status BOOLEAN DEFAULT FALSE COMMENT 'Active status flag',
fixture_active_time BIGINT NULL COMMENT 'Unix timestamp when fixture became active',
status ENUM('pending', 'scheduled', 'bet', 'ingame', 'cancelled', 'failed', 'paused', 'done') DEFAULT 'pending' COMMENT 'Match status',
-- ZIP file related fields
zip_filename VARCHAR(1024) NULL COMMENT 'Associated ZIP filename',
zip_sha1sum VARCHAR(255) NULL COMMENT 'SHA1 checksum of ZIP file',
zip_upload_status ENUM('pending', 'uploading', 'completed', 'failed') DEFAULT 'pending',
zip_upload_progress DECIMAL(5,2) DEFAULT 0.00 COMMENT 'Upload progress percentage',
-- Metadata
created_by INT NULL COMMENT 'User who created this record',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX idx_match_number (match_number),
INDEX idx_fixture_id (fixture_id),
INDEX idx_active_status (active_status),
INDEX idx_fixture_active_time (fixture_active_time),
INDEX idx_file_sha1sum (file_sha1sum),
INDEX idx_zip_sha1sum (zip_sha1sum),
INDEX idx_zip_upload_status (zip_upload_status),
INDEX idx_created_by (created_by),
FOREIGN KEY (created_by) REFERENCES users(id) ON DELETE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Secondary outcomes table with foreign key relationships
-- One row per (match, numeric fixture column); rows are dropped with their
-- parent match via ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS match_outcomes (
id INT AUTO_INCREMENT PRIMARY KEY,
match_id INT NOT NULL COMMENT 'Foreign key to matches table',
column_name VARCHAR(255) NOT NULL COMMENT 'Result column name from fixture file',
float_value DECIMAL(10,2) NOT NULL COMMENT 'Float value with 2-decimal precision',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX idx_match_id (match_id),
INDEX idx_column_name (column_name),
INDEX idx_float_value (float_value),
FOREIGN KEY (match_id) REFERENCES matches(id) ON DELETE CASCADE,
UNIQUE KEY unique_match_column (match_id, column_name)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- File uploads tracking table
-- Tracks both fixture (CSV/XLSX) and ZIP uploads with progress/status;
-- match_id links ZIP uploads to their match, SET NULL keeps history if the
-- match or uploading user is deleted.
CREATE TABLE IF NOT EXISTS file_uploads (
id INT AUTO_INCREMENT PRIMARY KEY,
filename VARCHAR(1024) NOT NULL,
original_filename VARCHAR(1024) NOT NULL,
file_path VARCHAR(2048) NOT NULL,
file_size BIGINT NOT NULL,
file_type ENUM('fixture', 'zip') NOT NULL,
mime_type VARCHAR(255) NOT NULL,
sha1sum VARCHAR(255) NOT NULL,
upload_status ENUM('uploading', 'completed', 'failed', 'processing') DEFAULT 'uploading',
upload_progress DECIMAL(5,2) DEFAULT 0.00,
error_message TEXT NULL,
-- Associated match (for ZIP files)
match_id INT NULL,
-- User tracking
uploaded_by INT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
INDEX idx_filename (filename),
INDEX idx_sha1sum (sha1sum),
INDEX idx_upload_status (upload_status),
INDEX idx_file_type (file_type),
INDEX idx_match_id (match_id),
INDEX idx_uploaded_by (uploaded_by),
FOREIGN KEY (match_id) REFERENCES matches(id) ON DELETE SET NULL,
FOREIGN KEY (uploaded_by) REFERENCES users(id) ON DELETE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- System logs table for comprehensive logging
-- Application-level audit log; all context FKs use SET NULL so log rows
-- outlive the entities they reference.
CREATE TABLE IF NOT EXISTS system_logs (
id INT AUTO_INCREMENT PRIMARY KEY,
level ENUM('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') NOT NULL,
message TEXT NOT NULL,
module VARCHAR(255) NULL,
function_name VARCHAR(255) NULL,
line_number INT NULL,
-- Context information
user_id INT NULL,
match_id INT NULL,
upload_id INT NULL,
session_id VARCHAR(255) NULL,
-- VARCHAR(45) accommodates the full IPv6 textual form.
ip_address VARCHAR(45) NULL,
user_agent TEXT NULL,
-- Additional metadata
extra_data JSON NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
INDEX idx_level (level),
INDEX idx_created_at (created_at),
INDEX idx_user_id (user_id),
INDEX idx_match_id (match_id),
INDEX idx_upload_id (upload_id),
INDEX idx_module (module),
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE SET NULL,
FOREIGN KEY (match_id) REFERENCES matches(id) ON DELETE SET NULL,
FOREIGN KEY (upload_id) REFERENCES file_uploads(id) ON DELETE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Session management table
-- One row per login session; rows past expires_at or with is_active=FALSE are
-- presumably pruned by the cleanup task (see 'cleanup_interval' setting) -- TODO confirm.
CREATE TABLE IF NOT EXISTS user_sessions (
id INT AUTO_INCREMENT PRIMARY KEY,
session_id VARCHAR(255) NOT NULL UNIQUE,
user_id INT NOT NULL,
ip_address VARCHAR(45) NOT NULL, -- 45 chars fits the longest IPv6 textual form
user_agent TEXT NULL,
is_active BOOLEAN DEFAULT TRUE,
expires_at TIMESTAMP NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
last_activity TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
-- The UNIQUE constraint on session_id already creates a lookup index; a
-- separate idx_session_id would be an exact duplicate (extra write cost
-- for no benefit), so it is intentionally omitted.
INDEX idx_user_id (user_id),
INDEX idx_expires_at (expires_at),
INDEX idx_is_active (is_active),
-- CASCADE: removing a user removes all of their sessions
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- API Tokens table for user-generated tokens
-- Long-lived bearer tokens; only the SHA256 hash is stored, never the raw token.
CREATE TABLE IF NOT EXISTS api_tokens (
id INT AUTO_INCREMENT PRIMARY KEY,
user_id INT NOT NULL COMMENT 'User who owns this token',
name VARCHAR(255) NOT NULL COMMENT 'Descriptive name for the token',
token_hash VARCHAR(255) NOT NULL UNIQUE COMMENT 'SHA256 hash of the token',
expires_at TIMESTAMP NOT NULL COMMENT 'Token expiration time',
is_active BOOLEAN DEFAULT TRUE COMMENT 'Whether token is active',
last_used_at TIMESTAMP NULL COMMENT 'Last time token was used',
last_used_ip VARCHAR(45) NULL COMMENT 'Last IP address that used token',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
-- The UNIQUE constraint on token_hash already creates the lookup index used
-- for authentication; a separate idx_token_hash would be an exact duplicate
-- (extra write cost for no benefit), so it is intentionally omitted.
INDEX idx_user_id (user_id),
INDEX idx_expires_at (expires_at),
INDEX idx_is_active (is_active),
INDEX idx_last_used_at (last_used_at),
-- CASCADE: removing a user revokes all of their tokens
FOREIGN KEY (user_id) REFERENCES users(id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- System Settings table for configuration management
-- Key/value store for runtime configuration; values are stored as text and
-- interpreted according to setting_type by the application.
CREATE TABLE IF NOT EXISTS system_settings (
id INT AUTO_INCREMENT PRIMARY KEY,
setting_key VARCHAR(255) NOT NULL UNIQUE COMMENT 'Setting key identifier',
setting_value TEXT NOT NULL COMMENT 'Setting value (can be JSON)',
setting_type ENUM('string', 'integer', 'float', 'boolean', 'json') DEFAULT 'string' COMMENT 'Data type of the setting',
description TEXT NULL COMMENT 'Human-readable description of the setting',
category VARCHAR(100) DEFAULT 'general' COMMENT 'Setting category for organization',
is_public BOOLEAN DEFAULT FALSE COMMENT 'Whether setting can be viewed by non-admin users',
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
-- The UNIQUE constraint on setting_key already creates a lookup index; a
-- separate idx_setting_key would be an exact duplicate (extra write cost
-- for no benefit), so it is intentionally omitted.
INDEX idx_category (category),
INDEX idx_is_public (is_public)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
-- Create default admin user (password: admin123 - CHANGE IN PRODUCTION!)
-- SECURITY: this bcrypt hash corresponds to a publicly-known password checked
-- into source control; any deployment that keeps it is trivially compromised.
-- Rotate the password immediately after first login.
INSERT INTO users (username, email, password_hash, is_admin)
VALUES (
'admin',
'admin@fixture-daemon.local',
'$2b$12$LQv3c1yqBWVHxkd0LHAkCOYz6TtxMQJqhN8/LewdBPj3bp.Gm.F5e', -- admin123
TRUE
-- Self-assignment is a deliberate no-op: re-running the script leaves an
-- existing admin row (including a changed password) untouched.
) ON DUPLICATE KEY UPDATE username=username;
-- Insert default system settings
-- ON DUPLICATE KEY UPDATE setting_key=setting_key is a deliberate no-op:
-- on re-runs, existing (possibly admin-edited) values are left untouched.
INSERT INTO system_settings (setting_key, setting_value, setting_type, description, category) VALUES
('api_updates_default_count', '10', 'integer', 'Default number of fixtures returned by /api/updates when no from parameter is provided', 'api'),
('max_upload_size', '2147483648', 'integer', 'Maximum upload file size in bytes (2GB default)', 'uploads'),
('session_timeout', '3600', 'integer', 'User session timeout in seconds (1 hour default)', 'security'),
('cleanup_interval', '86400', 'integer', 'Interval for cleanup tasks in seconds (24 hours default)', 'maintenance')
ON DUPLICATE KEY UPDATE setting_key=setting_key;
-- Create indexes for performance optimization
-- Composite indexes covering the most common multi-column filter/sort patterns.
-- NOTE(review): unlike the CREATE TABLE IF NOT EXISTS statements above, MySQL's
-- CREATE INDEX has no IF NOT EXISTS clause, so re-running this script fails here
-- with "Duplicate key name" -- confirm whether the script must be re-runnable.
CREATE INDEX idx_matches_composite ON matches(active_status, zip_upload_status, created_at);
CREATE INDEX idx_matches_fixture_time ON matches(fixture_active_time, fixture_id);
CREATE INDEX idx_outcomes_composite ON match_outcomes(match_id, column_name);
CREATE INDEX idx_uploads_composite ON file_uploads(upload_status, file_type, created_at);
CREATE INDEX idx_logs_composite ON system_logs(level, created_at, user_id);
CREATE INDEX idx_tokens_composite ON api_tokens(user_id, is_active, expires_at);
-- Create views for common queries
-- active_matches: one row per active match, with its outcomes flattened into
-- a single 'column:value;column:value' string plus a count.
-- NOTE(review): GROUP_CONCAT output is silently truncated at
-- group_concat_max_len (default 1024 bytes) -- verify this is large enough
-- for the longest expected outcome list.
CREATE OR REPLACE VIEW active_matches AS
SELECT
m.*,
COUNT(mo.id) as outcome_count,
GROUP_CONCAT(CONCAT(mo.column_name, ':', mo.float_value) SEPARATOR ';') as outcomes
FROM matches m
LEFT JOIN match_outcomes mo ON m.id = mo.match_id
WHERE m.active_status = TRUE
GROUP BY m.id;
-- fixtures_with_active_time: one row per (fixture_id, active_time, filename)
-- group, summarizing how many matches it contains and how many are active.
-- Fixtures with no active time set are excluded; newest active time first.
CREATE OR REPLACE VIEW fixtures_with_active_time AS
SELECT
m.fixture_id,
m.fixture_active_time,
m.filename,
MIN(m.created_at) as created_at, -- earliest insert time within the group
COUNT(m.id) as match_count,
SUM(CASE WHEN m.active_status = TRUE THEN 1 ELSE 0 END) as active_matches
FROM matches m
WHERE m.fixture_active_time IS NOT NULL
GROUP BY m.fixture_id, m.fixture_active_time, m.filename
ORDER BY m.fixture_active_time DESC;
-- upload_summary: per-day upload statistics, broken down by file type and
-- upload status (count, total bytes, average progress).
CREATE OR REPLACE VIEW upload_summary AS
SELECT
DATE(created_at) as upload_date,
file_type,
upload_status,
COUNT(*) as count,
SUM(file_size) as total_size, -- total bytes uploaded in this bucket
AVG(upload_progress) as avg_progress
FROM file_uploads
GROUP BY DATE(created_at), file_type, upload_status;
-- Set up proper permissions (adjust as needed for your environment)
-- GRANT SELECT, INSERT, UPDATE, DELETE ON fixture_manager.* TO 'fixture_user'@'localhost';
-- FLUSH PRIVILEGES;
-- Display schema creation completion
-- Emitting a result row lets callers (and CLI users) confirm the script ran to the end.
SELECT 'Database schema created successfully!' as status;
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment