Add pagination to history page with page size selector (10, 20, 50, 100)

- Modified history route in web.py to support pagination parameters
- Added pagination controls to history.html template
- Added JavaScript functions for page navigation and page size changes
- Pagination shows current page info and total job count
- Page size selector allows 10, 20, 50, or 100 jobs per page
- Navigation includes Previous/Next buttons and numbered page buttons
parent 82840a6e
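The template and JavaScript side of this change (pagination controls, page-size selector, and the navigation handlers in history.html) is collapsed in this view, so only the server-side diff appears below. As a rough illustration of how the new query parameters are meant to be driven, the page's navigation simply re-requests the route with `page` and `per_page` set. A minimal sketch using `requests`; the base URL and the login step are placeholder assumptions, not part of this change:

```python
import requests

# Hypothetical base URL; the real deployment and port may differ.
BASE = "http://localhost:5000"

session = requests.Session()
# session.post(f"{BASE}/login", data={"username": "...", "password": "..."})  # login flow is app-specific

# Request page 2 of the history view with 50 jobs per page.
# per_page values outside {10, 20, 50, 100} fall back to 20 on the server.
resp = session.get(f"{BASE}/history", params={"page": 2, "per_page": 50})
print(resp.status_code)
```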
@@ -127,6 +127,92 @@ def handle_web_message(message: Message, client_sock=None) -> Message:
             return result
         else:
             return Message('result_pending', message.msg_id, {'status': 'pending'})
+    elif message.msg_type == 'get_stats':
+        # Get system stats including GPU information
+        import psutil
+        import torch
+
+        stats = {'status': 'Idle'}
+
+        # GPU stats (local machine)
+        stats['gpu_count'] = 0
+        stats['gpus'] = []
+
+        # Try to get actual GPU stats using pynvml (NVIDIA management library)
+        try:
+            import pynvml  # module provided by the nvidia-ml-py package
+            pynvml.nvmlInit()
+            device_count = pynvml.nvmlDeviceGetCount()
+            stats['gpu_count'] = device_count
+            stats['gpus'] = []
+            for i in range(device_count):
+                handle = pynvml.nvmlDeviceGetHandleByIndex(i)
+                name = pynvml.nvmlDeviceGetName(handle)
+                memory_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
+                utilization = pynvml.nvmlDeviceGetUtilizationRates(handle)
+                gpu = {
+                    'name': name.decode('utf-8') if isinstance(name, bytes) else str(name),
+                    'memory_used': memory_info.used / 1024**3,  # Convert bytes to GB
+                    'memory_total': memory_info.total / 1024**3,
+                    'utilization': utilization.gpu,
+                    'backend': 'cuda'
+                }
+                stats['gpus'].append(gpu)
+            pynvml.nvmlShutdown()
+        except ImportError:
+            # Fallback to PyTorch-only stats if pynvml not available
+            log_message("pynvml not available, falling back to PyTorch GPU stats")
+            if torch.cuda.is_available():
+                stats['gpu_count'] = torch.cuda.device_count()
+                stats['gpus'] = []
+                for i in range(torch.cuda.device_count()):
+                    gpu = {
+                        'name': torch.cuda.get_device_name(i),
+                        'memory_used': torch.cuda.memory_allocated(i) / 1024**3,  # GB
+                        'memory_total': torch.cuda.get_device_properties(i).total_memory / 1024**3,
+                        'utilization': 0,  # pynvml required for actual utilization
+                        'backend': 'cuda'
+                    }
+                    stats['gpus'].append(gpu)
+        except Exception as e:
+            log_message(f"Error getting GPU stats with pynvml: {e}")
+            # Fallback to PyTorch if pynvml fails
+            if torch.cuda.is_available():
+                stats['gpu_count'] = torch.cuda.device_count()
+                stats['gpus'] = []
+                for i in range(torch.cuda.device_count()):
+                    gpu = {
+                        'name': torch.cuda.get_device_name(i),
+                        'memory_used': torch.cuda.memory_allocated(i) / 1024**3,  # GB
+                        'memory_total': torch.cuda.get_device_properties(i).total_memory / 1024**3,
+                        'utilization': 0,
+                        'backend': 'cuda'
+                    }
+                    stats['gpus'].append(gpu)
+
+        # CPU and RAM (local machine)
+        stats['cpu_percent'] = psutil.cpu_percent()
+        ram = psutil.virtual_memory()
+        stats['ram_used'] = ram.used / 1024**3
+        stats['ram_total'] = ram.total / 1024**3
+
+        # Add GPU info summary
+        from .compat import detect_gpu_backends
+        gpu_info = detect_gpu_backends()
+        stats['gpu_info'] = {
+            'cuda_available': gpu_info['cuda'],
+            'rocm_available': gpu_info['rocm'],
+            'cuda_devices': gpu_info['cuda_devices'],
+            'rocm_devices': gpu_info['rocm_devices'],
+            'available_backends': [k for k, v in gpu_info.items() if k.endswith('_available') and v]
+        }
+
+        return Message('stats_response', message.msg_id, stats)
 
     return Message('error', message.msg_id, {'error': 'Unknown message type'})
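For orientation, a small smoke test of the new `get_stats` branch might look like the sketch below. The `Message(msg_type, msg_id, data)` call and the response keys are taken from the diff above; the import path is a placeholder, since the module name is not visible here.

```python
# Hypothetical import path; adjust to wherever handle_web_message and Message actually live.
from backend import Message, handle_web_message

resp = handle_web_message(Message('get_stats', 'debug-1', {}))
assert resp.msg_type == 'stats_response'

data = resp.data
print(f"CPU {data['cpu_percent']}%, RAM {data['ram_used']:.1f}/{data['ram_total']:.1f} GB")
for gpu in data['gpus']:
    print(f"{gpu['name']}: {gpu['memory_used']:.1f}/{gpu['memory_total']:.1f} GB, "
          f"{gpu['utilization']}% util ({gpu['backend']})")
```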
@@ -417,8 +417,18 @@ def jobs():
 @app.route('/history')
 @login_required
 def history():
-    """Job history page - shows completed and failed jobs."""
+    """Job history page - shows completed and failed jobs with pagination."""
     user = get_current_user_session()
+
+    # Get pagination parameters
+    page = int(request.args.get('page', 1))
+    per_page = int(request.args.get('per_page', 20))
+
+    # Validate per_page options
+    if per_page not in [10, 20, 50, 100]:
+        per_page = 20
+
     # Get all queue items for the user
     all_queue_items = get_user_queue_items(user['id'])
 
     # Filter for historical jobs: completed, failed, and cancelled jobs older than 24 hours
@@ -446,11 +456,33 @@ def history():
                 # If no timestamp, exclude it
 
     # Exclude queued and processing jobs from history
 
     # Sort by creation time (newest first)
     historical_jobs.sort(key=lambda x: x.get('created_at', ''), reverse=True)
+
+    # Calculate pagination
+    total_jobs = len(historical_jobs)
+    total_pages = (total_jobs + per_page - 1) // per_page  # Ceiling division
+
+    # Ensure page is within bounds
+    if page < 1:
+        page = 1
+    if page > total_pages and total_pages > 0:
+        page = total_pages
+
+    # Get jobs for current page
+    start_idx = (page - 1) * per_page
+    end_idx = start_idx + per_page
+    page_jobs = historical_jobs[start_idx:end_idx]
+
     return render_template('history.html',
                            user=user,
                            tokens=get_user_tokens(user["id"]),
-                           queue_items=historical_jobs,
-                           active_page='history')
+                           queue_items=page_jobs,
+                           active_page='history',
+                           page=page,
+                           per_page=per_page,
+                           total_pages=total_pages,
+                           total_jobs=total_jobs)
 
 
 @app.route('/job/<int:job_id>/delete', methods=['POST'])
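The pagination math in the hunk above is ordinary ceiling division followed by list slicing; a standalone illustration with made-up numbers (no Flask involved, only the formulas mirror the route):

```python
# Illustrative numbers only; the formulas match the history() route above.
total_jobs = 45
per_page = 20

total_pages = (total_jobs + per_page - 1) // per_page  # ceiling division -> 3

page = 3                              # already clamped to [1, total_pages] by the route
start_idx = (page - 1) * per_page     # 40
end_idx = start_idx + per_page        # 60; slicing past the end of a list is safe

historical_jobs = [f"job-{i}" for i in range(total_jobs)]
print(historical_jobs[start_idx:end_idx])  # ['job-40', ..., 'job-44'] -- the last 5 jobs
```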
@@ -655,6 +687,32 @@ def api_job_progress(job_id):
     return {'status': 'no_progress'}
 
 
+@app.route('/api/stats')
+@login_required
+def api_stats():
+    """Get system stats from backend."""
+    user = get_current_user_session()
+
+    # Send get_stats request to backend
+    import uuid
+    msg_id = str(uuid.uuid4())
+    message = Message('get_stats', msg_id, {})
+
+    try:
+        comm.connect()
+        comm.send_message(message)
+
+        # Wait for response
+        response = comm.receive_message(timeout=5)
+
+        if response and response.msg_type == 'stats_response':
+            return response.data
+        else:
+            return {'error': 'No response from backend'}, 500
+    except Exception as e:
+        log_message(f"Error getting stats from backend: {e}")
+        return {'error': str(e)}, 500
+
+
 @app.route('/update_settings', methods=['POST'])
 @login_required
 def update_settings():
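The `/api/stats` endpoint above is intended to be polled by the dashboard. A hedged sketch of a client-side poll, with the base URL and an already-authenticated session assumed rather than taken from this change:

```python
import time
import requests

BASE = "http://localhost:5000"  # hypothetical base URL
session = requests.Session()    # assumed to already carry a logged-in session cookie

for _ in range(3):
    resp = session.get(f"{BASE}/api/stats", timeout=10)
    if resp.ok:
        stats = resp.json()
        print(stats.get('status'), f"CPU {stats.get('cpu_percent')}%",
              f"{stats.get('gpu_count')} GPU(s)")
    else:
        # The route returns ({'error': ...}, 500) when the backend does not respond in time.
        print("stats error:", resp.status_code)
    time.sleep(5)
```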