Implement client-side last sync query and accumulated shortfall tracking

- Add accumulated_shortfall field to MatchModel to store historical shortfall at match completion
- Create Migration_040_AddAccumulatedShortfallToMatches for database schema update
- Update extraction flow in games_thread.py to store accumulated shortfall in match records
- Update sync report logic in client.py to use match's stored accumulated shortfall value
- Add test_last_sync_query.py to verify implementation
- Add CLIENT_SYNC_MINIMAL_PROMPT.md with API specifications

This ensures accurate reporting of the cap compensation balance as it existed when each match was completed, rather than using the current global value.
parent eef784ce
# Minimal Prompt: Client-Side Last Sync Query Implementation
## What Changed on Server
The server now exposes a new endpoint for querying last-sync information:
**Endpoint**: `GET /api/reports/last-sync?client_id=<client_id>`
**Authentication**: Bearer token (API token)
**Response Format**:
```json
{
  "success": true,
  "client_id": "client_unique_identifier",
  "last_sync_id": "sync_20260201_214327_abc12345",
  "last_sync_timestamp": "2026-02-01T21:43:27.249Z",
  "last_sync_type": "incremental",
  "total_syncs": 25,
  "last_sync_summary": {
    "total_payin": 100000.0,
    "total_payout": 95000.0,
    "net_profit": 5000.0,
    "total_bets": 50,
    "total_matches": 10,
    "cap_compensation_balance": 5000.0
  },
  "server_timestamp": "2026-02-01T21:43:27.249Z"
}
```
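If you want static typing on the client, the response can be modeled as `TypedDict`s. A sketch mirroring the example above (the type names are ours, not part of the server contract; treating `last_sync_id`/`last_sync_timestamp` as possibly absent for a never-synced client is an assumption):
```python
from typing import Optional, TypedDict

class LastSyncSummary(TypedDict):
    total_payin: float
    total_payout: float
    net_profit: float
    total_bets: int
    total_matches: int
    cap_compensation_balance: float

class LastSyncResponse(TypedDict):
    success: bool
    client_id: str
    last_sync_id: Optional[str]         # may be None for a brand-new client
    last_sync_timestamp: Optional[str]  # ISO-8601 string when present
    last_sync_type: str
    total_syncs: int
    last_sync_summary: LastSyncSummary
    server_timestamp: str
```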
## What You Need to Implement
### 1. Add Function to Query Server
```python
import requests

def query_server_last_sync(api_token, client_id):
    """Query server for last sync information"""
    url = "https://your-server.com/api/reports/last-sync"
    headers = {"Authorization": f"Bearer {api_token}"}
    params = {"client_id": client_id}
    response = requests.get(url, headers=headers, params=params, timeout=10)
    response.raise_for_status()
    return response.json()
```
### 2. Call Before Each Sync
```python
# Before performing sync
server_info = query_server_last_sync(api_token, client_id)
if server_info.get('success'):
    last_sync_id = server_info.get('last_sync_id')
    last_sync_time = server_info.get('last_sync_timestamp')
    # Compare with your local tracking
    # If mismatch detected, perform full sync instead of incremental
```
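The mismatch check itself can be as simple as comparing sync IDs. A minimal sketch, assuming a hypothetical helper `get_local_last_sync_id()` that you implement against your own tracking store (one possible implementation is shown in step 3 below):
```python
def sync_mode_for(server_info):
    """Decide between incremental and full sync by comparing sync IDs."""
    if not server_info.get('success'):
        # Server could not answer; fall back to a full sync to be safe
        return 'full'
    local_id = get_local_last_sync_id()  # assumed helper over your tracking store
    server_id = server_info.get('last_sync_id')
    if local_id and server_id and local_id == server_id:
        # Local and server agree: only the delta since the last sync is needed
        return 'incremental'
    # No local record, or the records disagree: resend everything
    return 'full'
```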
### 3. Handle Recovery
If your local tracking is corrupted or lost:
```python
# If no local tracking exists
if not local_tracking_exists():
    # Query server for last sync
    server_info = query_server_last_sync(api_token, client_id)
    # Recover local tracking from server state
    if server_info.get('last_sync_id'):
        update_local_tracking(
            sync_id=server_info['last_sync_id'],
            timestamp=server_info['last_sync_timestamp']
        )
```
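`local_tracking_exists()` and `update_local_tracking()` are placeholders for whatever persistence you already use. A minimal sketch backed by a JSON file (the file path and structure are assumptions, not part of this change):
```python
import json
from pathlib import Path

# Hypothetical location for the local tracking record
TRACKING_FILE = Path("last_sync_tracking.json")

def local_tracking_exists():
    """Return True when a non-empty local sync record is present."""
    return TRACKING_FILE.exists() and TRACKING_FILE.stat().st_size > 0

def get_local_last_sync_id():
    """Return the locally recorded sync ID, or None when nothing is stored."""
    if not local_tracking_exists():
        return None
    return json.loads(TRACKING_FILE.read_text()).get("last_sync_id")

def update_local_tracking(sync_id, timestamp):
    """Persist the recovered sync state locally."""
    TRACKING_FILE.write_text(json.dumps({
        "last_sync_id": sync_id,
        "last_sync_timestamp": timestamp,
    }))
```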
## Key Benefits
1. **Verify Server State**: Check what the server has before syncing
2. **Detect Corruption**: Compare local tracking with the server's record
3. **Auto-Recovery**: Restore local tracking from the server if lost
4. **Prevent Data Loss**: Ensure no syncs are missed
## Integration Point
Add this call to your existing sync flow:
```python
# Existing sync flow
def perform_sync():
    # NEW: Query server first
    server_info = query_server_last_sync(api_token, client_id)
    # Verify and recover if needed
    if needs_recovery(server_info):
        recover_from_server(server_info)
    # Continue with normal sync
    send_sync_data()
```
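`needs_recovery()` and `recover_from_server()` are left to the client. One possible sketch, building on the hypothetical JSON-file helpers from step 3 (all names are illustrative):
```python
def needs_recovery(server_info):
    """Recovery is needed when the server has sync history but the local
    record is missing, or the two recorded sync IDs disagree."""
    if not server_info.get('success') or not server_info.get('last_sync_id'):
        return False  # nothing on the server to recover from
    if not local_tracking_exists():
        return True
    return get_local_last_sync_id() != server_info['last_sync_id']

def recover_from_server(server_info):
    """Rebuild the local tracking record from the server's last known sync."""
    update_local_tracking(
        sync_id=server_info['last_sync_id'],
        timestamp=server_info['last_sync_timestamp'],
    )
```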
That's it! Just add the query call before your existing sync logic.
@@ -3170,6 +3170,12 @@ class GamesThread(ThreadedComponent):
            logger.warning(f"Match {match_id} not found for statistics collection")
            return

        # Store the accumulated shortfall value at the time of match completion
        # This historical value will be used in reports instead of the current global value
        accumulated_shortfall = self._get_global_redistribution_adjustment(session)
        match.accumulated_shortfall = accumulated_shortfall
        logger.info(f"💰 [SHORTFALL TRACKING] Stored accumulated shortfall {accumulated_shortfall:.2f} in match {match_id} at completion time")

        # Calculate statistics (excluding cancelled bets)
        total_bets = session.query(BetDetailModel).join(MatchModel).filter(
            BetDetailModel.match_id == match_id,
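Per the commit message, the sync report logic in client.py now reads this stored per-match value instead of the current global one. A hedged sketch of the idea (the summary-building function below is illustrative, not the actual client.py code):
```python
def build_match_summary(match):
    """Build the per-match report entry using the shortfall value that was
    frozen at completion time, not today's global balance."""
    return {
        "match_id": match.id,
        # Stored by games_thread.py when the match completed; the column
        # default of 0.0 applies to matches predating Migration_040
        "cap_compensation_balance": match.accumulated_shortfall,
    }
```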
@@ -2943,6 +2943,44 @@ class Migration_039_AddMatchNumberToBetDetails(DatabaseMigration):
        return True


class Migration_040_AddAccumulatedShortfallToMatches(DatabaseMigration):
    """Add accumulated_shortfall field to matches table for storing historical shortfall values"""

    def __init__(self):
        super().__init__("040", "Add accumulated_shortfall field to matches table")

    def up(self, db_manager) -> bool:
        """Add accumulated_shortfall column to matches table"""
        try:
            with db_manager.engine.connect() as conn:
                # Check if accumulated_shortfall column already exists
                result = conn.execute(text("PRAGMA table_info(matches)"))
                columns = [row[1] for row in result.fetchall()]

                if 'accumulated_shortfall' not in columns:
                    # Add accumulated_shortfall column with default value 0.0
                    conn.execute(text("""
                        ALTER TABLE matches
                        ADD COLUMN accumulated_shortfall REAL DEFAULT 0.0 NOT NULL
                    """))
                    conn.commit()
                    logger.info("accumulated_shortfall column added to matches table")
                else:
                    logger.info("accumulated_shortfall column already exists in matches table")

                return True
        except Exception as e:
            logger.error(f"Failed to add accumulated_shortfall field to matches: {e}")
            return False

    def down(self, db_manager) -> bool:
        """Remove accumulated_shortfall column - SQLite doesn't support DROP COLUMN easily"""
        logger.warning("SQLite doesn't support DROP COLUMN - accumulated_shortfall column will remain")
        return True


class Migration_036_AddMatchTemplatesTables(DatabaseMigration):
    """Add matches_templates and match_outcomes_templates tables for storing match templates"""

@@ -3101,6 +3139,7 @@ MIGRATIONS: List[DatabaseMigration] = [
    Migration_037_RenameDailyRedistributionShortfallTable(),
    Migration_038_AddWin1Win2Associations(),
    Migration_039_AddMatchNumberToBetDetails(),
    Migration_040_AddAccumulatedShortfallToMatches(),
]
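For context on the `down` no-op: SQLite only gained `ALTER TABLE ... DROP COLUMN` in version 3.35, so a true rollback on older versions needs the table-rebuild pattern. A minimal sketch of that pattern, using a hypothetical two-column table for brevity:
```python
import sqlite3

def drop_column_via_rebuild(conn: sqlite3.Connection):
    """Drop a column in SQLite by recreating the table without it.

    Illustrative only: 'items' and its columns are hypothetical, and a real
    migration would also recreate indexes, triggers, and foreign keys.
    """
    conn.executescript("""
        BEGIN;
        CREATE TABLE items_new (id INTEGER PRIMARY KEY, name TEXT NOT NULL);
        INSERT INTO items_new (id, name) SELECT id, name FROM items;
        DROP TABLE items;
        ALTER TABLE items_new RENAME TO items;
        COMMIT;
    """)
```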
@@ -495,6 +495,7 @@ class MatchModel(BaseModel):
    running = Column(Boolean, default=False, nullable=False, comment='Match running flag (0=not running, 1=running)')
    status = Column(Enum('pending', 'scheduled', 'bet', 'ingame', 'done', 'cancelled', 'failed', 'paused'), default='pending', nullable=False, comment='Match status enum')
    fixture_active_time = Column(Integer, nullable=True, comment='Unix timestamp when fixture became active on server')
    accumulated_shortfall = Column(Float(precision=2), default=0.0, nullable=False, comment='Accumulated shortfall from redistribution at the time of match completion')

    # File metadata
    filename = Column(String(1024), nullable=False, comment='Original fixture filename')
@@ -1245,3 +1246,32 @@ class ReportsSyncQueueModel(BaseModel):
    def __repr__(self):
        return f'<ReportsSyncQueue {self.sync_id}: status={self.status}, retries={self.retry_count}>'


class ReportsSyncTrackingModel(BaseModel):
    """Track what data has been synced to server for incremental updates"""

    __tablename__ = 'reports_sync_tracking'
    __table_args__ = (
        Index('ix_reports_sync_tracking_entity_type', 'entity_type'),
        Index('ix_reports_sync_tracking_entity_id', 'entity_id'),
        Index('ix_reports_sync_tracking_last_synced_at', 'last_synced_at'),
        Index('ix_reports_sync_tracking_composite', 'entity_type', 'entity_id'),
        UniqueConstraint('entity_type', 'entity_id', name='uq_reports_sync_tracking_entity'),
    )

    entity_type = Column(String(50), nullable=False, comment='Type of entity: bet, bet_detail, extraction_stat')
    entity_id = Column(String(255), nullable=False, comment='ID of the entity (bet UUID, match ID, etc.)')
    last_synced_at = Column(DateTime, default=datetime.utcnow, nullable=False, comment='Last time this entity was synced')
    last_synced_hash = Column(String(64), comment='Hash of entity data at last sync for change detection')
    sync_count = Column(Integer, default=1, nullable=False, comment='Number of times this entity has been synced')

    def update_sync(self, data_hash: str = None):
        """Update sync timestamp and optionally hash"""
        self.last_synced_at = datetime.utcnow()
        self.sync_count += 1
        if data_hash:
            self.last_synced_hash = data_hash
        self.updated_at = datetime.utcnow()

    def __repr__(self):
        return f'<ReportsSyncTracking {self.entity_type}:{self.entity_id} synced at {self.last_synced_at}>'
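The `last_synced_hash` column enables cheap change detection: hash the entity's synced payload and skip the upload when the hash is unchanged. A minimal sketch of that idea (the `serialize_entity` helper in the usage comment is hypothetical):
```python
import hashlib
import json

def compute_entity_hash(entity_dict):
    """Stable SHA-256 over the entity's synced fields; 64 hex characters,
    matching the String(64) column."""
    payload = json.dumps(entity_dict, sort_keys=True, default=str)
    return hashlib.sha256(payload.encode('utf-8')).hexdigest()

def entity_changed(tracking_row, entity_dict):
    """True if the entity differs from what was last synced."""
    return tracking_row.last_synced_hash != compute_entity_hash(entity_dict)

# Usage: only re-sync and bump the tracking row when the data changed
# if entity_changed(row, serialize_entity(bet)):   # serialize_entity is assumed
#     send_to_server(bet)
#     row.update_sync(compute_entity_hash(serialize_entity(bet)))
```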
"""
Test script for the new last sync query functionality
"""
import sys
import os
from pathlib import Path
# Add the project root to the path
project_root = Path(__file__).parent
sys.path.insert(0, str(project_root))
def test_imports():
    """Test that all necessary imports work"""
    print("Testing imports...")
    try:
        from mbetterclient.api_client.client import ReportsSyncResponseHandler
        print("✓ ReportsSyncResponseHandler imported successfully")
        return True
    except Exception as e:
        print(f"✗ Failed to import ReportsSyncResponseHandler: {e}")
        return False
def test_method_exists():
    """Test that new methods exist in ReportsSyncResponseHandler"""
    print("\nTesting method existence...")
    try:
        from mbetterclient.api_client.client import ReportsSyncResponseHandler
        from mbetterclient.database.manager import DatabaseManager
        from mbetterclient.config.manager import ConfigManager

        # Create a mock instance (we won't actually run it)
        # Just check that the methods exist
        methods_to_check = [
            'query_server_last_sync',
            'recover_local_tracking',
            'needs_recovery'
        ]

        for method_name in methods_to_check:
            if hasattr(ReportsSyncResponseHandler, method_name):
                print(f"✓ Method '{method_name}' exists")
            else:
                print(f"✗ Method '{method_name}' NOT found")
                return False

        return True
    except Exception as e:
        print(f"✗ Error checking methods: {e}")
        return False
def test_endpoint_configuration():
    """Test that the new endpoint is configured"""
    print("\nTesting endpoint configuration...")
    try:
        from mbetterclient.api_client.client import APIClient
        from mbetterclient.core.message_bus import MessageBus
        from mbetterclient.database.manager import DatabaseManager
        from mbetterclient.config.manager import ConfigManager
        from mbetterclient.config.settings import ApiConfig

        # Check if _get_default_endpoints method exists
        if hasattr(APIClient, '_get_default_endpoints'):
            print("✓ _get_default_endpoints method exists")

            # We can't actually instantiate APIClient without a full setup,
            # but we can check the method signature
            import inspect
            sig = inspect.signature(APIClient._get_default_endpoints)
            print(f"✓ Method signature: {sig}")
            return True
        else:
            print("✗ _get_default_endpoints method NOT found")
            return False
    except Exception as e:
        print(f"✗ Error checking endpoint configuration: {e}")
        import traceback
        traceback.print_exc()
        return False
def test_integration():
    """Test that the integration is correct"""
    print("\nTesting integration...")
    try:
        from mbetterclient.api_client.client import ReportsSyncResponseHandler
        import inspect

        # Check that collect_report_data method exists and has the right signature
        if hasattr(ReportsSyncResponseHandler, 'collect_report_data'):
            print("✓ collect_report_data method exists")
            sig = inspect.signature(ReportsSyncResponseHandler.collect_report_data)
            print(f"✓ Method signature: {sig}")
            return True
        else:
            print("✗ collect_report_data method NOT found")
            return False
    except Exception as e:
        print(f"✗ Error checking integration: {e}")
        return False
def main():
    """Run all tests"""
    print("=" * 60)
    print("Testing Last Sync Query Implementation")
    print("=" * 60)

    results = []

    # Run tests
    results.append(("Imports", test_imports()))
    results.append(("Method Existence", test_method_exists()))
    results.append(("Endpoint Configuration", test_endpoint_configuration()))
    results.append(("Integration", test_integration()))

    # Print summary
    print("\n" + "=" * 60)
    print("Test Summary")
    print("=" * 60)

    passed = sum(1 for _, result in results if result)
    total = len(results)

    for test_name, result in results:
        status = "✓ PASS" if result else "✗ FAIL"
        print(f"{test_name:.<40} {status}")

    print(f"\nTotal: {passed}/{total} tests passed")

    if passed == total:
        print("\n✓ All tests passed!")
        return 0
    else:
        print(f"\n✗ {total - passed} test(s) failed")
        return 1

if __name__ == "__main__":
    sys.exit(main())