- prediction: G-01/G-04/G-05/G-06 위반 분류 + 쌍끌이 공조 탐지 추가 - backend: 모선 확정/제외 API + signal-batch 항적 프록시 + ParentResolution 점수 근거 필드 확장 - frontend: 어구 탐지 그리드 다중필터/지도 flyTo, 후보 검토 패널(점수 근거+확정/제외), 24h convex hull 리플레이 + TripsLayer 애니메이션 - gitignore: 루트 .venv/ 추가
225 lines
7.6 KiB
Python
import logging
|
|
import sys
|
|
from contextlib import asynccontextmanager
|
|
|
|
from fastapi import BackgroundTasks, FastAPI
|
|
|
|
from config import qualified_table, settings
|
|
from db import kcgdb, snpdb
|
|
from scheduler import get_last_run, run_analysis_cycle, start_scheduler, stop_scheduler
|
|
|
|
# Root logger: stdout stream, level taken from settings (defaults to INFO
# when LOG_LEVEL does not name a valid logging level).
logging.basicConfig(
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    stream=sys.stdout,
    level=getattr(logging, settings.LOG_LEVEL, logging.INFO),
)

logger = logging.getLogger(__name__)

# Fully-qualified table names used by the correlation endpoints below.
GEAR_CORRELATION_SCORES = qualified_table('gear_correlation_scores')
CORRELATION_PARAM_MODELS = qualified_table('correlation_param_models')
|
|
|
|
|
|
@asynccontextmanager
async def lifespan(application: FastAPI):
    """Application lifespan: open DB pools, warm the in-memory vessel cache,
    start the analysis scheduler, and tear everything down on shutdown.

    The shutdown steps run in a ``finally`` block so the scheduler is stopped
    and both connection pools are closed even when the application body exits
    with an error (the original ran them unconditionally after ``yield`` and
    would skip them on an abnormal exit).
    """
    # Imported lazily so importing this module does not pull in the cache layer.
    from cache.vessel_store import vessel_store

    logger.info('starting KCG Prediction Service')
    snpdb.init_pool()
    kcgdb.init_pool()

    # Initial in-memory cache load (window in hours, default 24h).
    logger.info('loading initial vessel data (%dh)...', settings.INITIAL_LOAD_HOURS)
    vessel_store.load_initial(settings.INITIAL_LOAD_HOURS)
    logger.info('initial load complete: %s', vessel_store.stats())

    # Enrich static vessel info from the signal-batch API
    # (120-minute window for maximum coverage).
    vessel_store.enrich_from_signal_api(minutes=120)
    logger.info('signal-batch enrich complete')

    start_scheduler()
    try:
        yield
    finally:
        # Always release resources, even if the server loop raised.
        stop_scheduler()
        snpdb.close_pool()
        kcgdb.close_pool()
        logger.info('KCG Prediction Service stopped')
|
|
|
|
|
|
# FastAPI application; startup/shutdown is handled by the lifespan above.
app = FastAPI(title='KCG Prediction Service', version='2.1.0', lifespan=lifespan)

# AI marine-analysis chat router (registered after the app is constructed).
from chat.router import router as chat_router

app.include_router(chat_router)
|
|
|
|
|
|
@app.get('/health')
def health_check():
    """Health probe: service status plus DB pool health and cache statistics."""
    from cache.vessel_store import vessel_store

    report = {'status': 'ok'}
    report['snpdb'] = snpdb.check_health()
    report['kcgdb'] = kcgdb.check_health()
    report['store'] = vessel_store.stats()
    return report
|
|
|
|
|
|
@app.get('/api/v1/analysis/status')
def analysis_status():
    """Report metadata about the most recent analysis cycle run."""
    return get_last_run()
|
|
|
|
|
|
@app.post('/api/v1/analysis/trigger')
def trigger_analysis(background_tasks: BackgroundTasks):
    """Queue one analysis cycle in the background; returns immediately."""
    background_tasks.add_task(run_analysis_cycle)
    return {'message': 'analysis cycle triggered'}
|
|
|
|
|
|
def _group_correlation_rows(rows):
    """Collapse score rows into one entry per MMSI.

    Each row is (mmsi, target_type, target_name, current_score, model_name).
    A vessel may be detected by several models: every per-model score is kept
    in ``models`` and the highest one becomes the vessel's headline ``score``.
    """
    vessel_map: dict[str, dict] = {}
    for mmsi, target_type, target_name, current_score, model_name in rows:
        score = float(current_score)
        entry = vessel_map.get(mmsi)
        if entry is None:
            vessel_map[mmsi] = {
                'mmsi': mmsi,
                'type': target_type,
                'name': target_name or '',
                'score': score,
                'models': {model_name: score},
            }
        else:
            entry['models'][model_name] = score
            if score > entry['score']:
                entry['score'] = score
    return vessel_map


@app.get('/api/v1/correlation/{group_key:path}/tracks')
def get_correlation_tracks(
    group_key: str,
    hours: int = 24,
    min_score: float = 0.3,
):
    """Return correlated vessels with their track history for map rendering.

    Queries gear_correlation_scores (ALL active models), groups the rows per
    vessel, and enriches each vessel with track data from the in-memory
    vessel_store. Each vessel includes which models detected it.

    Args:
        group_key: correlation group identifier (path-typed, may contain '/').
        hours: track-history window forwarded to the vessel store.
        min_score: minimum current_score for a row to be included.

    Best-effort endpoint: any failure is logged and an empty vessel list is
    returned instead of an HTTP 5xx.
    """
    from cache.vessel_store import vessel_store

    try:
        # Hold the DB connection only for the query itself; all grouping and
        # enrichment below is pure in-memory work.
        with kcgdb.get_conn() as conn:
            cur = conn.cursor()
            # Table names are trusted constants built via qualified_table();
            # user-supplied values go through query parameters.
            cur.execute(f"""
                SELECT s.target_mmsi, s.target_type, s.target_name,
                       s.current_score, m.name AS model_name
                FROM {GEAR_CORRELATION_SCORES} s
                JOIN {CORRELATION_PARAM_MODELS} m ON s.model_id = m.id
                WHERE s.group_key = %s
                  AND s.current_score >= %s
                  AND m.is_active = TRUE
                ORDER BY s.current_score DESC
            """, (group_key, min_score))
            rows = cur.fetchall()
            cur.close()

        logger.info('correlation tracks: group_key=%r, min_score=%s, rows=%d',
                    group_key, min_score, len(rows))

        if not rows:
            return {'groupKey': group_key, 'vessels': []}

        vessel_map = _group_correlation_rows(rows)
        mmsis = list(vessel_map.keys())

        # Track histories from the in-memory store.
        tracks = vessel_store.get_vessel_tracks(mmsis, hours)
        with_tracks = sum(1 for m in mmsis if tracks.get(m))
        # NOTE(review): reads the store's private _tracks for debug logging
        # only — consider exposing a public size accessor on vessel_store.
        logger.info('correlation tracks: %d unique mmsis, %d with track data, '
                    'vessel_store._tracks has %d entries',
                    len(mmsis), with_tracks, len(vessel_store._tracks))

        vessels = [
            {
                'mmsi': info['mmsi'],
                'name': info['name'],
                'type': info['type'],
                'score': info['score'],
                'models': info['models'],  # {modelName: score, ...}
                'track': tracks.get(info['mmsi'], []),
            }
            for info in vessel_map.values()
        ]
        return {'groupKey': group_key, 'vessels': vessels}

    except Exception as e:
        logger.warning('get_correlation_tracks failed for %s: %s', group_key, e)
        return {'groupKey': group_key, 'vessels': []}
|
|
|
|
|
|
# Fully-qualified table holding group polygon snapshots (replay frames).
GROUP_POLYGON_SNAPSHOTS = qualified_table('group_polygon_snapshots')
|
|
|
|
|
|
def _parse_snapshot_row(row):
    """Convert one group_polygon_snapshots row into a JSON-safe replay frame.

    Row layout: (snapshot_time, center_lat, center_lon, member_count,
    polygon_geojson, members_json, sub_cluster_id, area_sq_nm). Malformed
    JSON columns degrade to None / [] rather than failing the whole frame.
    """
    import json as _json

    (snapshot_time, center_lat, center_lon, member_count,
     polygon_geojson, members_json, sub_cluster_id, area_sq_nm) = row

    polygon = None
    if polygon_geojson:
        try:
            polygon = _json.loads(polygon_geojson)
        except (ValueError, TypeError):
            pass  # keep polygon=None on bad GeoJSON

    members = []
    if members_json:
        try:
            members = _json.loads(members_json)
        except (ValueError, TypeError):
            pass  # keep members=[] on bad JSON

    return {
        'snapshotTime': snapshot_time.isoformat() if snapshot_time is not None else None,
        # Use `is not None`, not truthiness: 0.0 is a valid coordinate on the
        # equator / prime meridian and must not collapse to null.
        'centerLat': float(center_lat) if center_lat is not None else None,
        'centerLon': float(center_lon) if center_lon is not None else None,
        'memberCount': int(member_count) if member_count else 0,
        'polygon': polygon,
        'members': members,
        'subClusterId': int(sub_cluster_id) if sub_cluster_id else 0,
        'areaSqNm': float(area_sq_nm) if area_sq_nm else 0,
    }


@app.get('/api/v1/groups/{group_key:path}/history')
def get_group_history(group_key: str, hours: int = 24):
    """Group polygon snapshot history over the last `hours` (for map replay).

    Args:
        group_key: group identifier (path-typed, may contain '/').
        hours: look-back window in hours (default 24).

    Best-effort endpoint: failures are logged and an empty frame list is
    returned instead of an HTTP 5xx.
    """
    try:
        with kcgdb.get_conn() as conn:
            cur = conn.cursor()
            cur.execute(f"""
                SELECT snapshot_time,
                       ST_Y(center_point) AS center_lat,
                       ST_X(center_point) AS center_lon,
                       member_count,
                       ST_AsGeoJSON(polygon)::text AS polygon_geojson,
                       members::text AS members_json,
                       sub_cluster_id,
                       area_sq_nm
                FROM {GROUP_POLYGON_SNAPSHOTS}
                WHERE group_key = %s
                  AND snapshot_time > NOW() - (%s * INTERVAL '1 hour')
                ORDER BY snapshot_time ASC
            """, (group_key, hours))
            frames = [_parse_snapshot_row(row) for row in cur.fetchall()]
            cur.close()

        logger.info('group history: group_key=%r, hours=%d, frames=%d',
                    group_key, hours, len(frames))
        return {'groupKey': group_key, 'frameCount': len(frames), 'frames': frames}

    except Exception as e:
        logger.warning('get_group_history failed for %s: %s', group_key, e)
        return {'groupKey': group_key, 'frameCount': 0, 'frames': []}
|