Codex Lab 환경(iran-airstrike-replay-codex)에서 검증 완료된 어구 모선 자동 추론 + 검토 워크플로우 전체를 이식. ## Python (prediction/) - gear_parent_inference(1,428줄): 다층 점수 모델 (correlation + name + track + prior bonus) - gear_parent_episode(631줄): Episode 연속성 (Jaccard + 공간거리) - gear_name_rules: 모선 이름 정규화 + 4자 미만 필터 - scheduler: 추론 호출 단계 추가 (4.8) - fleet_tracker/kcgdb: SQL qualified_table() 동적화 - gear_correlation: timestamp 필드 추가 ## DB (database/migration/ 012~015) - 후보 스냅샷, resolution, episode, 라벨 세션, 제외 관리 테이블 9개 + VIEW 2개 ## Backend (Java) - 12개 DTO/Controller (ParentInferenceWorkflowController 등) - GroupPolygonService: parent_resolution LEFT JOIN + 15개 API 메서드 ## Frontend - ParentReviewPanel: 모선 검토 대시보드 - vesselAnalysis: 10개 신규 API 함수 + 6개 타입 Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
160 lines
5.0 KiB
Python
import logging
|
|
import sys
|
|
from contextlib import asynccontextmanager
|
|
|
|
from fastapi import BackgroundTasks, FastAPI
|
|
|
|
from config import qualified_table, settings
|
|
from db import kcgdb, snpdb
|
|
from scheduler import get_last_run, run_analysis_cycle, start_scheduler, stop_scheduler
|
|
|
|
# Root-logger setup: level comes from settings.LOG_LEVEL (falls back to
# INFO when the name is unknown), all records go to stdout.
_log_level = getattr(logging, settings.LOG_LEVEL, logging.INFO)
logging.basicConfig(
    stream=sys.stdout,
    level=_log_level,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
)
logger = logging.getLogger(__name__)

# Schema-qualified table names used in the raw SQL below.
GEAR_CORRELATION_SCORES = qualified_table('gear_correlation_scores')
CORRELATION_PARAM_MODELS = qualified_table('correlation_param_models')
|
@asynccontextmanager
async def lifespan(application: FastAPI):
    """Application lifespan: open DB pools, warm the in-memory vessel cache,
    and keep the background scheduler running while the app serves.

    Fix: shutdown steps (scheduler stop, pool close) now run in a ``finally``
    block, so resources are released even if the application errors while
    serving — previously an exception would skip cleanup entirely.
    """
    from cache.vessel_store import vessel_store

    logger.info('starting KCG Prediction Service')
    snpdb.init_pool()
    kcgdb.init_pool()

    # Initial in-memory cache load (24-hour window by default).
    logger.info('loading initial vessel data (%dh)...', settings.INITIAL_LOAD_HOURS)
    vessel_store.load_initial(settings.INITIAL_LOAD_HOURS)
    logger.info('initial load complete: %s', vessel_store.stats())

    start_scheduler()
    try:
        yield
    finally:
        # Always release resources, even when serving raised.
        stop_scheduler()
        snpdb.close_pool()
        kcgdb.close_pool()
        logger.info('KCG Prediction Service stopped')
|
# FastAPI application wired to the lifespan handler above.
app = FastAPI(
    lifespan=lifespan,
    title='KCG Prediction Service',
    version='2.1.0',
)

# AI maritime-analysis chat router.
# NOTE(review): imported after app creation rather than at the top of the
# file — presumably to defer heavier dependencies or avoid a cycle; confirm.
from chat.router import router as chat_router  # noqa: E402

app.include_router(chat_router)
|
@app.get('/health')
def health_check():
    """Health probe: reports DB pool health plus in-memory store stats."""
    from cache.vessel_store import vessel_store

    payload = {'status': 'ok'}
    payload['snpdb'] = snpdb.check_health()
    payload['kcgdb'] = kcgdb.check_health()
    payload['store'] = vessel_store.stats()
    return payload
|
@app.get('/api/v1/analysis/status')
def analysis_status():
    """Return whatever the scheduler recorded for the last analysis run."""
    last_run = get_last_run()
    return last_run
|
@app.post('/api/v1/analysis/trigger')
def trigger_analysis(background_tasks: BackgroundTasks):
    """Queue one analysis cycle to run in the background; return at once."""
    background_tasks.add_task(run_analysis_cycle)
    return {'message': 'analysis cycle triggered'}
|
def _group_correlation_rows(rows):
    """Collapse raw (mmsi, type, name, score, model_name) rows into one dict
    entry per vessel: every model's score is recorded under 'models' and the
    highest score across models is kept as the vessel's 'score'.
    """
    vessel_map: dict[str, dict] = {}
    for target_mmsi, target_type, target_name, current_score, model_name in rows:
        score = float(current_score)
        entry = vessel_map.get(target_mmsi)
        if entry is None:
            vessel_map[target_mmsi] = {
                'mmsi': target_mmsi,
                'type': target_type,
                'name': target_name or '',
                'score': score,
                'models': {model_name: score},
            }
        else:
            entry['models'][model_name] = score
            if score > entry['score']:
                entry['score'] = score
    return vessel_map


@app.get('/api/v1/correlation/{group_key:path}/tracks')
def get_correlation_tracks(
    group_key: str,
    hours: int = 24,
    min_score: float = 0.3,
):
    """Return correlated vessels with their track history for map rendering.

    Queries gear_correlation_scores (ALL active models) and enriches with
    24h track data from in-memory vessel_store.
    Each vessel includes which models detected it.

    Fixes vs. previous version: the DB cursor is now closed even when
    execute/fetchall raises (try/finally), and failures are logged with the
    full traceback (logger.exception instead of warning-with-str(e)).
    """
    from cache.vessel_store import vessel_store

    try:
        with kcgdb.get_conn() as conn:
            cur = conn.cursor()
            try:
                # Correlated vessels from ALL active models. Table names are
                # module-level qualified constants (not user input); user
                # values go through bind parameters only.
                cur.execute(f"""
                    SELECT s.target_mmsi, s.target_type, s.target_name,
                           s.current_score, m.name AS model_name
                    FROM {GEAR_CORRELATION_SCORES} s
                    JOIN {CORRELATION_PARAM_MODELS} m ON s.model_id = m.id
                    WHERE s.group_key = %s
                      AND s.current_score >= %s
                      AND m.is_active = TRUE
                    ORDER BY s.current_score DESC
                """, (group_key, min_score))
                rows = cur.fetchall()
            finally:
                # Release the cursor even if execute/fetchall raised.
                cur.close()

        logger.info('correlation tracks: group_key=%r, min_score=%s, rows=%d',
                    group_key, min_score, len(rows))

        if not rows:
            return {'groupKey': group_key, 'vessels': []}

        # One entry per MMSI; per-model scores collected, best score kept.
        vessel_map = _group_correlation_rows(rows)
        mmsis = list(vessel_map.keys())

        # Enrich with track history from the in-memory store.
        tracks = vessel_store.get_vessel_tracks(mmsis, hours)
        with_tracks = sum(1 for m in mmsis if tracks.get(m))
        # NOTE(review): reaches into vessel_store._tracks (private attribute)
        # purely for diagnostics — consider a public size accessor.
        logger.info('correlation tracks: %d unique mmsis, %d with track data, vessel_store._tracks has %d entries',
                    len(mmsis), with_tracks, len(vessel_store._tracks))

        vessels = [
            {
                'mmsi': info['mmsi'],
                'name': info['name'],
                'type': info['type'],
                'score': info['score'],
                'models': info['models'],  # {modelName: score, ...}
                'track': tracks.get(info['mmsi'], []),
            }
            for info in vessel_map.values()
        ]
        return {'groupKey': group_key, 'vessels': vessels}

    except Exception:
        # Best-effort endpoint: degrade to an empty result, but keep the
        # full traceback in the logs for diagnosis.
        logger.exception('get_correlation_tracks failed for %s', group_key)
        return {'groupKey': group_key, 'vessels': []}