From 2e9361ee580c166af34eaf9340ce37cc78cb3a24 Mon Sep 17 00:00:00 2001 From: htlee Date: Thu, 19 Feb 2026 09:59:49 +0900 Subject: [PATCH] =?UTF-8?q?refactor:=20SNP=20API=20=EC=A0=84=ED=99=98=20?= =?UTF-8?q?=EB=B0=8F=20=EB=A0=88=EA=B1=B0=EC=8B=9C=20=EC=BD=94=EB=93=9C=20?= =?UTF-8?q?=EC=A0=84=EB=A9=B4=20=EC=A0=95=EB=A6=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - CollectDB 다중 신호 수집 → S&P Global AIS API 단일 수집으로 전환 - sig_src_cd + target_id 이중 식별자 → mmsi(VARCHAR) 단일 식별자 - t_vessel_latest_position → t_ais_position 테이블 전환 - 레거시 배치/유틸 ~30개 클래스 삭제 (VesselAggregationJobConfig, ShipKindCodeConverter 등) - AisTargetCacheManager 기반 캐시 이중 구조 (최신위치 + 트랙 버퍼) - CacheBasedVesselTrackDataReader + CacheBasedTrackJobListener 신규 추가 - VesselStaticStepConfig: 정적정보 CDC 변경 검출 + hourly job 편승 - SignalKindCode enum: vesselType/extraInfo 기반 선종 자동 분류 - WebSocket/STOMP 전체 mmsi 전환 (StompTrackStreamingService ~40곳) - 모니터링/성능 최적화 코드 mmsi 기반 전환 - DataSource 설정 통합 (snpdb 단일 DB) - AreaBoundaryCache Polygon→Geometry 캐스트 수정 (MULTIPOLYGON 지원) - ConcurrentHashMap 적용 (VesselTrackStepConfig 동시성 버그 수정) Co-Authored-By: Claude Opus 4.6 --- .claude/commands/analyze-batch.md | 70 + .claude/commands/build-check.md | 64 + .claude/commands/clarify.md | 66 + .claude/commands/perf-check.md | 72 + .claude/commands/wrap.md | 65 + .claude/rules/code-style.md | 16 +- .sdkmanrc | 3 + docs/cache-benchmark-report.md | 314 ++++ docs/cache-benchmark-summary.md | 102 ++ docs/일일 캐시 성능 벤치마크 보고서.docx | Bin 0 -> 49015 bytes docs/일일 캐시 성능 벤치마크 요약 보고서.docx | Bin 0 -> 33042 bytes ...ᆼ적조회,리플레이 성능 부하 개선 결과보고서.docx | Bin 0 -> 60296 bytes pom.xml | 6 + scripts/deploy-only.bat | 219 +++ scripts/deploy-query-server.bat | 47 + scripts/deploy-safe.bat | 237 +++ scripts/diagnose-datasource-issue.sql | 139 ++ scripts/enable-sql-logging.yml | 24 + scripts/fix-invalid-geometry.sql | 122 ++ scripts/fix-postgis-schema.ps1 | 24 + scripts/force-reset-batch-metadata.sh | 223 +++ 
scripts/install-postgis-in-signal-schema.sql | 59 + scripts/list-failed-jobs.sql | 85 + scripts/mark-failed-jobs-as-abandoned.sql | 75 + scripts/mark-specific-job-as-abandoned.sql | 75 + scripts/monitor-query-server.sh | 212 +++ scripts/monitor-realtime.sh | 154 ++ scripts/quick-check-invalid.sql | 50 + scripts/quick-test-real-data.sql | 269 ++++ scripts/run-load-test.sh | 288 ++++ scripts/run-on-query-server-dev.sh | 190 +++ scripts/run-query-only-server.sh | 184 +++ scripts/server-logs.bat | 40 + scripts/server-status.bat | 64 + scripts/setup-ssh-key.bat | 59 + scripts/stop-running-jobs.sql | 67 + scripts/sync-nexus.sh | 170 ++ scripts/test-abnormal-tracks-insert.sql | 135 ++ scripts/test-daily-aggregation-fixed.sql | 496 ++++++ scripts/test-hourly-aggregation-fixed.sql | 484 ++++++ scripts/test-with-real-data.sql | 274 ++++ scripts/vessel-batch-control.sh | 215 +++ scripts/vessel-batch-start-prod.sh | 191 +++ scripts/websocket-load-test.py | 175 ++ sql/V2_snp_schema_migration.sql | 584 +++++++ sql/convert_to_unix_timestamp.sql | 68 + sql/simple_update_v2.sql | 42 + sql/update_missing_v2.sql | 40 + .../signal_batch/BatchCommandLineRunner.java | 6 +- .../batch/job/AisPositionSyncStepConfig.java | 144 ++ .../batch/job/AisTargetImportJobConfig.java | 96 ++ .../batch/job/AreaStatisticsStepConfig.java | 220 --- .../batch/job/DailyAggregationStepConfig.java | 13 +- .../batch/job/HourlyAggregationJobConfig.java | 2 + .../job/HourlyAggregationStepConfig.java | 23 +- .../batch/job/LatestPositionStepConfig.java | 178 --- .../batch/job/TileAggregationStepConfig.java | 350 ---- .../batch/job/VesselAggregationJobConfig.java | 78 - .../batch/job/VesselBatchScheduler.java | 52 +- .../VesselPositionCacheRefreshScheduler.java | 194 --- .../batch/job/VesselStaticStepConfig.java | 239 +++ .../job/VesselTrackAggregationJobConfig.java | 8 +- .../batch/job/VesselTrackStepConfig.java | 84 +- .../listener/CacheBasedTrackJobListener.java | 52 + .../processor/AbnormalTrackDetector.java | 
27 +- .../processor/AccumulatingAreaProcessor.java | 190 --- .../processor/AccumulatingTileProcessor.java | 206 --- .../processor/AisTargetDataProcessor.java | 85 + .../processor/AreaStatisticsProcessor.java | 333 ---- ...seTrackProcessorWithAbnormalDetection.java | 14 +- .../batch/processor/DailyTrackProcessor.java | 53 +- .../batch/processor/HourlyTrackProcessor.java | 79 +- .../processor/LatestPositionProcessor.java | 60 - .../processor/TileAggregationProcessor.java | 291 ---- .../batch/processor/VesselTrackProcessor.java | 3 +- .../batch/reader/AisTargetCacheManager.java | 246 +++ .../batch/reader/AisTargetDataReader.java | 86 + .../CacheBasedVesselTrackDataReader.java | 132 ++ .../batch/reader/ChnPrmShipCacheManager.java | 121 ++ .../batch/reader/ChnPrmShipCacheWarmer.java | 134 ++ .../batch/reader/ChnPrmShipProperties.java | 61 + .../reader/InMemoryVesselDataReader.java | 54 - .../reader/InMemoryVesselTrackDataReader.java | 73 - .../batch/reader/PartitionedReader.java | 181 --- .../batch/reader/VesselDataReader.java | 408 ----- .../batch/writer/AbnormalTrackWriter.java | 9 +- .../batch/writer/AisTargetCacheWriter.java | 58 + .../writer/OptimizedBulkInsertWriter.java | 702 --------- .../batch/writer/UpsertWriter.java | 271 ---- .../batch/writer/VesselTrackBulkWriter.java | 111 +- .../domain/debug/DebugTimeController.java | 16 +- .../domain/gis/cache/AreaBoundaryCache.java | 32 +- .../gis/controller/AreaSearchController.java | 2 +- .../domain/gis/dto/VesselContactRequest.java | 4 - .../domain/gis/dto/VesselContactResponse.java | 7 +- .../domain/gis/service/AreaSearchService.java | 3 - .../domain/gis/service/GisService.java | 387 ++--- .../domain/gis/service/GisServiceV2.java | 206 +-- .../gis/service/VesselContactService.java | 19 +- .../SequentialPassageController.java | 41 +- .../dto/SequentialPassageResponse.java | 7 +- .../SequentialAreaTrackingService.java | 42 +- .../controller/AbnormalTrackController.java | 18 +- .../track/dto/AbnormalTrackResponse.java 
| 8 +- .../track/service/AbnormalTrackService.java | 118 +- .../vessel/dto/AisTargetApiResponse.java | 27 + .../domain/vessel/dto/AisTargetDto.java | 183 +++ .../domain/vessel/dto/CompactVesselTrack.java | 14 +- .../domain/vessel/dto/IntegrationVessel.java | 78 - .../vessel/dto/RecentVesselPositionDto.java | 10 +- .../domain/vessel/dto/TrackResponse.java | 13 +- .../vessel/dto/VesselBucketPositionDto.java | 29 +- .../vessel/dto/VesselTracksRequest.java | 34 +- .../domain/vessel/model/AisTargetEntity.java | 63 + .../domain/vessel/model/VesselData.java | 22 +- .../vessel/model/VesselLatestPosition.java | 8 +- .../domain/vessel/model/VesselTrack.java | 47 +- .../service/IntegrationVesselService.java | 295 ---- .../service/VesselLatestPositionCache.java | 12 +- .../vessel/service/VesselPositionService.java | 48 +- .../service/VesselPreviousBucketCache.java | 49 +- .../vessel/service/VesselTrackMerger.java | 9 +- .../service/filter/VesselTrackFilter.java | 10 +- .../global/config/AisApiWebClientConfig.java | 43 + .../global/config/DevDataSourceConfig.java | 67 +- .../global/config/LocalDataSourceConfig.java | 91 +- .../global/config/ProdDataSourceConfig.java | 91 +- .../global/config/SwaggerConfig.java | 2 +- .../global/tool/BatchDiagnosticTool.java | 78 +- .../global/util/ConcurrentUpdateManager.java | 245 --- .../global/util/HaeguGeoUtils.java | 2 +- .../util/IntegrationSignalConstants.java | 133 -- .../global/util/NationalCodeUtil.java | 30 - .../global/util/PartitionManager.java | 117 +- .../global/util/SharedDataJobListener.java | 104 -- .../global/util/ShipKindCodeConverter.java | 222 --- .../global/util/SignalKindCode.java | 118 ++ .../global/util/TrackClippingUtils.java | 3 +- .../global/util/TrackConverter.java | 36 +- .../global/util/VesselDataHolder.java | 41 - .../global/util/VesselTrackConverter.java | 2 - .../global/util/VesselTrackDataHolder.java | 28 - .../util/VesselTrackDataJobListener.java | 150 -- .../websocket/dto/MergedVesselTrack.java | 7 +- 
.../websocket/dto/ProcessedTrackData.java | 5 +- .../global/websocket/dto/VesselTrackData.java | 5 +- .../service/ChunkedTrackStreamingService.java | 369 ++--- .../service/DailyTrackCacheManager.java | 65 +- .../service/StompTrackStreamingService.java | 47 +- .../controller/BatchAdminController.java | 12 +- .../controller/DataSourceDebugController.java | 35 +- .../controller/MetricsController.java | 6 +- .../controller/MonitoringController.java | 35 +- .../PerformanceOptimizationController.java | 2 +- .../health/BatchHealthIndicator.java | 53 +- .../performance/DatabaseIndexOptimizer.java | 30 +- .../monitoring/performance/IndexCreator.java | 2 +- .../PerformanceOptimizationManager.java | 8 +- .../performance/PerformanceTestRunner.java | 603 ------- .../QueryPerformanceOptimizer.java | 6 +- src/main/resources/application-dev.yml | 6 + src/main/resources/application-local.yml | 124 +- src/main/resources/application-prod-mpr.yml | 6 + src/main/resources/application-prod.yml | 6 + src/main/resources/application.yml | 26 +- src/main/resources/chnprmship-mmsi.txt | 1402 +++++++++++++++++ ..._signal_tables_and indexes_and_commant.sql | 658 ++++++++ src/main/resources/static/v2/README.md | 132 ++ .../processor/AisTargetDataProcessorTest.java | 230 +++ .../reader/AisTargetCacheManagerTest.java | 309 ++++ .../config/TestDataSourceConfig.java | 12 - .../vessel/service/VesselTrackMergerTest.java | 243 +++ .../global/util/SignalKindCodeTest.java | 153 ++ .../performance/IndexStatusTest.java | 11 +- .../signal_batch/util/SignalKindCodeTest.java | 167 ++ 175 files changed, 12886 insertions(+), 7638 deletions(-) create mode 100644 .claude/commands/analyze-batch.md create mode 100644 .claude/commands/build-check.md create mode 100644 .claude/commands/clarify.md create mode 100644 .claude/commands/perf-check.md create mode 100644 .claude/commands/wrap.md create mode 100644 .sdkmanrc create mode 100644 docs/cache-benchmark-report.md create mode 100644 docs/cache-benchmark-summary.md 
create mode 100644 docs/일일 캐시 성능 벤치마크 보고서.docx create mode 100644 docs/일일 캐시 성능 벤치마크 요약 보고서.docx create mode 100644 docs/항적조회,리플레이 성능 부하 개선 결과보고서.docx create mode 100644 scripts/deploy-only.bat create mode 100644 scripts/deploy-query-server.bat create mode 100644 scripts/deploy-safe.bat create mode 100644 scripts/diagnose-datasource-issue.sql create mode 100644 scripts/enable-sql-logging.yml create mode 100644 scripts/fix-invalid-geometry.sql create mode 100644 scripts/fix-postgis-schema.ps1 create mode 100644 scripts/force-reset-batch-metadata.sh create mode 100644 scripts/install-postgis-in-signal-schema.sql create mode 100644 scripts/list-failed-jobs.sql create mode 100644 scripts/mark-failed-jobs-as-abandoned.sql create mode 100644 scripts/mark-specific-job-as-abandoned.sql create mode 100644 scripts/monitor-query-server.sh create mode 100644 scripts/monitor-realtime.sh create mode 100644 scripts/quick-check-invalid.sql create mode 100644 scripts/quick-test-real-data.sql create mode 100644 scripts/run-load-test.sh create mode 100644 scripts/run-on-query-server-dev.sh create mode 100644 scripts/run-query-only-server.sh create mode 100644 scripts/server-logs.bat create mode 100644 scripts/server-status.bat create mode 100644 scripts/setup-ssh-key.bat create mode 100644 scripts/stop-running-jobs.sql create mode 100644 scripts/sync-nexus.sh create mode 100644 scripts/test-abnormal-tracks-insert.sql create mode 100644 scripts/test-daily-aggregation-fixed.sql create mode 100644 scripts/test-hourly-aggregation-fixed.sql create mode 100644 scripts/test-with-real-data.sql create mode 100644 scripts/vessel-batch-control.sh create mode 100644 scripts/vessel-batch-start-prod.sh create mode 100644 scripts/websocket-load-test.py create mode 100644 sql/V2_snp_schema_migration.sql create mode 100644 sql/convert_to_unix_timestamp.sql create mode 100644 sql/simple_update_v2.sql create mode 100644 sql/update_missing_v2.sql create mode 100644 
src/main/java/gc/mda/signal_batch/batch/job/AisPositionSyncStepConfig.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/job/AisTargetImportJobConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/job/AreaStatisticsStepConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/job/LatestPositionStepConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/job/TileAggregationStepConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/job/VesselAggregationJobConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/job/VesselPositionCacheRefreshScheduler.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/job/VesselStaticStepConfig.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/listener/CacheBasedTrackJobListener.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingAreaProcessor.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingTileProcessor.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessor.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/AreaStatisticsProcessor.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/LatestPositionProcessor.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/processor/TileAggregationProcessor.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManager.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/AisTargetDataReader.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/CacheBasedVesselTrackDataReader.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheManager.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheWarmer.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipProperties.java 
delete mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselDataReader.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselTrackDataReader.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/PartitionedReader.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/reader/VesselDataReader.java create mode 100644 src/main/java/gc/mda/signal_batch/batch/writer/AisTargetCacheWriter.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/writer/OptimizedBulkInsertWriter.java delete mode 100644 src/main/java/gc/mda/signal_batch/batch/writer/UpsertWriter.java create mode 100644 src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetApiResponse.java create mode 100644 src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetDto.java delete mode 100644 src/main/java/gc/mda/signal_batch/domain/vessel/dto/IntegrationVessel.java create mode 100644 src/main/java/gc/mda/signal_batch/domain/vessel/model/AisTargetEntity.java delete mode 100644 src/main/java/gc/mda/signal_batch/domain/vessel/service/IntegrationVesselService.java create mode 100644 src/main/java/gc/mda/signal_batch/global/config/AisApiWebClientConfig.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/IntegrationSignalConstants.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/NationalCodeUtil.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/SharedDataJobListener.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/ShipKindCodeConverter.java create mode 100644 src/main/java/gc/mda/signal_batch/global/util/SignalKindCode.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/VesselDataHolder.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataHolder.java delete mode 100644 src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataJobListener.java delete mode 100644 
src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceTestRunner.java create mode 100644 src/main/resources/chnprmship-mmsi.txt create mode 100644 src/main/resources/sql/create_signal_tables_and indexes_and_commant.sql create mode 100644 src/main/resources/static/v2/README.md create mode 100644 src/test/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessorTest.java create mode 100644 src/test/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManagerTest.java create mode 100644 src/test/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMergerTest.java create mode 100644 src/test/java/gc/mda/signal_batch/global/util/SignalKindCodeTest.java create mode 100644 src/test/java/gc/mda/signal_batch/util/SignalKindCodeTest.java diff --git a/.claude/commands/analyze-batch.md b/.claude/commands/analyze-batch.md new file mode 100644 index 0000000..b14ae2e --- /dev/null +++ b/.claude/commands/analyze-batch.md @@ -0,0 +1,70 @@ +# /analyze-batch - 배치 작업 분석 + +Spring Batch 작업 관련 코드를 분석하고 진단합니다. + +## 분석 대상 + +### 1. Job 구성 분석 +다음 파일들을 확인하세요: +- `src/main/java/**/config/` - 배치 설정 +- `src/main/java/**/job/` - Job 정의 +- Job, Step, Reader, Processor, Writer 구성 + +### 2. 스케줄링 설정 +- @Scheduled 어노테이션 사용 현황 +- Quartz 또는 다른 스케줄러 설정 +- Cron 표현식 분석 + +### 3. 데이터 처리 패턴 +- ItemReader 구현 (DB, File, API 등) +- ItemProcessor 로직 +- ItemWriter 구현 (bulk insert, 파일 출력 등) +- Chunk 크기 설정 + +### 4. 에러 처리 +- Skip 정책 +- Retry 정책 +- Listener 구현 (JobExecutionListener, StepExecutionListener) + +### 5. 성능 분석 +- Chunk 크기 적절성 +- 병렬 처리 설정 (Partitioning, Multi-threading) +- 커넥션 풀 설정 + +## 출력 형식 + +```markdown +## 배치 작업 분석 결과 + +### Job 목록 +| Job 이름 | Step 수 | 스케줄 | 설명 | +|----------|---------|--------|------| +| xxxJob | 3 | 0 0 * * * | ... | + +### 데이터 흐름 +``` +[Reader] → [Processor] → [Writer] + ↓ ↓ ↓ +[데이터소스] [변환로직] [목적지] +``` + +### 에러 처리 설정 +- Skip 정책: [설정 내용] +- Retry 정책: [설정 내용] + +### 성능 설정 +- Chunk 크기: [값] +- 병렬 처리: [설정 여부] + +### 개선 제안 +1. [제안1] +2. 
[제안2] +``` + +## 인자 + +`$ARGUMENTS`: 특정 Job 이름이나 키워드 + +예시: +- `/analyze-batch` - 전체 분석 +- `/analyze-batch signal` - 신호 관련 배치만 분석 diff --git a/.claude/commands/build-check.md b/.claude/commands/build-check.md new file mode 100644 index 0000000..0e02005 --- /dev/null +++ b/.claude/commands/build-check.md @@ -0,0 +1,64 @@ +# /build-check - 빌드 및 테스트 체크 + +Maven 프로젝트의 빌드 상태와 테스트 결과를 점검합니다. + +## 실행 작업 + +### 1. 컴파일 체크 +```bash +mvn clean compile -DskipTests +``` +- 컴파일 에러 확인 +- 의존성 문제 확인 + +### 2. 테스트 실행 (선택적) +```bash +mvn test +``` +- 단위 테스트 결과 +- 실패한 테스트 분석 + +### 3. 패키지 빌드 (선택적) +```bash +mvn package -DskipTests +``` +- JAR/WAR 생성 확인 +- 빌드 아티팩트 확인 + +## 출력 형식 + +```markdown +## Build Check 결과 + +### 컴파일 +- 상태: [성공/실패] +- 에러 (있다면): [에러 내용] + +### 테스트 +- 상태: [성공/실패/스킵] +- 통과: [N]개 +- 실패: [N]개 +- 실패한 테스트 (있다면): + - [테스트명]: [실패 원인] + +### 패키지 +- 상태: [성공/실패/스킵] +- 아티팩트: [파일 경로] + +### 권장 조치 +1. [조치1] +2. [조치2] +``` + +## 인자 + +`$ARGUMENTS`: 옵션 지정 +- `compile` - 컴파일만 +- `test` - 컴파일 + 테스트 +- `package` - 전체 패키지 빌드 +- (없음) - 컴파일만 (기본값) + +예시: +- `/build-check` - 컴파일 체크 +- `/build-check test` - 테스트 포함 +- `/build-check package` - 전체 빌드 diff --git a/.claude/commands/clarify.md b/.claude/commands/clarify.md new file mode 100644 index 0000000..e7b0761 --- /dev/null +++ b/.claude/commands/clarify.md @@ -0,0 +1,66 @@ +# /clarify - 요구사항 명확화 + +새로운 기능이나 버그 수정 요청 시 요구사항을 명확히 하기 위한 질문을 생성합니다. + +## 사용 시점 + +- 사용자 요청이 모호할 때 +- 여러 구현 방법이 가능할 때 +- 비즈니스 요구사항 확인이 필요할 때 + +## 질문 카테고리 + +### 1. 기능 범위 +- 이 기능의 정확한 범위는 무엇인가요? +- 어떤 서비스/컴포넌트가 이 기능을 사용하나요? +- 기존 기능과의 관계는 어떻게 되나요? + +### 2. API 설계 +- REST API 엔드포인트 설계가 필요한가요? +- 요청/응답 형식은 어떻게 되나요? +- 기존 API 패턴을 따르나요? + +### 3. 데이터 +- 어떤 데이터가 필요한가요? +- 데이터 소스는 무엇인가요? (DB, 외부 API, 파일) +- 데이터 영속성이 필요한가요? + +### 4. 에러 처리 +- 예상되는 에러 케이스는 무엇인가요? +- 에러 시 어떻게 처리해야 하나요? (재시도, 로깅, 알림) + +### 5. 성능 +- 예상 데이터 양은 얼마나 되나요? +- 배치 처리가 필요한가요? +- 성능 요구사항이 있나요? + +### 6. 배포/환경 +- 특정 환경(dev/qa/prod)에서만 동작해야 하나요? +- 프로파일별 설정이 필요한가요? 
+ +## 출력 형식 + +```markdown +## 요구사항 명확화 질문 + +### 기능 범위 +1. [질문1] +2. [질문2] + +### API 설계 +1. [질문1] + +### 데이터 +1. [질문1] + +... + +--- +답변을 바탕으로 구현 계획을 수립하겠습니다. +``` + +## 인자 + +`$ARGUMENTS`: 사용자의 요청 내용을 요약해서 입력 + +예: `/clarify 선박 위치 배치 저장 기능` diff --git a/.claude/commands/perf-check.md b/.claude/commands/perf-check.md new file mode 100644 index 0000000..a6a9eb0 --- /dev/null +++ b/.claude/commands/perf-check.md @@ -0,0 +1,72 @@ +# /perf-check - 성능 체크 명령어 + +Spring Boot 배치 애플리케이션의 성능 관련 이슈를 점검합니다. + +## 분석 영역 + +### 1. 데이터베이스 성능 +- JPA/MyBatis 쿼리 분석 +- N+1 문제 확인 +- 인덱스 활용 여부 +- Batch Insert/Update 적용 여부 + +### 2. 메모리 관리 +- 대량 데이터 처리 시 메모리 사용 +- Stream 활용 여부 +- 페이징 처리 적용 여부 + +### 3. 배치 처리 +- Chunk 크기 적절성 +- 병렬 처리 설정 +- Reader/Writer 최적화 + +### 4. 커넥션 관리 +- 커넥션 풀 설정 (HikariCP) +- 트랜잭션 범위 적절성 +- 커넥션 누수 가능성 + +### 5. 외부 통신 +- HTTP Client 설정 (타임아웃, 커넥션 풀) +- 재시도 정책 +- Circuit Breaker 패턴 적용 + +## 출력 형식 + +```markdown +## 성능 체크 결과 + +### 데이터베이스 +- [ ] N+1 문제: [발견 여부] +- [ ] Batch 처리: [적용 현황] +- [ ] 인덱스 활용: [상태] + +### 메모리 +- [ ] 대량 데이터 처리: [상태] +- [ ] Stream 활용: [적용 여부] +- [ ] 페이징: [적용 여부] + +### 배치 처리 +- [ ] Chunk 크기: [값 및 적절성] +- [ ] 병렬 처리: [설정 상태] + +### 커넥션 관리 +- [ ] 풀 설정: [상태] +- [ ] 트랜잭션 범위: [적절성] + +### 외부 통신 +- [ ] 타임아웃 설정: [상태] +- [ ] 재시도 정책: [적용 여부] + +### 우선순위 개선 항목 +1. [항목1] - 예상 효과: [설명] +2. [항목2] - 예상 효과: [설명] +``` + +## 인자 + +`$ARGUMENTS`: 특정 영역만 체크 (db, memory, batch, connection, external) + +예시: +- `/perf-check` - 전체 체크 +- `/perf-check db` - 데이터베이스만 체크 +- `/perf-check batch` - 배치 처리만 체크 diff --git a/.claude/commands/wrap.md b/.claude/commands/wrap.md new file mode 100644 index 0000000..f516029 --- /dev/null +++ b/.claude/commands/wrap.md @@ -0,0 +1,65 @@ +# /wrap - Session Wrap-up Command + +세션 종료 시 다음 작업들을 병렬로 수행하는 명령어입니다. + +## 실행할 작업들 (병렬 에이전트) + +### 1. 문서 업데이트 체크 +다음 파일들의 업데이트 필요 여부를 확인하세요: +- `CLAUDE.md`: 새로운 패턴이나 컨벤션이 발견되었는지 +- 이번 세션에서 중요한 기술 결정이 있었는지 + +### 2. 
반복 패턴 분석 +이번 세션에서 반복적으로 수행한 작업이 있는지 분석하세요: +- 비슷한 코드 패턴을 여러 번 작성했는지 +- 동일한 명령어를 반복 실행했는지 +- 자동화할 수 있는 워크플로우가 있는지 + +발견된 패턴은 `/commands`로 자동화를 제안하세요. + +### 3. 학습 내용 추출 +이번 세션에서 배운 내용을 정리하세요: +- 새로 발견한 코드베이스의 특성 +- 해결한 문제와 그 해결 방법 +- 앞으로 주의해야 할 점 + +### 4. 미완성 작업 정리 +완료하지 못한 작업이 있다면 정리하세요: +- TODO 리스트에 남은 항목 +- 다음 세션에서 계속해야 할 작업 +- 블로커나 의존성 이슈 + +### 5. 코드 품질 체크 +이번 세션에서 수정한 파일들에 대해: +- 컴파일 에러가 없는지 (`mvn compile`) +- 테스트가 통과하는지 (`mvn test`) + +## 출력 형식 + +```markdown +## Session Summary + +### 완료한 작업 +- [작업1] +- [작업2] + +### 문서 업데이트 필요 +- [ ] CLAUDE.md: [업데이트 내용] + +### 발견된 패턴 (자동화 제안) +- [패턴]: [자동화 방법] + +### 학습 내용 +- [내용1] +- [내용2] + +### 미완성 작업 +- [ ] [작업1] +- [ ] [작업2] + +### 코드 품질 +- Compile: [결과] +- Test: [결과] +``` + +이 명령어를 실행할 때 Task 도구를 사용하여 여러 에이전트를 **병렬로** 실행하세요. diff --git a/.claude/rules/code-style.md b/.claude/rules/code-style.md index 0cb1563..f5f0203 100644 --- a/.claude/rules/code-style.md +++ b/.claude/rules/code-style.md @@ -44,7 +44,21 @@ - `@Builder` 허용 - `@Data` 사용 금지 (명시적으로 필요한 어노테이션만) - `@AllArgsConstructor` 단독 사용 금지 (`@Builder`와 함께 사용) -- `@Slf4j` 로거 사용 + +## 로깅 +- `@Slf4j` (Lombok) 로거 사용 +- SLF4J `{}` 플레이스홀더에 printf 포맷 사용 금지 (`{:.1f}`, `{:d}`, `{%s}` 등) +- 숫자 포맷이 필요하면 `String.format()`으로 변환 후 전달 + ```java + // 잘못됨 + log.info("처리율: {:.1f}%", rate); + // 올바름 + log.info("처리율: {}%", String.format("%.1f", rate)); + ``` +- 예외 로깅 시 예외 객체는 마지막 인자로 전달 (플레이스홀더 불필요) + ```java + log.error("처리 실패: {}", id, exception); + ``` ## 예외 처리 - 비즈니스 예외는 커스텀 Exception 클래스 정의 diff --git a/.sdkmanrc b/.sdkmanrc new file mode 100644 index 0000000..128dde5 --- /dev/null +++ b/.sdkmanrc @@ -0,0 +1,3 @@ +# Enable auto-env through SDKMAN config +# Run 'sdk env' in this directory to switch versions +java=17.0.18-amzn diff --git a/docs/cache-benchmark-report.md b/docs/cache-benchmark-report.md new file mode 100644 index 0000000..4986f92 --- /dev/null +++ b/docs/cache-benchmark-report.md @@ -0,0 +1,314 @@ +# 일일 캐시 성능 벤치마크 보고서 + +## 선박 항적 리플레이 서비스 — 캐시 vs DB 정량 비교 + +| 항목 | 
내용 | +|------|------| +| 측정일 | 2026-02-07 | +| 대상 시스템 | Signal Batch — ChunkedTrackStreamingService (WebSocket 스트리밍) | +| 운영 환경 | prod 프로파일, Query DB 커넥션 풀 180 | +| 캐시 구성 | DailyTrackCacheManager — D-1 ~ D-7 인메모리 캐시, STRtree 공간 인덱스 | +| 측정 방식 | QueryBenchmark 내부 클래스 → `cache-benchmark.log` JSON 기록 | +| 샘플 수 | 12건 (CACHE 3, DB 2, HYBRID 5, CACHE+Today 2) | + +--- + +## 1. 측정 경로 분류 + +쿼리 시간 범위에 따라 4가지 경로로 처리된다. + +| 경로 | 설명 | 데이터 소스 | +|------|------|------------| +| **CACHE** | 요청 일자 전체가 인메모리 캐시에 존재 | 메모리 | +| **DB** | 캐시 미스 — Daily 테이블 직접 조회 | DB | +| **HYBRID** | 캐시 히트 일자 + 캐시 범위 밖 일자 DB 조회 | 메모리 + DB | +| **CACHE+Today** | 캐시 히트 + 오늘 데이터(Hourly/5min 테이블) | 메모리 + DB | + +### 오늘 데이터 구간 구조 + +오늘(D-0) 데이터는 캐시 대상이 아니며, 시간 경과에 따라 두 테이블로 분할 조회된다. + + +``` + 오늘 00:00 ~ 12:00 12:00 ~ 12:35 현재(12:40) +├──── Hourly 테이블 조회 ──────┤── 5min 조회 ──┤ + (12개 범위, 1시간 단위) (7개 범위, 5분 단위) +``` + +- **Hourly**: 자정부터 약 1시간 전까지 → 시간 단위 범위 (약 12개) +- **5min**: 최근 약 1시간 이내 → 5분 단위 범위 (약 7개) +- 각 범위마다 DB 커넥션 1회 + Viewport Pass1 1회 발생 → 오늘 구간 커넥션 = 범위 수 × 2 + +--- + +## 2. 
전체 측정 데이터 + +### 2.1 요약 테이블 + +| # | 경로 | Zoom | 일수 | 캐시/DB | 선박 수 | 트랙 수 | 응답시간(ms) | DB커넥션 | DB쿼리시간(ms) | +|---|------|------|------|---------|---------|---------|-------------|----------|---------------| +| 1 | CACHE | 10 | 3 | 3/0 | 443 | 986 | **575** | 3 | 0 | +| 2 | DB | 10 | 2 | 0/2 | 352 | 587 | **7,221** | 8 | 3,475 | +| 3 | DB | 10 | 2 | 0/2 | 12,253 | 18,502 | **8,195** | 19 | 1,443 | +| 4 | CACHE | 10 | 2 | 2/0 | 10,690 | 16,942 | **1,439** | 2 | 0 | +| 5 | CACHE | 10 | 2 | 2/0 | 10,690 | 16,942 | **1,374** | 2 | 0 | +| 6 | HYBRID | 8 | 5 | 3/2 | 9,958 | 29,362 | **8,900** | 16 | 3,301 | +| 7 | HYBRID | 9 | 5 | 3/2 | 547 | 1,927 | **1,373** | 11 | 550 | +| 8 | HYBRID | 8 | 5 | 3/2 | 4,589 | 12,422 | **2,910** | 12 | 715 | +| 9 | HYBRID | 8 | 5 | 3/2 | 5,760 | 23,283 | **3,651** | 15 | 1,048 | +| 10 | CACHE+Today | 10 | 3+오늘 | 3/0 | 105 | 301 | **6,091** | 56 | 0 | +| 11 | HYBRID | 8 | 5 | 3/2 | 52,151 | 162,849 | **105,212** | 45 | 93,319 | +| 12 | CACHE+Today | 12 | 3+오늘 | 3/0 | 6,990 | 17,024 | **9,744** | 56 | 0 | + +### 2.2 DB 커넥션 세분화 + +| # | 경로 | 합계 | Viewport Pass1 | Daily Pages | Hourly/5min | TableCheck | +|---|------|------|----------------|-------------|-------------|------------| +| 1 | CACHE | 3 | 0 | 0 | 0 | **3** | +| 2 | DB | 8 | 2 | 2 | 0 | 2 | +| 3 | DB | 19 | 2 | 2 | 0 | 2 | +| 4 | CACHE | 2 | 0 | 0 | 0 | **2** | +| 5 | CACHE | 2 | 0 | 0 | 0 | **2** | +| 6 | HYBRID | 16 | 2 | 2 | 0 | 5 | +| 7 | HYBRID | 11 | 2 | 2 | 0 | 5 | +| 8 | HYBRID | 12 | 2 | 2 | 0 | 5 | +| 9 | HYBRID | 15 | 2 | 2 | 0 | 5 | +| 10 | CACHE+Today | 56 | **21** | 0 | **21** | **14** | +| 11 | HYBRID | 45 | 2 | **6** | 0 | 5 | +| 12 | CACHE+Today | 56 | **21** | 0 | **21** | **14** | + +> 합산 검증: 전 12건 모두 세분화 카운터 합 = 합계 일치 확인 (VesselInfo 카운터 포함, 표에서는 생략). 
+ +**CACHE+Today (#10, #12) 커넥션 56건 내역**: +- Hourly/5min 21건: 오늘 00:00~현재 구간 (Hourly 약 12건 + 5min 약 7건 + 폴백) +- Viewport Pass1 21건: 동일 범위에 대한 뷰포트 교차 선박 수집 (범위당 1회) +- TableCheck 14건: Daily 3건 + Hourly/5min 존재 확인 약 11건 + +### 2.3 캐시 경로 간소화 지표 + +캐시 경로에서는 원본 데이터를 메모리에 보유하므로 간소화 전/후를 측정할 수 있다. + +| # | 경로 | Zoom | 원본 포인트 | 간소화 후 | 압축률 | 간소화 시간(ms) | 배치 감소 | +|---|------|------|------------|----------|--------|----------------|-----------| +| 1 | CACHE | 10 | 1,083,566 | 11,212 | 99% | 133 | 50→3 (94%) | +| 4 | CACHE | 10 | 13,502,970 | 172,066 | 99% | 1,075 | 602→10 (98%) | +| 5 | CACHE | 10 | 13,502,970 | 172,066 | 99% | 981 | 602→10 (98%) | +| 6 | HYBRID | 8 | 7,582,515 | 152,734 | 98% | 500 | 335→12 (96%) | +| 7 | HYBRID | 9 | 1,049,434 | 11,634 | 99% | 74 | 50→5 (90%) | +| 8 | HYBRID | 8 | 1,618,310 | 61,434 | 96% | 125 | 72→5 (93%) | +| 9 | HYBRID | 8 | 3,202,500 | 155,633 | 95% | 277 | 137→12 (91%) | +| 10 | CACHE+Today | 10 | 355,256 | 4,159 | 99% | 24 | 17→6 (65%) | +| 11 | HYBRID | 8 | 41,634,918 | 732,470 | 98% | 2,411 | 1,813→42 (98%) | +| 12 | CACHE+Today | 12 | 14,404,225 | 259,541 | 98% | 1,258 | 639→23 (96%) | + +> DB 경로(#2, #3)는 SQL 레벨에서 `ST_Simplify` 적용 후 수신하므로 앱 레벨 압축률 산출 불가 (before = after). + +--- + +## 3. 
경로별 정량 비교 + +### 3.1 CACHE vs DB — 동일 규모 직접 비교 + +#### 대규모: #4 CACHE vs #3 DB + +| 지표 | DB (#3) | CACHE (#4) | 개선 | +|------|---------|------------|------| +| 선박 수 | 12,253 | 10,690 | (유사 규모) | +| **응답시간** | 8,195 ms | 1,439 ms | **5.7배 빨라짐** | +| **DB 커넥션** | 19 | 2 | **89% 감소** | +| DB 쿼리 시간 | 1,443 ms | 0 ms | **100% 절감** | +| 배치 전송 수 | 11 | 10 | 유사 | + +#### 소규모: #2 DB vs #1 CACHE + +| 지표 | DB (#2) | CACHE (#1) | 개선 | +|------|---------|------------|------| +| 선박 수 | 352 | 443 | (유사 규모) | +| **응답시간** | 7,221 ms | 575 ms | **12.6배 빨라짐** | +| **DB 커넥션** | 8 | 3 | **63% 감소** | +| DB 쿼리 시간 | 3,475 ms | 0 ms | **100% 절감** | +| 배치 전송 수 | 2 | 3 | 유사 | + +### 3.2 HYBRID 경로 — 규모별 성능 변화 + +5일 범위 쿼리 (캐시 3일 + DB 2일): + +| # | 선박 수 | 응답시간 | DB커넥션 | DB쿼리시간 | +|---|---------|---------|----------|-----------| +| 7 | 547 | 1,373 ms | 11 | 550 ms | +| 8 | 4,589 | 2,910 ms | 12 | 715 ms | +| 9 | 5,760 | 3,651 ms | 15 | 1,048 ms | +| 6 | 9,958 | 8,900 ms | 16 | 3,301 ms | +| 11 | 52,151 | 105,212 ms | 45 | 93,319 ms | + +- 소규모(~500척): 캐시 일자가 대부분의 처리를 흡수하여 **1.4초** 수준으로 응답. +- 중규모(5K~10K척): DB 쿼리 부담 증가하나 캐시 일자가 완충하여 **3~9초** 수준. +- 대규모(52K척): 캐시 미스 일자의 데이터량이 크면 DB 의존도가 높아져 **100초+** 수준. +- 캐시 적용 일수가 많을수록(현재 3/5일 = 60%) HYBRID 경로의 DB 부담이 경감된다. + +### 3.3 CACHE+Today 경로 — 오늘 데이터 포함 쿼리 + +| # | Zoom | 선박 수 | 응답시간 | DB커넥션 | 오늘 구간 커넥션 | +|---|------|---------|---------|----------|----------------| +| 10 | 10 | 105 | 6,091 ms | 56 | 42 (H5m 21 + VP 21) | +| 12 | 12 | 6,990 | 9,744 ms | 56 | 42 (H5m 21 + VP 21) | + +**핵심 발견**: +- 두 쿼리 모두 동일한 시간 범위(3일+오늘)이므로 커넥션 구조가 동일하며, 뷰포트 크기만 다름. +- 오늘 구간(00:00~현재)만으로 **42건의 DB 커넥션**이 발생하여, 순수 CACHE 경로(2~3건)와 큰 차이를 보인다. +- 선박 수가 적은 #10(105척)도 6초가 소요되며, 이는 오늘 구간의 범위별 개별 커넥션 오버헤드가 원인이다. 
+ +### 3.4 줌 레벨별 간소화 효과 + +| Zoom | 대표 # | 원본 포인트 | 간소화 후 | 압축률 | 선박당 평균 포인트 | +|------|--------|------------|----------|--------|------------------| +| 8 | #6 | 7,582,515 | 152,734 | 98% | 15.3 | +| 9 | #7 | 1,049,434 | 11,634 | 99% | 21.3 | +| 10 | #4 | 13,502,970 | 172,066 | 99% | 16.1 | +| 12 | #12 | 14,404,225 | 259,541 | 98% | 37.1 | + +- 줌 8~10: 선박당 15~21 포인트로 압축 — 해역 수준 조회에 최적. +- 줌 12: 선박당 37 포인트 — 항만 수준 상세 조회에서 더 많은 포인트를 유지. +- 전 줌 레벨에서 95~99% 압축률 달성. + +--- + +## 4. DB 커넥션 구성 분석 + +### 4.1 경로별 커넥션 구성 패턴 + +``` +CACHE (순수) [==TC==] 2~3건 + TableCheck만 발생 + +DB (순수) [VP][DA][..기타..][TC] 8~19건 + 각 항목 균등 분포 + +HYBRID [VP][DA][..기타..........][TC---] 11~45건 + 규모에 비례 증가 + +CACHE+Today [VP----------][H5m---------][TC------] 56건 + 오늘 구간의 Hourly/5min + Viewport가 대부분 +``` + +### 4.2 커넥션 풀 영향 분석 + +Query DataSource 커넥션 풀 180 기준: + +| 경로 | 쿼리당 사용 | 동시 10쿼리 시 누적 | 풀 압박 수준 | +|------|------------|-------------------|------------| +| CACHE | 2~3 | 30 | 매우 낮음 (17%) | +| HYBRID (소규모) | 11~15 | 150 | 보통 (83%) | +| DB | 8~19 | 190 | 보통~높음 | +| CACHE+Today | 56 | 560 | 높음 | + +> 커넥션은 순간 점유가 아닌 순차 사용이므로 실제 동시 점유 수는 위 수치보다 작다. 캐시 적용으로 전체 쿼리 중 CACHE 경로 비율이 높아지면 풀 전체 부담이 크게 감소한다. + +--- + +## 5. 종합 성능 비교 + +### 5.1 핵심 개선 지표 + +| 지표 | DB 경로 | CACHE 경로 | 개선율 | +|------|---------|------------|--------| +| 응답시간 (대규모, 만 척 이상) | 8,195 ms | 1,439 ms | **5.7배** | +| 응답시간 (소규모, 수백 척) | 7,221 ms | 575 ms | **12.6배** | +| DB 커넥션 수 (대규모) | 19건 | 2건 | **89% 감소** | +| DB 커넥션 수 (소규모) | 8건 | 3건 | **63% 감소** | +| DB 쿼리 시간 | 1,443~3,475 ms | 0 ms | **100% 절감** | +| 포인트 간소화 | SQL ST_Simplify | 앱 레벨 95~99% | 캐시만 측정 가능 | + +### 5.2 경로별 응답시간 분포 + +``` + 응답시간 (ms, 로그 스케일 아님) +경로 0 2,000 4,000 6,000 8,000 10,000 +CACHE (순수) |█| 575~1,439 +HYBRID (소규모) |██| 1,373 +HYBRID (중규모) |█████| 2,910~3,651 +CACHE+Today |████████████| 6,091~9,744 +DB (순수) |████████████████| 7,221~8,195 +HYBRID (대규모) |██████████████████| 8,900 +``` + +> HYBRID 대규모(#11, 52K척, 105초)는 스케일 초과로 표시 생략. 
+ +### 5.3 캐시 적용에 따른 운영 시나리오별 예측 + +D-1 ~ D-7 캐시가 적용된 상태에서: + +| 사용 패턴 | 예상 경로 | 예상 응답시간 | DB 커넥션 | +|----------|----------|-------------|----------| +| 과거 1~7일만 조회 | CACHE | **0.5~1.5초** | 2~3건 | +| 과거 수일 + 오늘 | CACHE+Today | 6~10초 | ~56건 | +| 7일 이전 과거 포함 | HYBRID / DB | 1~9초 (규모 의존) | 8~45건 | + +--- + +## 6. 캐시 범위 확장 시 권장 구성 + +현재 D-1 ~ D-7 캐시 구성에서 조회 기간 범위를 확장하고자 할 경우, 아래 구성을 권장한다. + +### 6.1 현재 구성 + +```yaml +cache: + daily-track: + enabled: true + retention-days: 7 # D-1 ~ D-7 캐시 + max-memory-gb: 6 # 최대 메모리 사용량 + warmup-async: true # 비동기 워밍업 +``` + +- 7일 이내 과거 조회: CACHE 경로 (0.5~1.5초) +- 7일 초과 과거 포함: HYBRID/DB 경로로 폴백 + +### 6.2 확장 권장안 + +| 시나리오 | retention-days | max-memory-gb | 예상 효과 | +|----------|---------------|---------------|----------| +| **현재** | 7 | 6 | 1주일 이내 CACHE, 이후 DB | +| **2주 확장** | 14 | 12 | 2주 리플레이까지 CACHE 커버 | +| **1개월 확장** | 30 | 25 | 월간 분석 조회까지 CACHE 커버 | + +**확장 시 고려사항**: + +1. **메모리 산정**: 현재 7일 캐시 ≈ 4GB 기준, 선형 증가 추정. + - 14일: ~12GB, 30일: ~25GB + - 서버 가용 메모리와 JVM 힙 설정(`-Xmx`) 여유 확인 필요. + +2. **워밍업 시간**: retention-days 증가에 비례하여 초기 로드 시간 증가. + - 7일: 약 1~2분, 14일: 약 2~4분, 30일: 약 5~10분 (비동기이므로 서비스 가용성 영향 없음) + +3. **HYBRID 비율 감소**: retention-days 확장 시 DB 폴백 빈도가 줄어, HYBRID 경로가 줄고 순수 CACHE 경로 비율이 증가한다. 이는 DB 커넥션 풀 부담 경감에 직접 기여한다. + +4. **CACHE+Today 경로는 retention-days와 무관**: 오늘(D-0) 데이터는 항상 Hourly/5min 테이블에서 DB 조회한다. 이 구간의 커넥션 최적화는 별도 과제이다. + +### 6.3 단계적 확장 전략 + +``` +Phase 1 (현재) : retention-days=7, max-memory-gb=6 → 1주 커버 +Phase 2 (권장) : retention-days=14, max-memory-gb=12 → 2주 커버, 주간 비교 분석 지원 +Phase 3 (선택) : retention-days=30, max-memory-gb=25 → 월간 커버, 장기 항적 분석 지원 +``` + +각 단계 전환 시 서버 메모리 여유와 워밍업 시간을 모니터링하며, JVM 힙 설정을 함께 조정한다. + +--- + +## 7. 결론 + +### 7.1 캐시 효과 확인 + +1. **응답시간**: 순수 CACHE 경로에서 DB 대비 **5.7~12.6배** 빨라짐 확인. +2. **DB 커넥션**: 순수 CACHE 경로에서 DB 대비 **63~89%** 감소 확인. +3. **간소화**: 캐시 경로에서 줌 레벨에 따라 **95~99%** 포인트 압축, 배치 전송 수 **90~98%** 감소. +4. **DB 쿼리 시간**: CACHE 경로에서 **0ms** — DB 부하 완전 제거. 
+ +### 7.2 운영 권장사항 + +| 항목 | 현황 | 권장 방향 | +|------|------|----------| +| 캐시 보존 기간 | 7일 | 사용 패턴에 따라 14~30일로 확장 검토 | +| CACHE+Today 커넥션 | 오늘 구간 범위별 개별 DB 커넥션 (56건) | 오늘 데이터 범위 병합 또는 별도 캐시 검토 | \ No newline at end of file diff --git a/docs/cache-benchmark-summary.md b/docs/cache-benchmark-summary.md new file mode 100644 index 0000000..393de8c --- /dev/null +++ b/docs/cache-benchmark-summary.md @@ -0,0 +1,102 @@ +# 일일 캐시 성능 개선 요약보고서 + +| 항목 | 내용 | +|------|------| +| 측정일 | 2026-02-07 | +| 대상 | 선박 항적 리플레이 서비스 (WebSocket 스트리밍) | +| 개선 내용 | 일일(Daily) 집계 데이터 7일분 인메모리 캐시 적용 | +| 측정 건수 | 12건 (CACHE 3, DB 2, HYBRID 5, CACHE+Today 2) | + +--- + +## 1. 핵심 성능 개선 지표 + +| 지표 | DB 경로 (개선 전) | CACHE 경로 (개선 후) | 개선율 | +|------|-------------------|---------------------|--------| +| **응답시간** (만 척 이상) | 8.2초 | 1.4초 | **5.7배 단축** | +| **응답시간** (수백 척) | 7.2초 | 0.6초 | **12.6배 단축** | +| **DB 커넥션** (만 척 이상) | 19건 | 2건 | **89% 감소** | +| **DB 커넥션** (수백 척) | 8건 | 3건 | **63% 감소** | +| **DB 쿼리 시간** | 1.4 ~ 3.5초 | 0초 | **100% 절감** | +| **포인트 압축률** | SQL 처리 | 앱 레벨 95 ~ 99% | 동등 품질 유지 | + +--- + +## 2. 경로별 응답시간 비교 + +``` +경로 응답시간 +CACHE (순수) ██ 0.6 ~ 1.4초 +HYBRID (소규모) ██ 1.4초 +HYBRID (중규모) █████ 2.9 ~ 3.7초 +CACHE+Today ████████████ 6.1 ~ 9.7초 +DB (순수) ████████████████ 7.2 ~ 8.2초 +``` + +- **CACHE**: 캐시 범위 내 과거 데이터만 조회 시, 가장 빠른 응답 +- **HYBRID**: 캐시 + DB 병합 — 캐시 비율이 높을수록 DB 부담 경감 +- **CACHE+Today**: 오늘 데이터 포함 시, Hourly/5min 테이블 개별 조회로 커넥션 다수 발생 + +--- + +## 3. DB 커넥션 풀 부담 변화 + +Query DataSource 커넥션 풀 180 기준: + +| 경로 | 쿼리당 커넥션 | 동시 10쿼리 | 풀 사용률 | +|------|-------------|------------|----------| +| CACHE | 2 ~ 3 | ~30 | **17%** (여유) | +| HYBRID (소규모) | 11 ~ 15 | ~150 | 83% | +| DB | 8 ~ 19 | ~190 | 100%+ | + +> 캐시 적용으로 전체 쿼리 중 CACHE 경로 비율이 높아지면, DB 커넥션 풀 전체 부담이 크게 감소한다. + +--- + +## 4. 
간소화 파이프라인 효과 + +캐시 경로에서 원본 데이터 → 3단계 간소화(Douglas-Peucker + 거리/시간 샘플링 + 줌 레벨 샘플링) 적용: + +| 줌 레벨 | 원본 포인트 | 간소화 후 | 압축률 | 선박당 평균 | +|---------|------------|----------|--------|-----------| +| 8 | 7.6M | 153K | 98% | 15 포인트 | +| 9 | 1.0M | 12K | 99% | 21 포인트 | +| 10 | 13.5M | 172K | 99% | 16 포인트 | +| 12 | 14.4M | 260K | 98% | 37 포인트 | + +- 간소화 CPU 시간: 24ms ~ 1,258ms (DB 대기 없이 순수 CPU 연산) +- 전 줌 레벨에서 95 ~ 99% 데이터 압축 달성 + +--- + +## 5. 운영 시나리오별 예상 성능 + +| 사용 패턴 | 예상 경로 | 예상 응답시간 | DB 커넥션 | +|----------|----------|-------------|----------| +| 과거 1~7일만 조회 | CACHE | **0.6 ~ 1.4초** | 2~3건 | +| 과거 수일 + 오늘 | CACHE+Today | 6 ~ 10초 | ~56건 | +| 7일 이전 과거 포함 | HYBRID / DB | 1 ~ 9초 (규모 의존) | 8~45건 | + +--- + +## 6. 향후 확장 권장안 + +| 시나리오 | 캐시 보존 기간 | 메모리 | 효과 | +|----------|---------------|--------|------| +| 현재 | 7일 | 6GB | 1주 이내 CACHE 경로 | +| 2주 확장 | 14일 | 12GB | 주간 비교 분석 지원 | +| 1개월 확장 | 30일 | 25GB | 월간 항적 분석 지원 | + +> 캐시 보존 기간 확장 시 HYBRID 경로 비율이 줄고 순수 CACHE 비율 증가 → DB 부담 추가 경감 + +--- + +## 7. 결론 + +| 항목 | 효과 | +|------|------| +| 응답 속도 | DB 대비 **5.7 ~ 12.6배** 단축 | +| DB 부하 | 커넥션 **63 ~ 89%** 감소, 쿼리 시간 **100%** 절감 | +| 데이터 품질 | 줌 레벨별 95 ~ 99% 압축, DB 경로와 동등 품질 | +| 동시 사용자 수용 | DB 커넥션 경합 해소로 동시 처리 가능 수 증가 | +| 확장성 | 캐시 보존 기간 확장으로 추가 개선 가능 | diff --git a/docs/일일 캐시 성능 벤치마크 보고서.docx b/docs/일일 캐시 성능 벤치마크 보고서.docx new file mode 100644 index 0000000000000000000000000000000000000000..bcfb8233e71290d5ee728e3eabccc60a8c30a4db GIT binary patch literal 49015 zcmeFX^LJ%!5H5IP+a0rGcRIFh+w62~t7F@CI<}3Iq+{DWu`&7Xy@R{vPng+1?6tL4 zRlWPEx1NHc36gm`+Z~Q`}($QnL8NT*X;K!p0BX%aq$t*iv>dGVw&$EX;%AOs;Nl7jWy41w> zWdYZV!4;02hNWJGGmyz5tY}~cA^D8mj-*FziN&O5p6ewcb66n>|BmT?5@!YDS{;g) z$_>GqJ-*oy)JGiyOJ{UR)1-d$Bg+q4$cVy0P+aIk7q}uhu)g7FJ_}kR>C%K%^8Tc%{ zgfFi(a5S@ZVq*O7{QrCF|A$@l|6zJnLf@CSA_{_n-h*bklvew23+0(i{;lDzz`*KA z$zrT6TQ7aQ^DZxg>zx=$Ow7(D{_}Lo5_j27(!2VWqJ|XF1-JODKcMs4=?+K^=`3b( zU;Mp`n0feM{yt7FMKc~0sf86W{S7+)=}+2ZFUA3_=)*xNf)yFf^t=g0Lt&1C+R|g1 
zFI!$Z%fh0yq>dL%1)o@VC}H~)o-e$)KwT;uLtJeCX1(5^>SdYltZQ71!Q<)|P(ejQ>l?x|#r7LMOD7Iugrrh+@pCUie8nc|85nRvaz;-ak~0p){bAe7Kt4bl-HCTqZ~1IjOqH!L)y@ zbT==Afbaan3Bl2-uvuHLw6aUxx-?d=%0B0m-*0#Ox%I4+d)f^JMnQGS!prWC+oJqM z-l=QmoKGj$?3_FHtj17E+^$?-FLt=Dd4;p5*@c9V{)u0s558@&Avy4oRe0J5Cl5EHzhCFK4O~s!R`|jX@SY# z1wASWVO-3;XF6je;3Cz+=N!ul6?^nfB{y8U}ls8X}BmZmIWlNLCJY^-RM1 z4>8E5vTjwEFSojlNL=Q+A=&)EyV#+ZJsW#^iW=6Fp}rfN2eMgThbzv)A#=etXc*;N zXZ=jr&SKj$?)}mI>QMDjb@k-Zs?D_6!-1y9_&@d3o!1aWlkTZbm+pEjshj9zmA~0* zRSR?$i-kmD=Q>(U`4fU@W*?d?uS(WxV{Go9orrWhaD(#+MXzRi= zIC-P6U-2x=A=tFm*NScTI8~-B5b(C8;nxD`>io6MiL6nji!NEytjywf&3E0!u^(=s zcL8Z}TbAB#RZZ*hFX;Z-dgXt?$+dQ6+_$C*y-_u(tVfT2Q}0?WTVzs;Wk@}=b8UM! zXj%Pv$!H5|?)jznX{+S5(^V*cGBH}|> z4-wD2pFNB9H2XS`KSbQuJjzqF*e>ed(`Ht}_`TQPYRt|(k-zdE9^E%BJS{oRuviz~ zLTMIUY9M7{BfI4MobjsHQovgGam+QMOgmU*U!AHHvd@P>tHq?8#Dz=@< zfnW2+Sv4lGkr%4z=v^Qy-dNl#yEJ@)jvYTOhf`laFT3~%Jd{A=X9G;<55MoiFGKoZ zh7%MJMPh?BxIar#aD}JCY=8H@Y;p5=ykkOFwap$32dBvJh3fj0&f>RU8X{@IM5)Av zS+kZ;)f6~G$+Ba#eB7x;_)BrKMr;d#Fj6wx9_N#3jq}_sWob2g{aG}#>y9UJL3~zz ztYH6we_ABP^dsr+D$GP!q{I+va-$*zvPG{PL4NNeD%y*rCyWUlrOsLM(SnH8Ocq*_{SSLzM=)wYBAsmF&+$<`-Ne=96DE2?$N zI7VjVMA?^eoA~|aSKsws2$}HX?xbjH@0f~EX<6Y1H@B5eI{{P)$Kb_;@l(+AhHzK!?HpVd%h z6UBbrn$-p@7==%31B(w9UtD+jq#|JF?YN@2#wcWXBK+x}M|;jV@cp5Qg!j&pQw!Ta z9XjzbUtts1N(d`mxMJlysIG6>Ht}(@C3bbqAl72sNP6@Pbx+l>C@LC!*;21PsxRYL z;mP>wOY$JeEgjuaMITIdiSOCE;r5yEq+q>vFTlU~YQNdS+Oso8D=3 zz82k|WV4L9ny?-7KUpRisiD|pPKvW5Cl8OWuAZ3q+BGQY#L7G~=5m96al9x{uD2`e z{9W)e&F6+>F%7LARJjjl>r&!s)8|Po9{Idm{CvsXqnfnnsD{BvqaBbP2_jL24Wdjj z_&p%20hIktKq&p4QJ9@)?)_ov<0A7mviPA%C@e18D#lv6^Ivo>fDL%a-V!y2JM-#d zjsC)^%yrrEQ7IIjVlqumldAvT^mu5~J~&al-%n@1q)w+HZR#mW&+n`p6^4$+RkCby zJA~b?fqqDE$hD6yVU02Qh^EV{XcLZqBYSUKO%#ns1;?M<5D&O`TL_c35ESpH zu+>AnccTeO?DIOj!QvY$cC2@Heetky@jwi1Lx^1-)Z(VI7=gF4zOlB}wffPoj%xm~ zNoi~vMWF?I=%X#RbYs7 z4XLWU|B?o5!5Ml&SHx?jMod2`{?QB#8&;B{B_F4bhd?x0{Ces0IyxFCcHOUs0{uD% z=Hvc`hOG@fpEj;$iBvMQa@firpwGd1&{)It>yKmIyY5)TUSucF+LY_raMPkjN@GNQ 
z01>0}kE_i+;)m|%k1}8%bF9M^4R)nzC~=wEyrn_F!?WnE{jfz>IrC|LR=SeBUW zp$h>`lMBb)_opl7w%Y$<$T*#j_G(-&FFuc-u@?%)6pn=Vw!AFrjYVEjI(9)6#`6}Vixv&SJKc_Lv~##)??Rht~| zkF2@*b^6?I8sxCmvu#`M|HYp6GKah1i+=g;wOo8`MI?^<&=%&C(B8DX z%ao20Tl$x|h54?og=dTmAeCw&SxN;-=!-K!*}1H2lTio>%Z6gK`Q$hQcY6@?k6@kB zd4?N~_11D+wtHFR2@~=V4Kod*OGo;VayJf|g1u|d1UZ@yo7H9ExNPRv9|m zd6+WS8@YbYZyIS!gXOP}mmBCW0)A*19)la` zlFzEkg0ACx&J@*(0^9h0I0v$Tnky^Xw^LJ%^>d&8av0|4msUj$}b1n_c!X*oAg~uR_s^8ZmwT^scY@yVq4$u z(9>c#MfW(N;QIL`@*mHdx3_2CkKkvR-{+C!Q#55);?N(k&-8G;4e8kineW`uQ>5{})dnTJ zraepZX|ecRQ;n!FX8)z_u#Y)~2c{}Rn$3nOo#!xJqeq6N^T-#jC7R@BIlW5XzEqXND^Tg@mcG#w{|`DbMjyhKbkkP zfl@y<9c*OPML4FE^9SJS8H_mNFc(AE8uyoYm5IjUxu(Q;2jUMyA9u~q_t(Xv*c&z< z@8fk%1+hwlquW3IF+5^NL2}x|f$*E-K!}uS9hydUB96;y0w6HmUfwj zg$zzDk*_;Gl&U3*itgF3vN$ORa7wS^%}O5&`Xfw?ppn721I;Ya>zp(SWe$A-)5iH* z&y45JcfaraTdW1Q26DnoD*?S|Cen^=H~$pYtWp0C;JTkiY{lCHD4`A0eL`l^>j*gk z$F1~bl9~eH6@O>ZE~X5-SxvCd;_xo`W$S+?5svumNU(*lV0BH1Tiw&omA24da*EeY zz|syY2K($^yzJKIiAJ{S_!q0uU%c*8&y8ZtyJp}SrppYQ-wd%3xd(_2*-H8q<%`OA z^aA%IMZnu~f&!STgQPr&nAx6pAu;&X7o9x7fMzw&srKnuK>7eP0_qa#^E4|wAWJu= z78QlTgJ4M`j8u{!_lGNBg?6NZSJQl=Dw0lQLzD)Z{zAUkjAKlNh?v`LQ3a;UVASyx zW2;sI8GRoe(i1p8t-T$8hQXB1>nPgFs@Tm^SvexzR?5|IQitd z0`4c743Y;hccb%eb48TWIwehyWl++N;i3%&z=KV=$6Qa8v$~#JjKQ7TRCF5io=9X` zW%C-EI{~>QVa>g*lmJecz?;FS+oykR4LRJ$K4}i);W*GLtns93zvwx((fUKKKzSzVEP>6Jos2`|$ zJ>*m~I=0+a_)qmou|t8$gQ+piKH56*iZlQmwS>B!)9AskI|xJ@LK!x!s!=5{gnP_f zu>FbXA*gl7bQpSz?kq=wbJn?EZQl8%vd@k{(wYQRPgXf8a3(fwbUQ->3bPUe{cT_T<4U~9md{CF@a}MQzbl`8E!yr z$r25?@c}o3NVz6BODSYE;1;3^l7RtxXo8NjW2^RQAt2Q@B->Mfrmr@(8fB2k2GH;h zF$dVT)wTNTFy8tfI87*>mi697ujuJ|Ya#fW``uCT3yIh!y-#gH8gVu!7);$Ty<(AEE zaDOz<47tt1j?fy?I~`enL!`2l;nX63cNkaId>khg5L7mbYx>e#XY@3WH|H)&r=z_e zCqPH`>G_XE>meB1o-Yb_p6p01Sek27A*&dMx?dDZHqTjk(B`ziZgBVCl0kkFlm8Hr z@gE{|oxt116M@UBGiJrS2nXgHw?fbT7^sXKU+ZjKhr5(D4+~S{&rWyden_)8dYCac zE*1b0AbO^#?Ka6W+*C*!zrdda)q2>9N=iv_iQ{jT%B0nu4PK9_6z>li6Ud-h9TYw> 
z12_z1lNs_S348)86G+*~iepH%S38&Wo>1j5pH;C)W1FQ5TI!lA5X(nd->Yw6rftFOJmc7d`8C@_*WPIx3ToZCLRnnLdnv?c?8d2yOhKPz)K6_mbGjFe9(W z9b(bMJ6mo1OVxO*zXOdzI889~8j*`#{t_&vbp+@{)xp9!tvs{NePpZh5QV+-Tm+4c z>mI=QntuJLK+Nw@T#hlWmq;{h#sWq$aF}tZj^*@wT*E99{H)xEmVa<>_ddW! zQ_?w5(pFMZ3&$zpQ#e~tN(Rk2Oov8qbzyiC(Sn@Y>PG!d{s&Hg4j0mLX*sgx>4Z+t ziwcvfW2ef~B1S9+j&Y0@8!Qt2u+*kCMc=Ln!b;zz)lWl6ol4F1B+3e7NvnPefBld#&*0g(pN0){K8nq*Nuy6a$Aw z!KR&gM>r2HwXSrf)K?Z$O-7L*kc%M~(_ItjEO^t>37oQ?iQ!$7MK>)KO?qSr@yuetxh-<<+t(lIwNIRN@}CBeq&D%Vvy$3=jz0Kq(m#IRKkX zJnTJZ-(`|B3cyL9dBpuv6psta{W4fR{s**2?IKsonn95K6X|yXx$-PX<|CLn*l%;H3rI_+So8MO^F=ykZ`35sZzpLOL<-vF zA^cZ}wpd@aTv(IxZ#>qS0CTiy$P6mHLeL{&Q`PtLN+36)-#g4RJP1}U>3v`@dWT>W zM|l{7LTMQ~z^JGhDQHG* zSHQ;Tl)*r3RV;)ymjNQ)QhF_-sD@_{DsmlNzp?N_oQ}nkp9J`K5+QJWe=98DNt?^{ z4)BFqJn2c7gwKFM=uvt%5-%c>8`5b%bFus@i}K%X6r$;BsPnC;Q_~8kcB$02Q$0(q z7)cqEQl8bfGVDYSt$S|?tG<|C1);1WCE^n-%|A3O&Rx96+Lw|z0mPh-S(nrA1DLo2 z5*4Hwowb-q{~&LvF>=`K@r8_u{9UrfL4FEq6Y+M~KEWTrImzjr)E3yVUu>71!GHUR zefIFN`|)wF>m0Ru>g7{YO%=_+o(@uxzHMrPG6+?^&~2 zJ6%Isxaww$g9}kH@pqANap^Dpb##;5SsAN@ax9BtVpZXb>jBVnLBK$-jhSfvixG6? 
zaNqd!?(`1n3zh~yTQrmM{1XH-2?Q+1oj{~$3cS1beLH|0|0%S;@|>!%0y#~85p)-@1F>Fj9OmN?ysoVaasF$ ziW__gO?Z#GICwD7>OgLX}8A6~ocT{4Kv>ViZi zYm6{L1SxY}8)mK>3#~kRT*N!V2EX=A$}s%Lq3*z9>0%u{Sulqf7=V7}$A-WzjDNk&Ul+}m5XXX>S2SSy&gyHetqkkqn?v!g{CQTfz- zc+#qho`{?x45cLEg3?%S2(@qJ_&7qxp6f53RQ_gYn#%MA9vJYFU#sgN*%cS?VS+DH z8Cn1fG5y#@GlTqx`hN-<$M07|eHoTUYxhpF(Ymt@4S^3oB$^*cqbiP<0xcML*>{&* zOnZ~fbHAVfnvkfQr?V;0=~M?cJ-;Fk$xjx;7*NO1J{5S1s~+bc=uwGgoKh%y1ZJ#s zOk5W2s1?7Wx-8U(KmY4ThT#lkU_E0ytD$z}aI_L*h zD=Pg=&w2wUv!;oy=y&{EY(4O87nLq+i}o&#PQ*d*6}JXruR*BDHGme6jDnyA7vl!jgiD-;p%N&-`AdWp3i$5Mk{-SN%pbfQS_FoYhCsKXZLtKd?A1z^;H5b8)bB{A91vR; zxL-XWdo1}A3<x41)E+$@{@f+<+^n)F zP~+qG`dbm7)x~c6;XV;3kwnMurT%`W8s~Oxb!}Q(q2*Da`#sQi(@kMDmv?2$S4hl^ z9f@EnESqt6?xCBDLdz8L30PLshS~3;p#)!g zc7gOk8#p-Bgnn>^jwh6|`ZSY#QMxjeh zU+tNXf!sMFOUB#`Fv~;Qk(t3e+D{z_`Aq4bG0PV&`nTR3FTVeG1vn-2P2VFUGpdxK z!9_g4;lD~T#Y>LFDycJk{uXd!_AH zL5LUzBq`RAhQnaGx>+HBdPNP<`rvt(IndGf?N5b5`mR)^=m zIv4om4f@U38(wcBX93}^t}ScC>jW4Y6R^)X8jk`5=gQo=;34^k>&~8oz*!5}0b7+3 z%{Ode8p$$a?A+^}sUE-1g%@+9AC-$B*R{vpKT>B6LkpKZ?SvAq0)CC7@E<=FR^31# z(7M(8!6~j*6Dlv86gVSfi>&NWdUbSho6hp*-UIlp$m>eb!+Z97_AA0#pG|Yt;;q}o zoJ}=8%F(aGeL)17jr5*OUp&J0$80U)e--lEqSexjZxu-tJ5=TQ*@)}^qiN1n?a$zG ze6M{W3_>@)R17^A0$WW(T)6{pn$-4#J~yw4D&n@OIbPOo@?clBkCk@wx%7S|?|bif=jEwlJy#?w*qm{C zm(Sj1+Ih?2&2_Z6w&H9p#czULCsVI%th|ha#mmdI)8s1BdL6X;=pH1(NFRJE6PL!X6~pRc;ZAral*CE6CE6UZ#6OwoTn^^zg|6?ZJD**;C+Z^N+tA zyn5AMxUdFKwhI?Pq0;?u4Mdw65cMW_Vm0vrkp|sMPcAm0yRV%>pV^BPk2wn!U_-2; zOZA)*2!y0|fg7F=8>*^?IOP;*h@m2dH4!%2QhPm@g1Pmnmdyh*O7ObI@Gntg=X$vg zinTmx9q{Goqr)WTMwwR`={ATL%LsGS-e93O;p+&&j&-j z%F4NR4+oeJ$-34?G`aA`|4lOFr!TN3oCc5bBPGNHg9lm>14nNpiq1#DdAa!)*6-z( z^ufvs^Q4u0`H!Obz954zdi3)%beDOb6 zIBh(G>TY|d^O7~w6GT6OKZHb$XO)vC2fz8BMW#0E2p_(SkPyg)bYVvPd*bmPt!QA_On1vAk zvWVqk;D6`K2q@~V=(~dd(=ACFc@PLc_cI#(z)A3`0gb#fTYQRu3w|XnpZ_H-Do5SK zkjt6{N~z`0kFoWnBxs8Xj_^Cwb6<4{lF0f`tiE8!Q9vzn*;oBx$FT$gdWCDTZqRHn ziWL_N+< z6a8l-(AEzaoRZ+*vc{eFke&z^0;RHHg;v`bW@budtX3jcb+R4C=*Ud}&xlYZ0Kws~ 
zHtt1Rq(0e4kWfkRzK56_Qu!zXwQbP10P9+d8jr$pSwdwv{j=^`0SWIj7*1Gk$#z!& z@&g01_ep`cw$lmv09ZPnEnATu?e|(IHta%@&avol75yJtc(G02$c5Wv+*yW);-MP> z)4JQ}(}}1Q%`ub=7^34LMFq3Wy)g*!VrOZ_QxgUL2SXtwIn!On>(vrGH`5n7pQeNh) zew3_uKY}JB|G$_kPWl#OLM8R#^==%!8%3jj(osvoX|jaRboerIgo+mKS<3|QQ%CR! z>IWkT!ZX9&WiS%y5vZSPPau!=`^0PZFUY(J2m#JN@4zN13ruz-fBhO*CX!h(_?G-T zSCTAJs~Hv@aQ+<-_iurpZuBk;60p`sJ^jerJIkFyUyjbi)9RlPBd9_MUCSQkqDV<; zrrv9gjLB9+!!Vrf)W`;i(0pNnEssg!5}f16Ky$;++aa4)iV_)4_ThEVGJ4sAA+F;@ z=$*g$)XZo8eU)?(OM+k+sDz;JAN2)clsx1_c)YoDPg}973CRGTEu?(uaZZNl(NVGn z0IEuqd+lhek1B$pQcW^df*{ENe_9m~l`avyLM2|J%$(?){THEXe>SVdLp4&l?)Y=# zLIv2&Ga5rbjv)IYql=*m%$emb>kgD{@j%@GRM6m85f}dS?Ws>9c&Zgm@IR^wk9$D& z4>?2O>_lHmw!_GZ@cn7{$NpEh*}ba5qVP<2bSkrB@xjyVRu@s3J@A)Fwd2cj$ZEJj z=L+G#eolVfIe*LlNmNb$7g3ECZ@rruekmVTH={I8uJ&wFsy3(~(pl8E0ubMVzS!s& zY3S2iv*-Fi|C&-M2%0zF;j2DtVdxh@#*k7^j7=O#Pc=F^XPWj_~E$FZOBy> zyZ-$0^*rW}hmz2mmB5CTkLxGYd*`F;nu39%uMJ;UbBovg@9vAI^wSmXE^ThTiYzOF4 zS#SIBM{~6XSMASd;>gvGMYlDtt<8*pqc}rSv}NZ!+c-@the7TX3J02S)nvvW((yk3 z%?Uz7KR*(;fP8w&C~6;FFzPtxzHr3oRpIE)c`w-vWzN}imowXg~HqAVP z2a35({rug2+rQ~$E(t&f9)WQ*A&%bKrWkFVH`e_)&m?s3klUiC0GJWg23-jYHx?7U z$|EsB#=xhwiEcye__=%z2o+Y6$0Wo=k)<^}&L&5=8>PNiK}p0ALBs&X;>BIOmlfB5ORXXS;Pe+Lam zj)c?nCJ?RDg~D0>4w5XD;)ay`WQpZ0Z|Y#{mF=>@1I|+1nHXRwlU)^~$h2orjiaGO zN~K@%+T59EvF%lRK*0S-eE<_P+u5gS142`~Fr!5YE2X*!#>S!zxPe->yx4m>cFE5aXVeBN$cO8kexQ8SU z$+L|s$Cr1CqeFcp#r(Nk^SJ+U7hU@%%ag`gi-OaVqHVGkZQ$L=9*qn2Cgl>WzZ%|b zRBvpofToD{2t79344N;$@-Z@oytU%~FE-YZBTf76^bNgR)LGgS#l2hbo!j|75{7jj zdZ|BmFwP``k<9GE9z|y@w`r>dbg;X=z`v-Xz6iiy?QP7lfIE%S*CA3q{ND5%P@=|s zoAhFVxMsYS9AE*2K+!BQNf!%Ho|>*dR`lvzW>+dhUT4vdY<@w3@eO9KWmZ^Awl&^*E|Y)M|VfthU`<|3-W8j@dvMJ ze~`dAVHw=`I@Sas@0?h9Is{$Ofg3A^ze(3*$p%vpwS^WVLrA_8h?rO{x(;WI#_T79 z)Qm0i+6I>EO+cQtTV@}sKYIo)V9IO9%o{TG7U;t%OBGD=NPzItee8)DG2eVe;1esERjj+w!J|{Io)SV2ikE95Y&GQ-H{1%;97t>`S|fz? 
zg6fELCuej{x7G>fbl?XVPiyNM*EF*G2Tf|mHfO^PV*Hr>?8~KV&B0uvrbaiF@g@}b zQZ!@{kpSZ!)BKN%PI?P>Dd6xf(cGQW8D?|Z>pJkyEu`qMNU&+-`I6ztQjJSG@J|R> zXi?M!CR5*v6(g6a`Vhls#y~XD~f4hDZ@49_u-F7M>yw{B{#xVGAoG5`KV#}7~vdK{YcMFG{xIL&iON*A)Q|$ zG+FPFO9IRpLashdl$D{!%_oJYE?`v~(@S|%(HZq;td~n)-KC!(yT%YxIiqQk#jx4q zjxu|Y9#&9^%~i!08OV!bxmR#gCy0c>X!u*9s)#5Tt&3?(DbP5& zEGT1z_Y7-geJ?6iW%;ot1idyVnm!#tL9)lve4Ta*NUX1nKmImH|Bxh1NX^Jn8Ss&I z@&^AY%sj+QFd1LJ&m{BF#wv{n4i_ipvu>tUUzoIvnVzCwQ(ycl-$;eLo?~2b2IPtS zOVubCY}8ZxRSTFj_*iEYfaAy!(R<(^SGT58`Lj1G82{a@#rNu$4;`(`W<%4|yB7^M z&%UUgngsN1Jdax17%vTzr~{0imjdXSXV?03RF)3nRJk%HGXBxP?F~u)g8^^lPqC|g z7yx>V*;5*Fs>g-jA<{|jkuC8UPB27UmZT@ug!?9lmby8drB;jdLq-uQ9r$gcb}w7M zsIe(WE!NK1jUI8QwbN$DbP=BhnYO@4(;WjHIUHWkvb749xK1oNdR66;fuRH|ZFw0C z9<<9-j%Ai<5D|_gC=-Uf%h#nE;j+mgfomWBvJyyT+ZqG@LuB03x<1A=Z%>w|bI`!D zWW4CJikKYcl(|Tr=u)1H8xIA?e7WelHo|VH7${%YPL)jHIx=9V93f|Xyf8lpyo*Qb zdn+y{TPtoRL)mVX!I+Qd-jR(8%o!@FFe0N-{vgYS6zIrnX(A7-QzU2((lnb6LljvF%U}M%u;ALC*?=hW{UnWEyw#Z8mTrtQ?^L6SW&DjqprRo8%=J}=@!-8Ff%k?lHdwHSWBaJ zm=?2`yEmq?F8hGasel#p05w9p*@%mq4Q1{}DTOID%911)*|PY8^YGLik>204V80k1 z)RId444{KkYN`5Cvb19XDhZYf^_=Cp7(DG&ON9A1=xXQ_hvB}SxP&e>dejJT;r^t_ zw0?thtmJ%Zo=~|{Bh&)aN?=B;XLr2c4!ScbII1+J?qWOpK0aI(gt8E2My0l3LnT*U z?nP&$@F{99ZNE~;>OW<(XC}XVR)>#z$C9~s}@tmMe`?vfgE+e@XzKkBpR095#!(3KYvQZexr9TWvdjGA|jU; zot}F&1I2I7zb$$||6}B>QyEujd0q8$KPu~@q7B$Q%y=Yw`g;H>*)@gl8!TPhbc5M! z=?^4!aV35}Tg$Gt`?S#~5y5NeLiH%|P734#9?O61wAa4WcVB@1KJ)y24j4`@WwE|z z^2ux)B%Bhfyx9g1Cp5+o)r9Gu(%Gkj9>t&*hG|J3eD-Cg7(Wt^|c zY(%?kmkRrcNE6f(}r*QQ3hk-y_f{ulb(d4A~v&zJW#)#@T=~s>4nk!1@~_^`!<%EFMZn+ zB?uf&Ykd%$6Ci^eE}8N<>Uh+T$Wvp8P+hdxaCbeL4B0UX?~-8F zbW6cTCd@M>_w#qaVJinn`)H-1Q&HV#XLbCohn~qGT`(j5bCOAZfZ{6%mFHs~Q9H`Jsw9WKIG38fyhCq>W0cWWy-?n;o>p{opFQ_L>6MJA67#!^#FG1v@+L{yM%8jdS3 z;-&i=$Gjgu_vK-v*#_YLHQAy3aAs3OdvVf1hLXh7;7#+Az_N+3C5fgWaZ^1(%(RsV zk(HpV8A5!nu#qS6FwUITz*$u$mG%!p+MKvKPv4BI%#lnP;!v$HSuG(9H-JxUVE%5+ zMvu)x*E<71hD)5?r(U_9uVk z@+7p04U|Cl1J>Gp$@gCWw~tMu^MK4-mw5&33k9rPo|Y3WJmmh@#%h#! 
z!K9jZQW2BBHz&(Fo%inp9Ac>{pDzDi(`NR!B=?MMgzziEP#<!*jLFZay|fBkjk*Cvy)v7W{&oVz2xrM6wR2|G5h(1WpzKxzrvyTAcc~x( zWh^-{NtzZW0wk`Y5-qiXSes5kJD!D`-(&Hgj`efjpoGAHPQz3z>C}@|||n z_~pmDoF0{o_Gh*--rVuW$0Yk8(|L5}T-|#rlac(#)F1JEvC1--kYrcK&jFuh4302{ zOD=X^TTZ9fWo#rjBd*Iw4mR*|D_k9lzX%ArY`tQChG5+ecR%WR;P9lj4z$geA*IKd zqXoGbTSX{kE;DEsPISR<97YvnX(TP&l=K#brS|P&Y3?<~y)ufnr^m`>$==KR78VjC zH@x~FtlJjUY>^GJ>a-5v7JGsn&G$aUd>$|=Ff&8jRQ7bUEyMN3geWmFLEwxOMY??; z>N50*!2+x9_;5uJA48(4d46(2B_KL?y;H2u{)zlN<3sR%nyA=@E;|ukThvN@P+(KW zU%{sgeu0aI!Vo?=zbUvx%FDv5YRLLCdPCw*FFdi<>1Z|PR2I(Z+{##=u(5teyncHL zoBa!R>5tB1u-OjWa&!dZAueTRmqow%Ku_XU z|K?ON&(UzS1town87e_txH_~HB)&f*!z__Y>shuZeXWU_{?LQ-Z0S7a@Z%Xt4gJF6 zZp_Tgel3>~uG4WL;^p#8S@*pg{E_G{2-v&)$h%23xw#Pm2Qy5^4mMc@cTx~cjuWi! z^`;;2r6oX2ZeC=2sH>K0KM?LEz_3&%Nf&O!fau_MrZ9!IvHPZ*EaLq+wJ~3HI;zL( zb^?=u;3Dji+9`b!23Gb+e=pc&DC+b|0&5C0eduu`Yy|L27q_6=9Hzsz=>x_1%AP|1{Xy9| znER{bRQQu6Ra_Rbk>!%~ahO3JJDe9&O7PeEEFQ>=&aA-*xce3S= zDmAN7y&e{+O6@+mG`rOd5y}`$H=_!df-aQWOR!`cwrtf*=@@0!Pb71p*!CZhiX9;!KMV0 z%FQUSJ@B5ph73NWrc$sl!)M5L0(0I93%m)i0CvEaDgp1TzeKmJZpp#H!b9&wGpRHi z>_5{CzAQ;T-s;oETNAei?~0f?-;st&DrbWI?_Qldq{BcJteyaT7&l$ECFwEJ{t88_ z7Mfv$Y$2GwA)80UrJ%RHM%^%9v3L;r1~sBbQB6~W`qM-V(H4fLhBt7Rafa3v8y?x{ zF4_93&|;1MgaDUe>I&VjHhui|;KMuckax+v(N75Js+y-WJF{}o+m3a zGEc-WsK5_Kd2EqTuu^$W6ZV8Rww5j#w|AFvzaFxK4z^yKy6NdWrt@8ndW*gu@C;zQ zy7sO%DE`A-iRjPC7EKD1334aCR(`brk^7S@N+pCb873-g#+QacX6XmIb3QT z=B!)p&8!3SHqsvjTl1a#L{a9?Wn*;SUB*xo1woCYg?L?P7i5O=Yr&JUpravpfl&(( zasyip_ix*#S@fDdR_xLCfB`3+!x~hH%18mb;_^iJI?J0FG#FlQ&A&zUF9q|GQ&ke| z*oI0Vwd1Si=*e^t+3oAk6mA5TB~rTKL@XNzg^5->iq03iySv=JX0-G=T3sE#Y2Q=j zA`7iP=tY8E?O$#k?pCn#yPyABeJy#S%?P**(q zOmTOgd@{e$duA!J2AWM9mH7w5O&)*Chi5;y3J8Kr##ti&8<8!W=?#FXku;~cyrwdg zVd;_qKS*mborYqpzE99$^2lO(e!Et?`ZyF?d!7t&bew+*pkD*J9x}-;#xSx!r6cjz zh|&_QQi&UiN(U*C{~k=?n_tx^21Xdl1`dn+zUn%d1@6AL9DNX3DY2|E4n?RQxh(x& z0faGbel)&}XRpCjlP@nPJTyLGRMneJXUWhsf>i8Q5JfOI_Q;+M2V7OX*VQ_e&RT1G z*zN2E1FUJhu&G}pG_QTvZ$44+$t9L~`<%Ni-J^>xzY8BkeQnr2-gg2tR`R6x)(B|y zG+Q?7FZB($J>P+5 
z$5KZ~H8xvyYg&8xIJdSkc{j;y6oxhEl%`g2aVq{A`!5ZTCUNYCM2Eb9nT|xPL8VYGjz|8$A zVkpUU1DMKR@cHtoq?q6*0^ zlPyeu^hdQ%k`z`xOt_vmkf61MS2CApUvV~+WY7)bi3Gw+{Z>4+$qcbgY)pQ5=6tKTr2ziAHr8Vrw( z_xWMh$+{vkAtVy zmRE#C54c|`3Adzj9b5`2Y6E=A3Y7E=@d^XY5~x!1I!j|E*-HBQdL-QT+}0F>py(xT zxx88_6|#)R*-i{Ml)iJ`%h}*Q49ySF#nUzeO1Y?8()R#beVowMCfxF2owUvR{;A2M zAb(N>M%Xyz0+fx}p=bS6l{kU9g#c2+lBNho=8%O(zP0^J$y@|fwcHA;h=Lo0*16n6 zE&N?&8z>7uCQ18Sbn=<=OzX=x3wo(X(-< zm`JB0{1b{83UMQ~h;`PYuAj7*x`~)vFrU$`o&Dz;J27f_$C-OX(#Pt*n8TzYlnJFj zL6Q8zOF57u3Rzwtd0z68uj9D9MwV(SPemyOoVUV~^8N1HMNmT-&hl@3;DrSjpOdAU zdTB)2TUWN2#PYhIFCo^{>g=~P-c@fPG1%lm>nCNjyK zz)Bo7F>W4gz80IF&S%zE_zUHpuP+<2Y@)vUKbA9<&Z+GJ;a;ZB=G()s8sCdG2*t=> z0HRF^nf_*L-Q1Z_5yMc|J{<`^ca=!Eu2jai+)>UEWl#1t-l?E&IODrPj#kqCsM= zjb`nDH@XgX;1v0F)SOvKQ{X9k7$4uZgtSR(-@ByS7?rg(YM=_Dfv%8Bz)SXE|GE+w zI5^QZ^^Zw!WyP!#OVH^0uBsYt-*$NGR_Vx&kaO%9maNLFn}&!cgs0X`do$-@Hv-mC zincBA2iIKQ^k$C36&0C@rdMKZpLJNEY#gUo&=8@JUd!e6dVg&D7Q{Z($Q%X5hHIA~ zleH3&K_OS25n&U7O7UCUPIY&Dg{{O0i)MDd9J>-$wz&w=cl9aE@x;j#Cc!k1z`$CI zbSnMM5#?I7>?hYN51ig259wOt)J&@-Qb0y)!ery@G426n1@p+Gag<8dW!Kk0 zq^h)vw?7AbJ1HYHsQg5aFb0J_+*Vw#5?fTt$dM!wC8&`%&vj}Lq)P83Ab~^H5Jl_pzY+nc zdW6PMi8EImTp)ZY`%hr7){=}`+A`SsHAYStb=tO;@7e3?zys_J*8P|?mHMRKXHCmE z-efk1!Xb|_-+!hSCFbrd2nm%~fGb5X?(RztQ_BD8Svc+(xG4}vNVp!*HaQDs8B0s&y**& zNnJy>~tkGT>BF0j+9tW|gcDC`RmG zApML>w0F)-ROuedMIf1NTuA}=4luOX{%1n*Z*7}eUH9nZ*LM1nELRY58JsuH7!5=&c>v@^7(x0{=7 zPb|D@e7k&H8jBC?t!J^8LLrLklRnl>4-|r-__J7=zpFoQW4oIN4T&^y0;Q#n)!oAM zr11LkIjT$vS&jSm+vdH=HE*Dle_+ipX{Xm(>dH`Izm0x>mWA|DGUxlM02~eYJLnKtg z)7q^`fIwz%NGRQ%iB>tdL92ITd+aWWSsWkz;Gd4HPT?HzR6Q<=TAG>n-3w3U0Z50zX0F)OQaY zW8gcN`r;YY5P9!}jZ!cI%{kDevQWUik*&v3Y$@bLkUSd0oNTQ$OE=t_x6Vn&@)upClB9x zqMnC1{fI;R=;Mb576sd?vtHyQuf_vV(S1> zhLp~b%3wq8NzJ8IN1DT_w=|&O%nc?ij&lGc$98?jMwTi$zRk`0ad{M0+YkEwva!>y z9+A9wybCJi)5U`2x9x(>xtpG~=N2CD9FOFbFObmDbHn$A!am)x)E3Inn&MB5?N&-w zK<%{us^uhKd7T5Z*afRWA+9_2Ra8sW^*Bx@y(^g7akJz+2hd>4JecXLqaWr^@iix^ z!&NChOhGZ`Y^eE9rkaV|2uT0W!PuPd>Qsj4%1`U77*Kv&GLGhhWh~t3a$K~Zzu)-_>x-4)O~V=MekVt4^{}x)yLyS1 
z;*+>A4gTsy!Tzqh3tSln7wApk+Q0WQDkk+%Gi9WEa6m~F(0W?VDGX3NBSTDrSAV zJ?DCa`-Vkj5>?^fT2X)5YX^_7bteyNUIJZGl9qG>2nOh8g3B@jPY$e##`3;mX6#i| zdaVU+NpYn}L9xmi|1J_LeiwmchK|c3LyKCRqn*>+!dHl9I_0yAZUg%9x>6fa4~<0p zfR7PWfqC!M><{U1)63;_EiFqkN^KxkA4pB2(t#+g^g|oeM|T3rt%wE`W`yi_NGghD z)kgM0*B~&XrcETv5Hn!~sGtRu3bUhq3)<4h3ivzaUD%~?{bz4x%LwF8Kb7qHI^VH1 z+4zTh1(L9H&cO;3^6;ul@KT6v@knc$jYBe(U<_CVj$&CyA*`y{L@mxN1`~>gc_2F$ z?`=_z>)Im$S%1vl({uC`(E|wU~>py5?RD>+tueJ)!Av1Q5A;@6~+xL-1I62tV*NQ&@8@76W(t zN?pZfTIV-$cX&IGuAsHpfcOgs{L)}Xu)>)=hP)oNlni@OSp?6Oq-j^J%Irck5IDj8 zTzG0hLaA5Q&F>+l-wZHA|G29Kt(5f@8Tq9XVA3HXhXJ>0fRa403jWPzjmi-)8iCBp zi+nTB&wlQ{xYkDU3NXHp@-ITx2c@hw=@mf6Qv2d9zA5N_==PUO5bHL_+_CkA8_rvy z9os)gh4+;qptvge6;@?cvhZTEM?EFR2P5Be^lw@0Is&biYZV$YjYiW^@2^3ZoXAFy zUW@gl=;*ZH3u4dtDzYN+ZReo{mgs0{bh~_93R{2QD9uV#E@Df;VbHMy!|p3t_^qJj zZ0c+0)HOATCi#HaMU7tC#d1RJRT+Y#vJZm0nzjs)w4z& ztFm-jT)ct) zc7cP8fYQMud*+&r3?N@paowrQ!&lM@WMU zr|Pli&R!8Z0cx8N8zR7onY%q?6QlGffLNZrtw<1h(|#g6vwAS}<3NMM7FP%hJqv5g zH!eOgRX_aYGS(%7_c*hr^oJpX9YfM)SbUHOM@XI`d6TPTDT$Hj#*3!*f|ITA@H04m zcy_#D4=|K0lbX{+v#hAZ%P8A0tX;toTr8U#Lwa3gRKb_JGUwwC5I6=_Hs@WoKi;+! 
zbUkrr0WLugrvXhBVUC_lg~_bLScS$;0g@CrhE3z0)VLeJ6fV}LeU^IFvis}wl*`4* zv{rqY0WS7>kJCpiRn zw)nIh;j($*1g+ZQ0c_QFc1tZsoK!uDt)j&7a+0CNu!eY*75O=uwD98nJB zSwd;TK@3g2%|1^afv`$H5il?{1fYpYE%9jf_jw{!;j6pa?16=eOWsI9w%v2(C0MP| zQZ@RwTUhtHbmM}LkebXq$R2#BegU;XJ2A|Nc(O%-RE-Izx{2z=dZS{BX-@2ylz0A0 zlGF1wsXs&4t|8r3jSL!3`gpHM#1g4t?z6K3-qDe_RUq^>Dmc^u^oy`Cz+7)HQ6+GI; zt^r&BYlemJ$*^~c{YiLe%x(5ot36K<L9Y zcLwz%wz=goA8nF9=9*{f4MY|+I+iRr$(vfwEe32jaM_}I_*@ugpWU%jOwj0-@YiiQyjSp$H((ml`V%BS=OTV>*l!0BvJoIwi%d#~TJzT_GCS;Q*uA{Hwx2e_ zy(`I@)fnydRc{M<(Mu4dLd*&{i}yfz#D&eU>LBv1-h2|$d9jN*BAb|{jJubM z3qUq#EZY=vUs2lPfy;(4j*z};`5qLE>o+kEBfKAF`ox4I8^p*+dAoaLH#i)De(r9&Kdy=(R* zzvWw2SJ%l^QgF=#M5nmTO2v+y5mTf3&VO?_2Tk{8>v;`I zwYF}S-4S%TV4i0ROC!{}2&K_g2NZ6@Cx4eXVIN_B$FgR&P2|JhX-NruD66#l*n;IQ zsJKh4p_}mpo)CmaY=7NX8%R@J0z$r?gS%bT)E|N4NsmC8!!ydVDO?-%a#;3@j#g7} z>rzQsr}(%q#nkKU7DSoXB2K_TS3R%tyUN+IyDFDQQ!6u~ zpGgzm^d9S1y2aU44|X)T`dvJp0G+db9K`Ga$2P&>4T1N5i3 zdaJmu0xx{P0&PH6U&yQ^7yF{Uq`IXDOs=RwMLvTRl$8j4H$YvDVU0lki-Ao*%l?fH z6qAUV-m78GeCR8@uXh~@HEydNksmTAIE-j#?FkYNH>dnWh0EZ=aLuzJS&RvKf%!cO( zZd>WGi@d~M>^V=!&T-O{HHfS^R5p&&8Lg+4<`_WSe7Blm$O1X%_As#KeZenl$^=dv zgD|Dd$3Qr&3P!*UqUp2CYu#pCC(|19?MTa*s*{~s(rnOEsiEw|PF2oGR=`nMm_y+b z{kvAJbw8I4s7(rATPvV-x-=&srD$CEQQcE@U2lTz^faY z5m0_)DC^v@1)D@?P`b9p`I_s5%|>7xZh=___hM@ukr~{p3<;_2{`=UD**yBMXPaV? 
z=p)Ah9yPXhTSmuk&KnLh0SrW5w>{s*JkEhxENh-Xx1IXLgXfC|i~HH+PXqUt`AscY zHppOb0=am478@UnR^Q{RIR{t4-_ds;NO$#zyM@=jKDgQeo&$0qtd;FN=IS>cfy zaPmUkSr!~hNb1w}EG_KqZ7OuOJ=e=S42l|%Cg{64YswiSzo^R>I&?F)iz}$!`5)oP zoEKi>KZVl-SXoTZO)pKD(d}qX3u4|l2L(oA5l2n#Sm@IS5X|V-Ca56WhX%?}r#xMb z`6VGvs9M6@0QeJT*VnHz3pU^r*El?#pXZ!`MAg|`&Qul8QdytZ->rAgk^e(@ET37oWiOqfHOP9fmK?<1t~i={?SYBO`?*6RBS-*Lf>vg0FoDtx6n z(ujscZx1u^i)mFsyTaeWGtEFcpzo8V=;iKi85=`w(<4Jw!NmKuFmVAxY;Q1 zb+bL21)VNP9-b;vE$iR_IyLugI+|ZA>=W?O8Zn+MbYERn(LXIrucz6CxwH`Yq^oV& z(cI#|n1eE_d&uQ;eeA2N-sQymO3+Qn8IzVU%uv6?QHiAu5l=Bu@8t+t!1w_l_Za`(;3Cq@3p51g zKUu>ABE|#9%)bVglevkF3H`r58UKYCuOVgkLkzcD?bH?X0`EN(FAf@NFbcs2S~x6s zmtzy}3Ndv2WBug!|mBB}O?uaoF^?v_RS`)oxdVNg>4@(w<22f`h+d_f^(D z&%Gucxx^?x!0*G>duZtusWwWK3y{Xub@wSH^zBAOBGIHp)M3E+Ac44P3+0Q8l93j> zx!}V+kB7g-z5QR)3Y$3%HchS*DM~TCej>71DdsV(Y}XX|?yI|`{1QuP25;)_QaOE56iaWrn<+%ku6b?p0cG>gc@1NTUU?N; z($2QusCB-!NhJ5ysUcM$E(SzaV!P zDPEC_M8Ydc6F7R)hWsweN&>qWhv2z?+bBM6+kn#cKy+}mX(>JvqL3sGwu7&S?V|in zc3e8_vzU)cQn~M=fZ6C9ED2p3ejXufXa2EuNfD%7M36wTK zv5q?->Ag+LyO)fB$|)p|tw=Wc{v;L`obx*I6bE0{L2lJ0=i6$eqdtXrwgo=xKbL zt{TLOSl-Z3DVJS3f~N};4^m!J7fl+oHW8*tJ=L>gYao~!1`%V{kc~)eT^Kd#JdQ(z3qh|{y4u#O9jCp_ zXvQbWXsYI~LHYV0lgjFcAatSicviC7ovqjVsl1j-Zezb*;jr1!_|kRd-n3 z8_QcUIse2{C5VluY>NXM5Eg@DEz@E56)?aV6bnzwP0l7~PX}pD1=zJoxI<6%3@5YO zts_ftZJ$W(9B;D@`_&mdzA!qjljL5uE6L-K2YxC2+2;S=7%4qw`}yCd_%X2wTZ{x+ zJ9-Ep|1*V&qluG~g{_(6KMPWW+M2@}+js9P=`Vr)qcLL2@HXlFNL%akS{%6p>1+X% zIQhLk&|jmHMPHi`;-O9z#uW|j$i2MnP!WT=C9YT5SQqED%EH&B7}_toPMw_K7WStL{(Ssr2)9Z$>MpSPC`3hGw!N_aWjmVI@l>(3_a0Ph2Ehx76bw+m zNl1mTbq|_`W4x#$Y=5fzu}Hv>y{^D{^FLZ*Gd@U;lCz2=$4bMJv8|>^510xuY4c|U zd5w1^jw%|H^=q%;d7>y?3;b>*339ML7XyH-bH;rjGaQ;BuWX15fBLW66 z0Q-(Gv`#o&L7fb?r#TJ-_B&dH|jilkbve3$Kmp) zwX2_z48q`=%JA)BP1{Ko!Z55By%F#{XMIaa&*T?;7Z&pM9m?Rt(6x~^X}pyS)jx)zbKrA>pi35PqhVA@Zbr-Z_@g%cT2>?VipR#y zc8p(EEqZr85Oxj#jaaY~p{Fh$r>1{I7u5o{2#{t;l zxs4OhNSzo5cLWk z$u@#n(KTr*7~cGu9Mxy4F#ggw)g|d}jlTY+ai}YaMaZz888IY}_-hR%Bdyq%ol~|k 
zH^T0(Xy5Y^0_jUu=CAL$P_fV_69W|o$TC-G6oSxI-GjMRQ1swhGQ0q#mO+_BrdWbEYWa%i25()E1 z)acn>5ac>%kAOhN{98UXvkBuAJG z^;v=g)vv-1OVZ7aO|F~^J?`f)OtD`%~y z;#m|~7d%9GWD%5o&QIls?%bzauz0vt(e8Ypr0sA6p?1PF0k8fB?Bsua1U;Hi);qwi zsQ{^%|K$NG~!*OvF`O2K(f4wZn$$C4%` z&AHkY6kpU9@1z?XzNOyY=N=vFXco^fh`TTf&q1FjVqtZ3x)l5wakus0%a4VJ?<@82 zqVRM*Z#|mIPsE^iM#ZLDkm}zqznJTwh>L7EsvvQ0;*TljuoK@b3~A%#C!g&445lcN z;n>5J-lP|k%uFdbv<-=1G2qBqT!(5P88na=WTQ}}{ab8-Yo=TwP+jd2Y&m|{{y_}o z8m$EcPWaNaJ!PZDrPV8v%TbZ|Dq-vL6nXe>7r#{sgxWFS;s*lUk)Z&;{au)H@nMD?g?NGmaEaYu|4UtY>=vqcM zs-@U4NC@>qlWKV?{8&5Mi@$r|WP1VP8f}k4&JvH#aU-l_uYwmUpp&1RWj+p%?+z{h zgb1*Jw@=FBwh4i~c>9Rv_gV|KkVp!>4f`=A9`QR}%yl)bmS^FCSd#=SLMu*~g}M%{ z{V!9VS-A=dk$3v6Iy(XoS~tTKnF@BwJ@5n)UGs0m=G&|xZ1cK0hz+l%(Admd7U)zk zheytvO$;ZN_07DI%#P7}=F5W&||@{kSfK>rgFLaKk~fA3T}HjD4yV z6o5?CIhKkGJ8aJ#4|Tw@jFI2(Zj%=Wzo<ADh93KJ$vE|HXBMyUwr zIQB(9_`;jsMt|!zuo@!u$+g2Av$9Mon|jG$-kRvQMi+3kEY}}?vqCa{<}|M@*Y+|8 zFgx6C-PbuYxZoJ~G<>l7O(3>`9{A|tJInCAt7gsHq`dQz_Eak11?vYoY)vbruOFWHnwmKjKxW0n30L8q3pE{k%K>LzW&q z&KU+@yyO!8`3hj>9b?=J3%$DL-A5lk7m zAZSZ{8-uJG{47-Xc6tgig#}R(0(*>IdC>bEKC4b-3`+gJT7$r|Niqqmm={5$u`BD} zSlsenF*wXSv9$`(Yf*U84uuE_vjt=M$=zfCgawxoDjE2ax$R3|G!yINj~1I(-!Nr* zQEFA^qi&^>??@sWWt?6J_|Xj0Eipap@Qiv})6$B_V4!6oFU73VdIu`&4vS8s%Lli0 zywES*r=X(8j5rm;>um3TlK;?GhS<2C5(U#&hj0o;zv%O#BlS3av+^L~y)|p&;47SB&aRi{FD=>2Lu|#+bQ%~x~p7oZR;xbCUgU=@zbsZ~(9=#sU zTNTRh*`u=D9Qck7z#|9 zSS&ZXZ7LlSvK>|>hXmm-1<4}Cqx6S5cPxHWkE*lOu05{eR9PscO?)jqzH6QSJG?b6ghaDJn39er zTp{o6=d3*qyeJQ#07DZlEBv&Bw|;tY?yWKpXyRfsM58ra=vEAuah!!a7}c2w94r_) zgWs8Rpt9)=jQRD;Pgb!0ePod0T&hme2FvnS=PxASy!Kxg4tR{-ogOw2kZx88kbgM- zHS;+-xm%n3%jiG%ma5(pPukr4^A!+7?HQXHS7K&R2jvly38AL4tZeFB?M}AYak`LL zMYb_krrC(G8MzbWcQTm+l>$11zjizb@(uGLovZvSV3d$DHWHdi_WIiU7A)1B@T z-8UrihwsDk7XPK^7H9F@rJLJ_;dR7kM3&%9c`|)t6+|}$RUX?KZ13%Y!Q@06;!67$N*ZpY~%(TCUD8`cNG$LLFaxc^}N?K#zZ^W5cX{iQ8{?{fWSiw5y%1Mca^FrOa! 
zQ6}EU41AS)Hou!5>Sima9&vJV`^DuTwcX5J^kv8vfbJ-y|9WgJUHE);ldl7*nlmJM ztLv}qPluDK!nfBTZ;uz3osDNI_4F@0<3YL6t(6|RN7Lb<+AI2~vlQ*_ZRCwB9)6At z**?84_s0PPe!Zw(!pJ{gSBdA;Q72X14xP(dTVZY4Tu*u}r@Pi$y6Z@~-TFqY7~LQ5 zXY-eC4tNJ=7P0pC8Yy?UuiW4BIfZS8%hDF24|5 zU#(G&DqlW^+;6pu-%ffTjc*fO`#0Yn?=IOm+CTV11|a=Ng`fC2nlSi10!U%lyuXL# zx(;*JX);b84Xgs1e|^I;L13Y>%CXn@#wbK#lw+asZE_4dG?vmKXT#tu80^v82Q$hN z9WRX4|3o|zL0ZgEN}mfw33NVI9V{hEc(&Jx)C0bVL_rB8bKIU}zAv;62gOFNI317V z{@64GoVN{t{43hFsed^?{bK9k*tFddShT4HEUFKle^9F2JgQ2?qid}^|8(oXjlw8) zweOmio8)V0z$_WcJ%Zpp3ak?cXX-M81GBfa#7Y0iswA2af7p8&?Dy&1^S<(V`h3~X zaFko;^g23^mXn>{wAJwl1(%yTcp=>hjgrmI@I2bbWkW17+(Ukd(Z9NWcMHi$mwWVN z(VQ}fnCOZj4p(ZSYKX`t;2{AJQ9R^#a^l^|U6|*;gwVSjy?5M(-X~4Jbp{BZQRAW+ zHQX9~6aC50qHlal^3M;3&=L3u5l~iAS%`%gOmumn-Sp~gbuYLQq8r+r;DP2pnm%2f zYdVh;gJShjejXRLjr<MZx3Wv8|uruf?f+ zH%+{kESZNBC!Uz|G!5Si-g>{@&(KzXBw{B!YuO^;=hET{x1%r4ueMT%Ct!-DNbPue zzSqxog{K}AF9~_Qh6vwGepI@I@a}AOg~Dx3r6;)9=n6>|=x~2J!02=xCPy;$i0mb5 z;nAfV7Gp^hAiL&&Uuu1e=J=uALil|BZO#%y{D4T*x={$4iLY=EJuEKW^b(Of0dws5 zgZwC@J>PqESN`pF|G7Fr8<#ZP?qZmn|KwK69T#@|)^`iRNbOHlKZ$t+q^wV38a7e7FGHjK z8ua$nwWB-PS7?=V{3Qn=kCn4Yq)#x9;g1`qua03Oeev&q1Be7z(D9$9qeGYv!?Ab= zAeez%BFb2zNV%lL#~-H+Yk4pM$o+BPB*A2mqVQ6Xk(gy5x81ovh?2foGofqWuTGx_ zVif;CGt%%1A$*C;xf2i6+(#n!&oKkJr!j%W%uBpSpPMrNH-gfEq)AgE;=2%O{e$ja zTv_MyNaCcEop+m}6fv(0U3_>~os%@f0beNWU=r1$onq2W-VW-5pOuawY^>4fhaKOk z@*aJBd59O6Y(Yaduqbh(j#8UzGjUssuOBuyIgo}wmN+GrNV}H@$L&6ButrbnEhrLs zyH`4WTkbdcw97vfnBsj7Gl}%0HFGO2Tb-Ec@q1`bO5YxR!hV+`uNA4;sxNt%zSOl3bz7A`M-~ z4jnPQNPL-Fgo;CEJ};&89zFfY{Pmyhq%aY}wE@fqQDI-?Q=SS##P zuH5u>LK#1Cu3jI79XJBWW+k)k?6RVLHuxTUB)pZDk8F3HIJkL<@Ps~kRVKzqUP{hw z_em2zqA~*OzQx7M-wMyI3D8N%iU8P0B`CNlIbmv=cS*n|v~HgIJJ5W)qndY1kKM{~ zHU4A-I&cw^gR&`Sfv?Cr8pArE5DFdGE;A`-!LG==qv#7ANc=%al2f?9|1G6qkYwf&up#?GkMgCo(`Ts=zzndTjWm3NU--SOagJ1vOG|U4Nlk3oIh4sIxg^zy70Ms}o_x9-wsIkbf zygSu@bm-sO{YUfgiS0V$|A`t~l-)}r39x$n=C-KxI@I-=U#_pXCEV;FCV_QY(5EqK zQF^Wx&V#mZsVPB zs6c|cU-+PqCiFL4K^Bx$juNXy|Dd1;B`JRH2df$actlz;2pX##D3xWFe^Lo2(`(+L 
zghfOec_F!FRTen<|Db>=FYrLnDmWV7i%iDP0ShJN7Q83|!v8IMngOaIDXO=?LusRM zS8y*pX`oqC;tngXaEF?ccSva-mPV3KZ<&?QRN}7ak#{H$^cUiRDESYL2I0xrSqxyE zS_?d-EDFpExIb9M`(oD&B-Hms)qh2;Lpx5z;;_yr?%T>VpyuX;CAnz?F@@w7mdxy< zDi@L$=TM1qLd5VC8gKh2#)C%M3GCVbiQGREC%(Y>tu7(5>%Wu!7a3R~8nD8`aO~hD#eZ6J09FSS z`$vU;H}ta>L~JN44?H3fM5o%{-6o>z9sz%{_A7ge zE(@`1{z|KSbiL-?4hE^&<70Qe`#n1QYv-wx|7(rk=e3>REx#_mJZ>2)XboBAo2gr@ zQsIg+GrFq|Vt3kFtAuq9_nOIeHSbWp+{N~7?^$zl%i+novuZii+rC~`s=%{>{;_2T zEMnj5Hiw*EYe1dVCVka;X8m-;8J32tt}oTFqo_53+;3ym?e9@O-*V`?*7TrYIWXX3 zJ@!*R+7n2IJCF*+q6RNJ@Hh{3bakU~+wZ<~U#M5zE=e;A8L_k+ivrJ84=uHen8h61 z&pR)+<8&ak+fPD}!9!M%O#x1BOCeEF~*!y-A!#xLPjA zXl-6}%!hg(7nIX5m)3!&HWbxOtme{87+PZb=)*|XbYN1zMnG$7GACQg5NgU)rZx*% z^)ZRGYr`i=+Gt~~rRQw5R%mnXvq(E6#0qN&l8nfsztGU@hVj@0L$dd)_dBJcP+Q#` z*j}CeeaoIk%0lslpNf4VZ~54KlxWSa&iMG0TKjsuk@#Hfbi4v&zn$bvduaUR>%NON ziP`iuoOUb#*BvC^`%0^V{}lM!JA{H&?jDHu=rcWPQnFP)Xg&$!l{9cB!mC{t5U=LF zYdetvZ<;Fu$v32;_GsMm!Tz3w0p~^H$(7YvY*XK3PO@bH*6Cwn`qVyQ5A|2=dxXR}HsOJB8Dc*Xr zU6-!o^Rh)V=)?oDZ|<^lu4f$ zemK&Y^1%`RPTYtcbo^G0GgzP>;tD!?(z=l*-e69Kz9hc3+KUi~x~+GfywYl_kH zF(=8Xn-Lxl>Z|otfH>w%r3Qj2gEKGtFr3}td7@eFa#vlT0yN4T($q;AI=h|EhoXLh zLdcF@zawT>oCGc;jVtUS{qmy6YebQR>aO2&-}*z~dWS4UO2w}lE?@-Q+sD`W<2xK> zZ^%P+4kh?!i!MCapm>*O%m@_fme`*Zc0KvG`!Am>O^%rkp<~KLe~&cRzd;nW#_91G zo{mpT;XIRB?l+jd@B*T3MkZp_=Eo24ulqCcADoQDFa7jKQVwb*r-9D_K(J_lW&KHH2 z?QRc540Og1qOYgp%APl;{mmtC8lB))UV~YaYF+o&5YvVWd^RV879YR)7!PlHe(HK* zRz}ReE(~H6-H$~eZFNzLo;2?+dWt1nB2J$`Ft0C;qiFqK?Y(7N9NW4zjJpJPg1b9} z;MQ1>?jXV49fBn|friH2UAiFzOMn2u-8Hxc2(BSWNZw}cwf9;pXTRrqe!$@a7j)Ga zbKLionzO6sD5)iLGVO2qK_6K9B6KulAovkLhFC7C6nk8UKr?Xy2}Y+kmOb!A6>7Xq zl6QImH2h;==M#=qaGQPI{IkAkg!zGXKJxHV?>*5nb8(X^NZ}+tDeHZo`e~8+AyuDU z4*#_F8hT(&7wC;ea*A5p)$(|GZalz%;?gGq7udm>Me&Vu)lKStSo+uYryqOM()YFB z(|_Hr1^rwK`Z>HN7h{lF^K8hwLiIg>Yd>!*At=f+OJ3WLf$>$oGr1dP15CP9VbwOM zRgpRW4Z9TYo3BK_z}w@$PN&C%@L%Ei<@e?&@%5}B5ApW|@jVHxRXBzrL*hSiEroxV zIG3F3Bue@;tHV&judZ_b5=g~VviY*T65O}8#YQlS!a{lTq7au*W8noc2zA?)Ck4X+ 
ziwO8`w6-y{YmKUcgE>eqUqMwcBc+5FOL%TN^*#GDkkL;ciRh*sKKBUUxLB;0qB%H% zr>J|7QUB;@<}@TK6yC6K-goORrLAzY2T@X^)QvII)`Rv^Ec90mkvl%^F4CSVNMM| zCB>?+nkHua`G~xCt5Ayb42<)&A}X>J=(VeB3He&f4`# znocw4@zMQjYiZ26_Wk%f#=*sIK=>N2ku6W;=@ZTyU8)f3L8|GCbLG~4gjB~Lcx3}O zDKb8&W26w{qV0n+@{`wXo6jkqZtFAv^}*bcihjCI?eiE^tg*WuB1TIF3gM%!q-S?> z0qFZt5!{gct&yUZ!$}q^#`AuC-PrB325-%u$#Qd-3=XKPszml=jI<@jA4Y9t)o`i< zKtxh!E9Qc@)!8q3rdA|e&sTm@wJn+UE}ZK+tSP}Ej0VP3W)R*f3ky~~X zrG6&ullL^iX@}WVG>%zt1Ci65&ROnx@nm{go*$%(5ryU#OlNe@!KJvvqd}~Kk91p< zX+e%jO*~G(6j3(Nxr%$bb!Y{DbFgE2n+rWvwm?$3!BjP27X`GT^hNw4k)L0GR1N5d z&nrqduI(FMuoWRV=p#0+Z6o_a*1qfFQxxlNwjOxtB4Ejs#NG`7QSx{}#Z$@*MF%BRPx`6y6^Jg;-*x+Il7cKn~N zjZ=&$U6g(vUiKJdY0a-Us(S8}USvnVXd;&E^yY#ZHEpY{H}NUyw)h`O^n)jn-aOEb zr|LR}C2)PuD-Zq!vsPaB2{P%9cyhtx2vN3)thX}km(GTLKFyetc6x?Ev%;TC2 zuUM1UbH}c>qdGJu9w-zb3aK7Fa0+pYQ17$RkA)jA`IVVqI&~aF1cTc1@}=NW0)UT7 z9rAWmq-q?7%gID1H9=DdHN=jBY9g^hC(hUMxumlCJD&D4aiJePJ5g33)#r-h4C4F# z6~J0?2DmwaPXV=iNfKk|3rjCPWfYfoWk$okomWiI6vovIl5ql}su)*FeB4hl;tU3< zNg(YwmADpK4vKo~M07 z>VK5mdBT`<=w^jJM8L`+&d|iHhVH3gHjOS+WGw`dDN+^RgcwBIpt{fXW|M?RuJuwU z{*g81KeML%N7j-U7r#@ns7opTn6>|aOJ0&g8ChfD!zNr}e9bOIbX1^KX_5^n)T*|= z^{=o!a7E^AzL?7;C~=-pysNKy<3@{LM+le@^phcZFLUHBN>@6sOT*?wy4G zeo>hm7O+v(WXLO|Z*D|0j9clkw-CMk9*jpM1^b;bFo_P zOrrRv4=jL!1a!-=g+ADbn7Ht(Ru*Fd)NH(K3j>8i83Gn{2DxkpW93w{M?zF>h+`|( z{MeZVlW4Sj9?uSwl{JdYZJPLhS}k4jxn5$bVu|wfT2UDi4M2K*N{?B|_vwpw9=@GkGSK zv;4U9VzgChG*k=)U@^GcFalz~O@h=BkF*iCl7>O*3M-|~D9vupQY)o8X^|U#V3GovL&Mkn?XB;OV1LDU$)!q-Sy% zf0g>Gl&7<}bHy(#MDNF2pY>U5Y)1o?76HIqpTjJO#qKPC&XK9jSJWLcyl1Uc7dCjz zSP;lBME1NjYzE?X>0li2kizJP6rx@UFy_MEj1{Jguy8O7p3;A%;mVx6sd{J=Ht;sF zGdCi?^deSg-%f5S$-vl&%@k5FaPh9vDy`m1iM~{0M+RP?(vd*>B0N(*G7GPOyr%k& zo((qg{6mxHz0R$a>Hi~xu}KRB<6caL{C86e$9azfTpu4+zj5j6CQBK%-iVy%X)~Yu z{yHf%CO`1t#YK`q{c~*!$;jsr3Q4f$AcbVq>XLnjxq|`y#|y>5Q`vWQ3D~4}$wq6r zZ8xVsy#jrvZi1lh1zP+vW-+c>; z_EM>LDpBKrITSc`z}mkv3ax)_^q_MBXu@KJIpr%DgeX#aK7Gj#V6DGC-4iXnSZH@@ zF}l=C%A4{XGqokA`CcO!r-U#;p~PAJ245liX{EGXj>F8S3jItmkbt4vp%3^Ocojjh 
z9QdidAC&&3!@(!k;;EP2tw%TGRA^pGlbFa7TXx}y zx#kU1udZcMj+1Z0`YM&~V>ltbnmu5p;iVA$1f~k^Jt=m7Wa8YZ`&-&ZgCu0wf_eOG z`5jtznQF`9t9RuO%xQfN!pWECExEA?qBvh;i-^#?`X(`uCA#fO?@mM5R#?BGLv$gx zWF*`~<$emq+edu(Dugp>Uirjrh+xgV*RSF+IXPEq=4C-}W=0W>R4@3ALAAa1<>Jxm z1q*g|v*i|vU0h^|;;(SI?m}I`V|<2tQh{`;-vRfsTfpLDL`R#%6fpi{?!y*;i^@1Fq_(?)`-^YNS^3-`PV;?vgw5 zwOV+;+sy?oQO_3{?2c`Z_{h%AY$*#d_vY`I2Jikxl9uAp>HQUeb z{p71}!e1?*qK#YWVlwrYR1|CTa?SR8!kpgX6dySSBiK$-WGGzT^A- zt2;Y$K=o1}{@BpM-Q3j8B~o8gyk1{4gIK8#NpS6J3b!areL8l{S$`rMG-L6LTfnUmN?D8?Dy zh%%M}f3HVu2L-b)NpSrk9^K+m+uzQ%$oyZ<9NzD0j?d(Ka=12sZ8LO!k&E8p}H!%U(Io6&*Mri@!RFD;LGqb8kze5rygJ zy!5%07i`*j^>h7|w(suFhz*UA$?^Lq#vk1F$R# zx4ulFf52=iF)h(wDGQKRBtl8GMDqa#oZ#s{MmwkBeq94;w9c9bD8X#JW7aElBYIB@ zEpX6Zu9lGF)mJK5)@Eo#jLh@Rs@D;BPk#PLVU0ns8#rzud4q!L}+$`pNJW zj&d#sS0ojfpL)3fVj(tCMtE;)AwnWx+6@#O)WK13rKOmH4&VRQZzyAkRCI!oE(Hd5JTajn1~8TJxY7w63a&|+G5q)cwd zF~fny0K8%6e0#Dox$P|{&wkxTk<#wkP6OY)fWNpFz{X{ za!*tyA-zQHEXG}s>nqt>Vog*beRr~L8K}mBHTnZ{^rqiUu3Kh%T>7mf4a8_TH6-}^ zdprYnUeO@6wSoARIfg6_^L~Q_0HEY~@=weNA0l?8wcZ^0BqR6}&(TK_J;7hha3qj3-t;{@j*U}sjCYD zdxS34%ODv_=34@ik)TE@Lry>T!W+3TKHQZ4aQoBP7(U#(vE5D~tU5+@U7#eLx~{Ui z?yKVXEX%A-3Wbcm7q%>AgLdyddWF_)IK%BJgYAha#XBtZK`ZnPN$G@HlR_BNGJfhA zXyZ*7({u-cwCcrlPr8H@dy%_@8heCNMWvum%Gh&c!|hwWZ7i~4cXajCkzJ$|Gs+M% zlnQHc;i6`_l^;Zj>-{gHhF^|zxT5HJs_ROIVNClTjPwXmj9$OZKwFND=hHyugUA*0 zLcW)C?$ef~wx&!}y#w6Y2TQW3huIp1+b4KS;g@MA(3ocxZ6cay8CqlwQCZH>OcRgM zbqTFX2=Z$vc}{VbA&Rmxt4|}S7f)P2qV0z6msLDawKmFTpbc4e3HkOZW}q4DUYJp$ zE)bNlbI?92t7I&zY}a-h<<~gFVNl z>WJf4ZWc?tB}0%Z&F)n-`FIi$%T(%ep*iN}{!tx#OgeV`G$XgG@kNmuXPy}xELh+? zUUXy;Z~GrJ$CQhmG7`o-x19C?LWz6(;X1k6UZIs^Vw4*|Tk`5)sPrSgk}mWv9O#XI zI%b>ElP+kUKHEF2Mklzz9)iBXFcha6K9Hc!7h1TXeyBMxb2B!W-Uil{f4egDU0sI? 
zf}ejBgSdN9D%#@|lNfzNM?%xPmdWT8HYK7d%y(3Fnxv0GV9<&q(2hq`eg3{P47V-3@)z|1edq|Ag`$lT@LqcbBj2}z0s3U z7+hWxTwbF|4H>63H%y?ZNRx!rNEzHovOG~7*6HmsN~#8M30#`W6m8m%Et)0PvZT)S zF{A9ZL)E(v3GO73dIVooeW=#|y1PhV^sPs(6I5ZX<|v-fO3XY}#}UT44>a%et|B+; zd@}s;dc^zvevKmUyPwHK4pu5PmwijHc9 zePR;X(L#}vSZ{BU5yLnG=|qFvXHKv$Ql2g^_kS7UtV- z_Usm}hR`*OtW+XxWRl`C#7Y;X9cuxWYNAs?z`nwF^&-kb$>g%<%g;-&aHbZ9B1jX} z);=T;$6e-BiJ!eDo)^mqE0$GS4E|uz%M{j|+`6;2QHGW}z!wx0-iIxlqGlpad%7|A znz>2NW5Y<|u6Tc{roW*xX-(wtYfP=E@Q?cS5Za%@l@6RrlRw_J4llhW2c_a^t*hFizQFtT!ygFjt{ep#dM{+byZ+N=I(=i`In|47 zm7K!?u%AuRN&1F#?vU!61%*bRP21$z1>bxPkZXP$d`G3VD{zb0sQ3iGNeVXMv@ZWx1zW#)|Y(E zT}*EHM4zPCO=)*(7&hs@3Nt?Wy)cSbs5N|;%X$fB zI>=^IcFp?LE)*6cQ%KQ{hZ;jH&_+@A?5eTV5+|r3A0U&*%TH38@+tF>Psl?)8PglZO=Aw^}c{!D{FtJlhQT z2FdF)hhzwS%a+L1cs6$x^siccqj8ze0Gb#rKc)~;e$7pg?`yN0$ONIoj z7RQsWOw^lho~OlH)_}2`wrC$=s~TFNj(1kkq?;LwwPK02O56yPUV`R!PeIZ(l-la_ zv*VE`sr#?ZIkJkB{YIfo@4$z@_F>2d%&10tv$Z@SZ2MJ63IWM)%h2;$Re>-;0+o7> zH-l^qj;W;d4q_D;R|5ASMESc0bdEQPxx4Q#03kkkF(NkNRY`&*6?Bo-2Ea%17(vlv z+wN2<(cW@L3Fs6iSwtfxnAdKC#_ivz;YrKZiKr4cJmYO|h3h93exjhQS4yy;ig0 z9!Oo>G-a?|zDeFdFsbo|3y{ERr6GT#EoJ+ErM*e|2Q6dSWo9q`sb3zDno(`sc`q4>`ze|8$fiDX zxs8rH&Tw6YSmYxFBHQU+XFBFN%*K4MC9$3a><5rqUbHGHn5PB5s)kg{9S8IoNPXbo zUS+dOlk5V*%Usfp_UgTz{G?DqlT8k0&A^!epTx83uVr_{saqt8@e5)w=$0Gam+^4aF-dZ{Bwb( zo4!JNTw|5gHHpPRRa}%w=jU>Sv+2k`zf?FDK?6!8tgF^zK{qLig}GW|s^X5_o^u-m z?^posZ_pmKsTw$nBk{9XI`XXg%KJH@qUnT6wj?*F^^RzMdD}FCVqGI2aY*97!){%( zeh9iDXDTe!x`IFT)LlP6=q5(l_r!Y@ht5B>Zr%e$;4uXe<0=gUh%Ti~#DsQb&}7)c zhP6)5$R@N-Po6#?f>u>&fm191BvmeB$DY!rbG3MS=xwEmCjVYP1QEU!>&h@rrY!kp zLQ5PkxNiRaK=G3WM}v_=g)8u|y4ID1&VWf_d8F1A)|g2u6(%-!M4jG{2~}PjWg0(u z%wy?Z^QYD|+Za|(ke%s>78$kx=T>+mZ4Z%H)%+2OB|MVFx$HVUw=3xK-m||ne11I6 zq8t4$34}T14>u_OB#^jVbp(tY_65{bJmi8X;?SGWEv|09t4?qGxul$0;UPTqNZXo= zJ-fwp3Ykle2MIkwEU@;bA_ig%>l#S9^2@sUx{QVBLz48&gu~hPm*g*t7dY$p?si{| zQ;mMZLR?Gnoae)=qiaOUSak`al(6Y*}{;V7GT3{Mef zS+WQl8|_(QwICd9Bu$h849;t8HU5d%OBqBbtUH(rG=Lvte#kyH^2zI(wA~dmuB^nJ 
z=`k|+4G8fvTo@|GfE4#LzTRO=w9kA3{@hVf7fn-PojA|wQC4)|52n11r!t8>YaF3O zjB`VU)MxW773GktlcEMQe8WP&OBUCa#?jmaN#k@N`r^;rY3EcZ(d?{O<+K?!lhm7) zCQ6a=5uy~dQjyfU5`%F(DpsC?G?HhMQE? z#+dee%l0LM&J~iOBGX?cRYyYS^vr3xeUL#`_ICwSj%dG_QU#5 z{SB(;4;5CoPgEv6BGbI^H<0el11Rk>e>ybo*Yv*W zk3zCY&HOoKh&N|}z?{YLv|PoN4jO0D@k#-+Ej*u-#Wq?=y_THtvUBP)BjJe#B_kym zXdAC0VvzfkVF8HQmf`4V5uN)@%%{=&t74}NMw^wR1>@wfn}mUSFG+jT9Fc#>T|bH^ z(QDCGqf^?%O;q)-@Ve|+ViYi_(KssuXv(jIM zi#q+wH^pQTvCq)kWQYJl@Z7?u9GROPgh33`5|>C3BjF~CVJ2zGXC=Iv+M9;YzM>vK zCK}PWUODxs{aT8GNaSCcEHp6@(`Xc*cBV{bk65dEY4I>*vsV{Y68ZyN=>o@<$co5L zFk(AeQwY?W#v2%;nDs~v{Y^j1z4A7ckTAP4^zRZBg+$O_b-1Gw`mWEaa?(OQCHV{W zliaekJw(KlHP|oeam!f5TiIZ-9u%7%sF<-lmB-X%@Ad#k}Z@BA)i-v&Zg$GvZ72mc#Z4Vr}*!r3^)aR0e34!hGLi(&0t*3w2}O(OQyx z;6CuIXkUvT8X8ce4Me@pJHNBn29{P(nv*Jmz)%N5VQ$g4V8?C-Q;;>V@sFvq6sWbr zflUeI_KXe;ML0`e+bwm}o&`PeWbr}kYI_!;Jlhj)p`E&vr>repHct%VjD-?S8}+Z@ zXk36mN+3^j+&PPP3^^PoKpJ*9%P(9oG@%4~nTKT@1J(6xKW}93?ivC^qcMAnIObQu zMGopU?9k`>i153Z~Ki0>H2W#-y?K^s898{18Q97i=e_el&{>S z-8ViK{aZ>vZswmXUknvj|y`4pBz-?}HAgFX4G{0jsrKl+eYN&8nTl>E7 zCBpohUik_srAFAZmX^CuP0su6DK+xjSX+LpVq}JLd5|yruGF>C;QG1GN9fowe9o|4 zaLn3GRqG7O6KgfxsaWP^1G09^CMh+=CG#inCp*Bm<+b)C+`$Z*#Dr3&czsSgQ(r6F zA|ku&kh|>K(?=pmwN>f3f7)H&Moewa8x@;5sQ1koDe^Bp(p1yuGL$h^Pv@D_o$KO| z=hz3z%6eCj13RDCu~mrBRqO=x8`%++qRehS<9chV#r=9z7z_IFus{!mLSU^k29%kL zl)Hj72tjmpOLwF)G!$A02UGnU-1{dOf2!)=;OW6Yy9%!NxU!Amy+x>?8BgXoy}dsuJ3i(_v($; zy_-+r$F2BUZJ`;usHn5i7FyOUsQ;-M`sf z)lS?`M+27#(VI9}P74jn!ZUSH2b?C3xS}@p|#Y^`1_b0>1 zw6JfourKIU$#xzB6y;2a-P$Vkql4e)hwT7_yO{*)g&{fAor&IO*Y6za&_wdrUtLc# z%kuL~OR}bZ_g1RhBC*@)(+|UZtk#fq!`Ju%nz?dxK%i_a$SbM?2r8KtGERuA{m!PJ zmeehH-iEfd6zCv}N&tab9gP`!#?KXyG%DhaMSQdKJ{L;f=~A~5O-NfV>Yj@4wrBwx za+Exq5}De^&%j466YU7?HqWR%#Gw?pizDvN>FrG{+Wk?JK}&ZP7PU9s94FRunYSHI zHp_P$s3f!_FwU_9b*u$;_ZU2r6wZ)qKgi~Dn%*Hht=OV)$ZK=PHGHovi)EY{-lu9P zCe?k@Q;54U(H_4vg<1-h{oKf~Y+6Pv^7y2tL4OjqWb^~+zimI%#DaC{!nb5s!(B)I zv+>%-)x`^FZsBD8uKJvbk zWVaAxDU!bRkuH;nUn~U>O|pk`R?YPjXdpS6HyG zZ+w6}*Z_Jd74knEBp{i1XV?<)RO+%FkDn*KbDVY` 
z$UZcgAbh?5X7)>&qt!q=n<72oMv=uMkggXNIJ1-CIG0%1UD)nh=6c4EaEZy?Xz&@z zH%2WaJqI%`4a{NAs`%DQ1}R)`32beKcQXT&iw0scDGFQmLL0=jRSYbZWSU#$Z1f&@2B~z& z73AP{Oo^L9TLNs6lHRDcnX>ofXDOPV{1zmO9Zw)Q9fVPqaR!PAA{}JsFInnnrJr2@~gDDa;h7?o~Z2iLraE^juDka6mC|ar6&u}3IOXhGc@4$yFxME9|jxZY}3}Ha> z1BMorixB!rq)w~EMm?IH8ZcPY0#Y`Q*A>c8w(Z!fv<6JsdHi(r_!;2g&M*GUAWi)v zN9M9*=1|5x{@~1Zcldfq&l4WsveH(@r(b~Qc$8(WN#5s^b31h4WTRgPIp|Opf*;Mh z1caqjV6UF7y06-C`q#vRGBqDh**VnDqw|Ye>E8c>4}Jf02e3qvw?P{G2tB+T62Pqv z;r@ny_(kwqx_bQ41L=xT{?FeKezgcp>eSqN@WkLe!%IA=%tg^BE>%M6!DM0r`}B@} zmUqbdoc5G?bZM)^S(UTLx$k0JbZhPHTT$oaz)<|fEb&2`SSGU)(Bh2ed0o@98Wln< za%M_LGfjVtFIoBbl_i{|-%h_&Bga$95dv6JrbY(|CsR|5*C}4^}Gvp(;6B+(08?(N2YT2!SQ|2Um zh4(t;I40Sj=o0=&qstsgvCwh;^x}%CN=$R%3g;Y51q$`l7agh-KT*-b^AuoppTx^kBvD$(4KJq0xx~+ zeYzn&3>F%mZq@&ym}#66+lF^Q^X+l)kxPha^o{kG zw*Wqul{au_#sAWn#WbJ2Jcl=CBKQ^l(cc@hxtrU=)#U%$GCXbYpB&vzmnA-&HvG@h zSAskewLaJ^-F$WA|1`E(oi?nkssPVIUqifKXk70lyts4?MY zFRlMEYsOs0UhILN%)U>LS1z%eV~-lXM8YsMi^vzZ;DsHp6Csy!?>2oPutAvN_Tt&- zWD#vK?&b3{ghS;Yx*CQ={;E1DA}nsR1GJxsyQ3#?*FtQuRh4A&A&yUB2YbTIjAaK; zUmBq}(Pxj73Nsr7zpi}_=tTBOUs2nL6*Xp~MH^NmE@0v<^o4Lok+F6sf>OSuyiTX& zHX3E4Ajt9z+-+JZE{56hYqyXXbyB|Qka#PpbuKD%65N00(iDq_Z|;Xc5X;&61A^Sn z6_dX#@3ah+bYpGC+twFJLgYLMvxMMaH4;5GGfA@)xBR9Ryx!_Y7lz)mdmKshL;lT6 z?30XY&-$=8lMXhlFMTAo9?u56b#LKMoLIa}b99mbU}XOsmR{^z?t}O_9{BMsMc7hY zy%&72K87j-T>JKLTw2QZVW+KZDf! zOI4myT^$p5uK?oX@Bizd?CbU|J|Pq|5#Z5j{oO8_%Ad9LQ*{f!vB~R{|^7>%;WEH)AxUa|2Yr& o9sSSo`0wa9&3~`K|28^nsiDGq3lLJ2$rN+}ONva%0=JZQHhO+d0AH_kX)*x~F^QnfcuB`mmp~ zS7Ftvvuan>uG;capkSy#5I|5sKtO~*2PC1=Ucf*=ln_8bKY*Y>w1jMJoQ!Rp^pxG~ zj2(68T&=AL^1whSa)3a-@&EVuAFP3fgmH_01|*T^z)%0#rUA(|io$`>ylCT}W-yyn zcO1xzu;v-V+b$O*MX2O-GIfk2%+Fi-z>{_zY+M?@LdI4j;OF?HdL)ynxu;i#9|~Qj z<3jq5esRNMtqU0K&ps~NwcR59P+U)`1<&)@;TZ$RL{>WPRV=jYg(~=+;z)mxoC4Z? 
z`F!2tgx+h2NR7yb%_m4G8992N=4~qhaxxM(Z2dbik#V4fwRo}E&DY;Y zQTE#?VZW_b&%xNrk)H0K>;Jaa|G_5uAGTf**ZXa)a01uKOGjsniEk@FZ?E{&(E?~ zS$6)Bnk*8 z6sI24ih`oHiQ+G;MvDNX?C_ZRGGs{x{KAmvA9o5M`3VD_2`vheihPp9dksufJCluX zWsdfxtD2=w8T?upW7LUDg^!K%2)m7v#kDKyHcnMsELemDO6C>1W3xoZr9v>&+O*8z zwHByaX4igV|x~MHLQp(Ux*PNo^#lVKm zyQbR{jjInwQ8i59Y=}^ThLp?yMh&A07uzq?UZk+uwJ?H$S)8Ye1JRj5a^(y|nl)Nxt_@lpfZ!$0 z)+fpy8Wwt$u;2|AGQtZA#Ny|iK(9E|yPHiTO}aP9`UitmSHa!bUG^D;y``%NS z{${ek$4|YWhT<{KIi!BiLAH=vO1a0Nwu~axE2Z=FG-zBcqoL!bby#RVFw-AB1bW*> zHgdA_sr0$Z61&^~s%wbwF0D+Ck;vda|JyTYa!JKi#fk9}*eF?kSDP=CkqecUH|BVy zVG4DbQ=j*>pP^$1WO_Nz>@g_^fVzB5)i{ z>~p1+iEH()idIdPo{@sF4}JC*4X9jn~elaiCvYIm`x_%OhBQ{!+~@9Xg6unE=cplA5KXt5+$kDfy;IOrK%;Lke3@P$WpOlR`|HU-!=2t{*Vc$~gyNXu2$(VR($LG01DGT6#s0()C=3D|lYi$ULI%Yh zIv`Y7Bki#w3x;z`natFWb%GR#-7+FbSkOSJfPR9(BMn!=1IFn`w8Vw{XOM1z&C34z z^2iKhEN|AkuM;wk3I-(%52cc=dKeJW$)8u?XFpwU);+OZZYGOZ=WlM-ZaH110|KyL zKJm}PkJA^W9ub0?wl=P}Jue^176BH9hWGZboF#OkUEq)CE75wcBpsf=O7MgGJ< z`4VKryIEF)?GN=U6;6mFH6#ZE#PSQ6sR}?=vpja7rgr-N0&Q%Nm(_3EFysf4m4=aL1f!4okF-o7idJZp5 zP!;q!LNNX+h((P9XX~K;-38XqLl6;JR1{_{ZswLGuLrL2H>JOv1l&q;(m-QSZ4E`f6KhkrQXB_qi>D2HYIBW7(5KP)el{3S4=6cecWm zORXlUozjH9+Cz29mE?V58Ci68PLM?0L+%O}x7={1m7H#_Y()ON=lBFCO5-w6!dm_Q z?HbMOB(iY`GV1dvQ&NH{f+SQhb-DAGr3Q&TZahD~x#J7`K)CdiB(S<-*kV8iC6 z`n<;!P@bn1MWdNXex#vFOrc1mRhSvV)k@C|trS`TzMdDz9`4Pj zyosY6rL@0afMnt{jLFmBfgk7*&)aeGl3dgtI~Yx!n2Z|5LG(3BgE3~(AT#w11YxIE zACc%7laQB6f;e+5sIfb$K^j5XX>3TlDlpjX-Wpms7=bzzRT~eDIPs$bf|ea#$$pS; zo@+#5NBsT58|W;F?%-E;!%`K?QJ=cXsKH>Y%ZjOqn8pxa-@{&<&a}(!VLR_@cbn(; zum8szq3w}G10%8lRrG8r|Hb;~B{E)lQ~9QF{dH`i`(t6tYqR^6YmUqm)OdaUsoeTJ z*iM%enYzlMqv&+L6xl6~mv;7@{|g{zs8jJAe9Mdc=?D|E^384RYcw~lGqU&A=F(bo zx_`8s(#hy#0>bE(_b#r$9-G?#l;$woOrp-l$MOTspW*S^W-Yq!N1$i^;#Ag$8L@89 zOfNTv#t(S2BC60nBeIe1y*|HO0U`;%ILhCI`~=A*j$~D#L$C;jOTLJOwWkxB;=cug z(#!~|$kNmT`@*Uo(pjr$^pZqbSkGmicaBRaC+zfuL{!ia01&p3i`a9UZ*h*e;#YXF zYJSkFTY1E^^{s2>#2OZAKO>nPH60r_ohCpnW<`t}d0of6^cq_?tia!oHt?^10j7I3 zRBxvAskoT#$%ddLPKrk^wybQmINwC$o)?Sj>yeDgplDImkc;a-Uw24> 
zc)Za6vg+-)pi3rQgv#kl2Pb|}soTuX{fvHRAAvW>61jpIgU8vJQGr=zSc$nM<&-QN zblVYkl1|O-lXT*AfT1lxnKrYzwQ;x#3+ry#Z>GJ*acAsbZa1O!<^LfqjzLy!pmFhG zU2*(;k-2yE<TyFuDdgFNi(!?479Xt{z_y0y@c}t#jZ2xy8z}hX{`2WM zmRkw+*VER82ETAs9)@3a)i#}Q5zghtG295OIiRZBBfpSch_pW|4>1`(FVdF`;|9a! zZGD$__lEU0InrYo&#~iljXN5)W4^(8zp8UxWc3fC3v?rLI8N7EO;Z?bOx71O7l-}8 z%ozkR1J~7C8Z&zX=oTsyPoIJH^|cmze9_IIK&^iATC>snwQcwiMheBfng~iiHzP+w2l59aoOG=oy5ZB$%=lm;Xv)aC+D;{`KR(<$ zof`|(xDxNDMwBZtqR1--29UU1H|wAC%`&kJ67F3BxcNE?1XkqE_}EPlY&jp`%2jqxmx-)a*%|Y(_sIUyB@ayS@5Z zObQfHRj?RFCsL7Vv)P!@NSZJ%IE-iFUO6BY5fF&Yk~j|jYI_4$VPS3=*<`m!8)fm7 z^v{?Uj-6T24C6t&BySkHu<~2Lq>=}DL>NLfX~OX&P!iArya-B+zz(QX!vZ3>)Jrdl zypvKaoYfV9UUXbsgW}w4kpi)nel8{Do0DXH=7}N>jij1ke<8Yr9qPyolAGr`jzZLs z{R1}aC@3w#SPVO+CUr0eHz{52odU!}|7k=~2uc-r2k?+|GV_8UOD=_6ZU0@=XPp5^ zM0)RAT0!~1BoXzPfPUj?gai3nIsxGd%GPrv6jET>)aZl}uyI1}?_P8_?3zHxJOjge znuJXI3*o>Fera&J6C@1_2a5IMsq1h79Tw@GyDLM-zm(-WYGw ztlC72y-sc?pRNnTaQw1TL4CJL7rouCUDnsIfZiV-$JP!n&)gq@IsiAtxbG~%x;FhU zj=$}CH*-4TJMWxr=bsA!4&c#$l5IG}Uxjhpk zdAM_mSJh7{2BKPrF-@K%?PeTng!?}rw+F}ZU{2c57evrFlDz#(1EW7zx&!A@0MBhp zS6Uv;HQ;t{h8V#W$YqyTtI;oN6(TaSylT*|8ZhCd-W{yP zsy_QPwE+%4K0CFR^68;ikwD2WKl)TZ!?z1g!ET;F;b@(_=6=NIwq9$-G);vG++mAf zH7rNnD(!)rot5)Hd9GuNIp)4)lpbx0x5|W zk;$;;Ryq^Fnr|3W4~t{X%7Pmgb&#`K3w>W?<%isfOAEb2tM3E~vLw7M4(?wuo1));67f41gcBTw>X6bt>(6t=7F!Ys^~d^`<4rW! 
zssQBmk}3%(r0?IHyT1yG8Eq^`S$%&zag*ki1()>QM0(MSPpIRCH-_%QD6z`ub}Tp| z>mNnT2ZqA#`>u<6WEK3GZVBV-?rC6Kw^2;VEyKtdT1MGWi!X5eGwTv!I)X#f{iZD7 zh847=($Z)DCI-H4V$hj5T-%7$na_w2GKKXPE`H>bQd3wAE*rR!uMh>4krH}M{GXv($A4~Bq8L+8 zTcAqnod;H_q`{l-l61>zg6er8Y?0b1k$uiiir*U>Z^dJ;;W|6ERO-C zk(vyBsOv&P^QOx6jav5jv4lxo zyzbN0W%47NgDJv(^x%1wexQ{fk0xgJ7oB@c5)|&pAbF&QMknfm8s60}i|S;i4e5F+ z>bkbPjqTIFjK*SzfU&141`f!>e`4CLzH&wKnWu?NgKaJVQ}62TL$7l%+~=7h3C%JC zdZhq-rA&13%{G?9ZQk2&x20Y_+f+8W#DLGD4`X}oTEIlxt5CsH_J|?*RThK|C}YNx zE{C_uk0n%NSwlq-LNk^4onHae*;QUf7d0L2Za_+lB+yz&2KrGOk*oO?nnM|fTG4>b zn~Zs8%PYu8$zM~tJD9qPD!xu^7?Y-Qm?HJiqQZxsD1L->9W>Z6=v=O*VEtR2zpX!T zb^*kMcmj#)y)h-51hjawL`eINk?TzaB*5RX_GXyTJVD03v(VgWQ#9(7Om)8lHi;;T zu!34`B?n~wWq32>G|`8H{ZMKr+0g27^Aa%}>Gef$=chjdE(&^p8iR2qNylMsx zPzyI5@-caNSQ-ILLUzFG>CdM@LUuG}`W%sYH$KVI-&wVKHgRf>LP^R(#fust<(w_( z8p!x6#&Rjg`Tj0?)NvU8j&3UP@S$RreZfm+JGJA$VM{f6tg=kP&{XbqC9=SkB9;Z; znT?>O#))w9gIGMk>)fQz2reECa^@5os90&}Sa?{gP5<&=sjtf2?|L$*7&P-NZu_dX z6`BldkAglW0fP<*tf>7OQY0nWVf#01)V%MSQ=oktNNyTPN$GS%UQKE~gYKeC;-IOs zr%c$hkBII_%g8Mg83EHi6*gJHO7}-Ycc$rAs8xnvkrjoXwCQP!v#F8i%Zc1sG5&d> zgM?bBqsk7r$V|X##2b0LIw;IyWG#iz4c#DI2mWul&)VM~sI8|Os(?S+-k7NtNVs27 z8Uz|L?8=c#Po#7)bF6qn;&YfBsJMA^tvP*R*!tx6x5LGbf-)@9iP;n5G2GrU%=`n$ne!mRf^)h$Biv4U}e|p}HStp7rvV+6ckt$9q#Z6jvF13U>Aj1iTn5eal zgQ$baV zySbbZ0;iGfsUUyOI9wth52z+NuSkGA6z78_n^V6pBc3L^P}p6*X?`+3nusv-|(XQnf~3O`2bf`E0hq} z(n;Kr_^DGLx-3h4;C75&0ZR{+&GHlE;+$)3)iGDlw_ zsKN-OtYKjFH03U3uMle)WXlQB`x6^9Q%$G6otqOEFdOut)51$ADDscjDqw++A^_Rp zToSLPUC@y**qJcM>4EhtfoW;JkIY!eJDcEsa)~WWg$)IB6H&!P-X`L7u6)_!Vk)t- zZ{?l03kFFtKx4@4M3Vgz#F_zuVgx6H>Kywf6k0#ec&Hwu?*q7g!V3b?QWEt$L25Mx zO+b@L@{|tY9Z6>8NScM)SPcAG&N=}2Cy|38LWgwiCsnkjW7qOs6L=oZ=PQEcWP5_z z*Qx__*LrGs@+`D}StB!Q4j--WfwpXa*run}y{76696C6)9Zd25~hq7vQZDcS(oKj$h_NM`m z(;iFxH+JZrbDg_~L~x#?kJJ00~uowPSZ#53i-c@IFEFn6%1lutRO*1atRKYL<>!dTDJbo+h zIRUr6iM{I1d`p#L%ZwgOW^1QQqeES*&sRrbI3c83&EBf4r4=3Aj5^7^V2492;4hIH zJ6qz)wJ_mX0-QHh8Z(T8khqxFv36p6Tq==ysU@Ps?nWS6Y_dD5xI9r+Q6q>yZ;sWP 
zsEi=_8dPeZPH8szcXQN_#is{d|innjPG|Fxm0>bf$EU{O) zL3#tlz)4J;g7v^5tDJp9Lx>-jHGZfi&L5Y_Pmx-qjJNfo?4uIU_0GFIVvkj@)b5?| z=O&6pV^#n<#yzhlnV*p-bjM(oaQbkoNM?^~@1UsQmGi)rJo1=X{w=(Yx?LHY@Nx=5 zZ!8l79aSQ!$Ikx!O@F0$ErN+yn_h)IAFn12{>;~TnT89`-8q|bFBKbaOre{&v*{mO z`^K7B;LSwTZbWTvQrn!1#+Yz|Fysn3ErM;%NH@=jdH3_Cc_plxxmkDB%~YvzA8X!L z^ImJa3;|e z`XCxNXi_|{&DnOLVgyyCTcKt_VJ`VFG*D?)lsu$|Mj4!W=+>Dr3<3E**?sx zW2vfu0-v|3UYTv_R3=N^+K*e^f#~9+W3^P(*3A)JQO(hFe$oBkR#cadpt62`LxjPA zfN+4Ifd5rd{aU+pa)CGJ@^KhO;To~0^?=Y^VDW6AY`en4mDu(7VCM`jzp?-;@5=$G(O;+ay z4~Qd)Goe<#LeF1>rqjp8BQ!0EuF*xA;}n-!Spk^^Ui+&%>_n&7Q+YuAoI2@fa?@Vs z1ix{hC)%*3%`sj~mTR33Vb4{Vmj3F@F6mYgO*&4t%og>66PX`3wx$nol7to^G{-{h zGq5+`C!rC~t8T{J*O-3_4P(VGrW4)E*|%F+ocso`*nof$4>3`%mj+~_FtSAU0E=6R zB|>w_e9CyhAGS0FO}`dI=-WsDJIS3=%ExqS3z!3jlh~&MP9iY~|LVsB#(!6?C)CUf zF#K*nTmLTEqkW&5e;qC-Gh=IG`hQ&+|5dWrkh1+DhSRNf=7M>N`yPTD2L(A81#b-{ z9Gbhwv4wk$kh-Zc84>@Y+FPqwSk`csE8h7i-zWl7it%PXr`V{xNBzLheEwL)O9tT= zrs-L8d<*EtXKwHEJR~tW34xMuo4d2XD90lTod!({puJY@b}^U~Qrso!i6AdHj0}CK zvhsfEHSWkILiqvq5W3MrORq?^S)yEkIIgaHKq;YbJ0cQ+CM}{44aNu2#aO;jzPKnE zVZN6OHr(@cWGC+B_m)=J%xSP?e3M8~is4Cyz+$PG$FQQ3aQ}5HORdOO`r!FsmM>ywWtTYoQV7=!i7 ztJs>(NIQU64IFj9o>YtLHT!*a4?s|Qn#>eB>(E>pXyWxur6=ok{TsvmOJVFwH z*juD{LoN~tt0YO_=uI2)y(%jS=wcj#O_eA=-FrtJam;A+!Sd?7#~P8@6p+X&r5 zu}gMbI_fi@k4jQ`=%aw%>>DfzSs#8GA#G>=v3*4ms9i*tlo^t+u<;wo0VghiqTu<3 zkPxAcJ0a=4P0Fj66rai|IFGGJHu>Q+CqYIj&yppmB|-=0SBF&BYG>qHM(c`gd_EIO z83;3mRG*|xN0=DQuxzCLMA*o=x(FR9;!qzwr#4lHlQ^c>=3nvCyjIE~QfHSO=a&Ai z@gaLGN>pAa!s_2s;Um`GE1`B7T+T%j$^+gch=d}HYx`rRVDxGsSwc8=bqpKLl$3#h zi?uG1{y52buvu>6T`K6l@kb65A!)e#G3n|k^SinsT%qkLIR5S-_{zK0X2i|R$4skL ztsOA6$qBYIk(Sk`ja{OgYj2mWp?srg0YQ$O>-2lT(R;S-RQTEJyVhL;Y&!MJL7w%> z0Icn%(AbJEHnW3H+9z#qM-F7TiDAf{ls9%S!w$k|h!=N~w+76nzfE%8ax$SbXW^32njB6i0~TrQ zB+Oa!*E$+DjEY(ALEbfszll9j~Y2eY* z0G+NH#0p>D)KDpxT{?!N3l$GkURM`Q8nZGMrb#{1vt?_*pBe@dW7d!jPi$Q4E(v_4 z3`&9v9#Df-EtRIrPHup)V4JlU*SMU#il+&W0hf*P@|2d#J5HECo7jFFXN zP0T@=u2$(yO4BshF;6)&)cd)=T8Ta{vxum@yQL~h?|-FIayjmLx1pTmA{Mvwh##Ac 
z@Bb3PX_LoR=qBIUV6PLP2Q$u9`6b=`cd0Ed-+a4{M_RBqQ0Q_V2^uC-R>UucH*wVs zqF!?et$u6-)7XnlO|9t3MIZ|5{QOL3@yUZC%_)8Y8G`+1!yS*~5Wzy=Ta~W1)mq0{ zFEg6aX)>Cs*;`<~{>P-U`VkOaNIkBltafMX%|R-!g_7%-z#A+!I~rfQt{lJu z5l@gvcSc9X)S!z4DDyHeu}DJ$+V%@QVLa3RPc6sP?CLA2#n37}#`D|Mlqjhl#{J0d z*_dajD$F#Y3EQ(6D*gue>iUf7C#wswRiF)yxXWjRTH}S&OOP+z;Z1u59Gb2D zEVdG`>DmN4!3Zu$iB%h%PC~8=FJO;#Vr(h~M9+GzCr>*(!7q5hjQ!(#4mw&fM4D(f zaeF*D&mBT*egBu?3{E17vwDiR+lhv*qyCn>V@6T2 z2pK3Vl^{4lNlrM2OvuW%_`kRl;UvihT}%AUr93DkU=Pn zA^YLzUemA@yp`Z?UxAjO`!Yzl?Kkek2>W6&ApBtYWcK#!3sD7F+opw zdW7|^s6}jZY+W?7^pTLes%`3A6}2gZNzi^=&u_GI^Vq4bX>oJASV2Ky(XqyvoHgs# zK%+>7lm+3S6M`CwNE(y3V-=^oDCbGb79sJ9Cq~VXben3mFSuMri#goBWkpaKl1L&; zaS3Np4CKkqO-`z$5r2uH)-H=H8SE|k>!nah>dnbqMoo=3tor218Z=++()YdzC0v8#9YPC$-58VIh_X=-eb zMm_u@ht<+JFjAoY$+4P~I4Fy5xeAe%@)!CZOyx33vuiKm4wtS<>pgO03U4!?tZ~oA zk0)m1LD+u`zU-z0OD-n519`=blzSbFzyD+aT;Zl#NCYM*>yhL#ovL|lK{^|w4SRlU zQ3~QC#(wiCu(mW*t?Q&V^~cEeVV3bFONUvMNN50pM(>WYHpd0KKR7ZDSNAMpU`Z?@ zeeM{!g3=ha4O^>$S>!RAIhqha6bb?|OwivG_tie_Enmb16>;AtLDaydE+o(9VmheT zkSnl@2QaMK46r>r)B6sl|7&;b=UFa9**662J45>cL;>{M+0n_?THV@;-rUL9`d`5| zK^_SZMGnC?(*OIYh!TY9V?Y(IR{Y#5YD1!wDI};84xng}ncHTIIY=f!^Y9?n?LF); zz?;X}oS+u;^m?UZQ(P95;Q$^WW?9!*HA-S4fbd_yt+iwfi&Z0#KXf9WA3rq?2X z0V(J@u*-jf_h+I`W4L{7d7rKntjFX~32=NYNmA0Bi)}&iWo_|py1@}3_3k0}_(Vsu zc!ojz52NrL)I}l|R!8Tbg1;kfHtu}+v2bvGrS6^-9xfNH$5Z)<81&Al*i;Kr{X6BC za~%|M5e>%`#Li9pF~uCV;#-BmZM^*ClU<)d6eTho`c%;Tjqi_8_D) zy*_$Muu+zW({GQH`IZwc{PQ`W&l~hbU_^MIRNVzPN0GATMY7j^@ybi#wVyoW3S*FJSfnwx(GM!*FP@5^V6_kdQX9Wr%UxFGo_S&JdK1wsp8VfpN??mC4rF2J0Eo7Uk9(iaoMLjjpuWH$Q zwEa~fe>Xs;8sfa~2;!_hAc4u&osZ}vJ;J+v$h?8e5C53I0#-DA!zyK6`s~g-R zCI3)BY4q@h4H)-Rz^vVtWO}_h4J}6LmT_12>N%b)q2+m^5*R>uh$J zmT0xnM3<$@7^BSXEGYTQ$s>4lv3kQce}#KM=fy0!@~UX1A{1~pi5WjX@2kJME3Aby z*~4j!1ED;TN#7M8I*7;Nq~xQH7npI>9J%_ob)S39}_ zA{L)YgvT&_lJ@p34%Fl~aC2R~-r*=)m>KkFOw`7X^4$TcVbN4AdV;pL0b8H?ou)$z ztoUVju8*MTSqlIz$P3+GCHL@Igic7wgXkCvOqy6MSGpZ4FHy<9Y3We~ln*h=RLRVF ze+;49EBv9tcVmG+*myv|H=RrApGx_)6Z|?3n)0uI=KZ*OU~q_UsG;S4Z6cI(MB)p{ 
z&;vCcJ8_;lF&sYLrkuJ~33DB%-c@k)CoCtS&p%kt-(({2cOn$=U`D4_?Q1k)Vd8p^ z3Ejdep>HRNqw}-z5PrFpOpGzhYtU)Rue(@}-*e+6a(V_~Wf;CCL3>g&aIwXY(T-NE zmAP(ye{S{PxC}Uq-rXKHAfRqma3I8gf95$lxmg+iD;v1*lB(VpPukl0`{f@)?Gc+9 zS7K^V2k9P@39hEHtZd?3?MAxQakh|HMY=gwrrC(G6|o!Wdpel|nF2h7w|+7R1d92X z&Q<>9KT1Fu8v(^6dvoJ;2a@VWc`H2N<`W$8!{>2%oBzsVo3r@-%GLG5@Fx5-Jj-zf zr@6f!uuR|OW!jn1d6G&0WMFw64SD<+j-PQK-u-#l{pI5Gcw2ve7~JW2)e44iJdMX* zt;eHHCzsvn?!3jr5&iUvF-(5EqUyuBUmy14oS*gOnY z>-YA#BO-8A%ysDK-nPzow7-8p!J)hOR3kUNR^-8(LToQ~6u-=eX^bA8e>!#A;5e>t zKN;?nyPn;`tulfjj+)uGapcO1eooeF35PZr5N~Zp{QlG>8t~JXF`_4im|>M;yWeaA z1iN~JJKyP#7G~yX;*`zL+;M!J= zZ;St;d+Pb3Z8&L%|Jmc3pKjUvH0|oCMT7hK>HgE<$(z^93&tDX`}ddnaR1>3;3d^- z>*CM##%r5D-_^$LHVwk@ChYT%VLm$ZY%FwJnGkNGKCJpgM&Ah zUN8L}U!E?kY?r@=4BIfZS8%csufE`2-mFlLD_=i`-0rlC0jIrBMt6xW{ab*i`ztn% z_7DEx0SI3b;b(r1CJcUee-damFQm|1mtoF2O~%RNfwk3U0Z=Sscor(l9D5B=Mj?vd zIp!LmlVjK+v6K!un+E4WAWvT2m{At!xS_0mr{WRt(qe{E`dlbV!1J-{ASqG8v%O9v z?r=rK3Q9nkP({YI(PE3Nqc-vNy1)^=5`j_+5FSj30Oxhj4r?#}d zr|N^|AC)S%j;m5}=~^o;2GjpJe($&oc}SWDbovXQQ{$lhZn*mmDoV!BqHlCZ{Lc@DkP*0W5nxslS@4Az zOmumnz4YpAbx+t5!du#$pn>MUn%-TV>pD*pgJShjzV4Scjr{J>f)3OBmscOXe}4RZ z-z6(aed=nGdjiCNJj~p~%p2CtvI-4r;5G9 z7X};Ys=13p{3%S{9_VtKH=-Bx@b@>};@j2<5FMwW#Jbe-__`o(c&(i5I#{kr2Uc&N z$C`v2FhXW|p{CD#90=WgwW*)rul-s1Zj$&QSuzhRPBby+VG_04)hGnT#8v_=r$o+9(#6hGG zqHt0W5twB_cip)^2$Q~8GofnVug_ivVif;EG1Bk~!GDR%xe*Q2JVYS(&oO^vPh);h zGcWNTe{RY6-TsmeAW51M5#Iw(>mPLUjx5boRV)!d`4KE&ZtBI)(ZQS3pah8PzKr0>$fLi2M&MIS;?$>+pK8sO}?id z2`{DPW1BrE4sKpTT%nI%m5K3@*OCjH1CoS~sEmL*(71T{JK?!?K{^Rpk=5^FGX*y# zCv;8o9x=#-*6nkD2bxcJRP$cxiEBBI#@~zp2QC6~U^e9}uoZbnBNzu10--~jWhUh; zm=$?96n&vXiNElPay|M%W7~+#%Ael2LWiX%m{fA$RwX>pHMw^rJwapfKtHMEK&(pA zT00;9(0(I)ll(hRjjoh_`B#9!H+}W*5+1aF=>G%w|6GC`m`VBS|4#f-8T9u5WnmuZ zm|TZuORWDa79RQ$!?(mKxp&X!-x7-q%ezti+lKyKx_?_9F0ox_{69cri?Vx(C08w< zKwTG=-iEr~^2_xVw}qP>#3ZoJ3i>og%}cLE)bXP6-TNYgA^Lw>2>)>bY{n**kVY@& zJFQY9=Rjt6>lR+$gep$2I)$2NevKqZ?TeZ}VYJ$B+($6FJ#$RH6S>G$UH@=EMJ4A; 
zR1&Z6fC?0xj%M+Fwb#ZgY+ZoDb+h(LI8q=+Jt%xsNE3<-Q;-EFk)y~ZkF5U9ujJn_8iXffXEDBcYAx`TvM4Yw;QU|}?~7eGkWfDmRTqfbfO4FQ z{mD9`cwi&bfSQ{Vn&hex$P}DgSTb{fs$583oI@qb2_C~!Xtd*(7!M+)F8eJVLjws) zV^{7NrGN;$kd&fA|BPZ;iGdlV%0CbcRyf*F;qQ4Og>!CG&hMT5KY{ya;=~s?LF*DC zy8b)re}TUlqJ1+g48smeQv7E%hi`V@WdDfp?+g8B0UioH#u0K4r`N>*PN%z%kRHFY z@#+42nmBFa$xgktd`+!B*(JY$n?Ol z(~)}gMA6)t%)%o0z{pY=#>3-H)_!GA(PbfY&0lMkk8aew+d?BYyMOG>cfUtxf9*bZ z@_()Kd%v~wyXM#Bm&Yw*1+F8jfSS0*Diy9MGo!ocAatj#w@O&$aIYKhRPzqi%U$l= z^`18;w;Y{rIIEUJ0uJ=LQUzZO^iM20U=aG=b~xnpTK(%Rx9F=bGV7djV3-u;W3(U?w7d*d6~(+(Uww|3vu21I8cb;xvvvlhWG=Cq zg*<+ZSNJ{+uo!VLRX}U`{`$jg>^ZlH^i`t}8w5rfR}>_@mPl`8p2 zT9RaTHr{~yhGOFXxRk804Ujkscd=NI(b~G~m=EzfDJZ95F0BJgZ78anSj(lEFtot* z)`ym?>A<9b35U|uWKOn_A<&emOl=mj>|+vX*M>`ywARL2PtVzItBxD$87l+PCFKW=?;?bf2CEyeF{eQ4xwO`y9MAr zc~6fTmu%M$noUA`CJmg6@M@R&$E$hm*-T`>ndHhq@C~V`JsI_Uu)k+vzW*ly=7Zm%epm; zy99TFy9Or&2-474XlOLJySoG@&{%MH_h1RZg1ft0g1ZI@YTS2K%~{oR)+ix>Zgw8_yhyfgrT2j^HM3W572S~7!%4R2L$%n$ZSmRY}CGL#VP zLGad&*hXBxJxcuAM_0Ju0PYEA#U4g*+2;9kc3`(@d$*~}8=a<-`Rxei$6hcrWu#(D}g?A`|ZO-^W3?%+F%e0?j-i;UKyc){ksPhg9Om~ z4gD@>;hQYuCV=hjsq1&>n)<0?u?Ga+;bu0h{v`Cog%3aICRCf%3VW$cA90h*=0#qa zd77U_+s4jB8(8AjNNm*d9vh+0%HrAFHAi!|YgQT1#x;kg^uOJPt3T401T3jIN^X4m z)EKM5={Qejs41t0r9&UqOIM9?^muOYe6`tYpMQ!m=7}|u1AGd z%^p|OTpT7>fV=%6u=m-1*XI&+tyTnUpPsA{jke2E#4)33QQN&9%LfNh?(MU720b6* z%7}@(=^mn@%fSfjwKi73-sjC3Z<&N+tg$^rp5>V#+_$1jhD4yNY`@P8y&)=!;!)Tk zV46ra(F99OD<1!{obY(mC|UWL;FH&b*I{Uqxb~**6Zc5;>;MJ7RjYs z2qw6lHI3wob=5)iVMy%P*4LkVQ(_M_Kazjlt@%AJ`8^J;NknL+R=*i^Es^^KWZTdB z_Q5aAI874dNki+KZA# z-|7%&VWIca$d;1hNl7T+MB|&C zO8mt9Mos6@O*p)9o69l8BRUezT_6L4=M-@-YQ!r%oIVMT41qH!nDgVhU13WiJH7y+ zPU6O>e*E>&Ol)7euzj1%{ODK2+iNG`&~$D(HyRj{4n`^j!mYqZv>V9v)hXLLsW>MQ zx_Ers`9;XyX+JypcQ(bX$UMCgGfq%`t1|m)j0na7Xo6*9*+7wE0g2<+XB+qm20=kLX?FRkDv=ei}n1liPs0n!yg)s6Jn%RwY}Fkf_d54^=#a>3}*Qi2_G zkVbfT!wM7uM$7x7GM7LB{Nol$V#ZEiF)F2u=SL6kOvO-V+xBDbX$Ka&fWd2+I_4aq zr!QG=RmlR#2gs%_&ShHqU=po=Vion@CWyNsjuJ-c=4~Am5udzk-Bc%iwWU%I)`YN! 
zN_(o>w9TQAF-Gn<@#`#UNd=GC6Q12mcq8wJg|J6ue;dwgI-FoIp*`=@RE^v^t9Mm; zjF*_bq_IL=mBX_nqNOa*{XAm!S{|d)TMbY2Y{ifVvohVCV{%2<{(R+;taVAhXW`@* znugfKTbWq+wg6J%HgZrs5>hnFcJmY^atKjSlVuAHk%mPQqM!56#(M0f&&|)PZBR25 zZZMNuev+{Oa8{N{6773cD_wl&=vZb7m%P4S-yD`tJRG~UHIf^pbq(-HNrMy$=$9?< z<-!JcE*KhXnG;q(b1w6UnTzNjUUw4czL&d6>AotzK&kT1Yd#BlAiN1qr=mAnS)!Y{ z1}2h}zhq^Yy~It93#E!}a`xR>7Z}~^41+F0V`B4ZYdZIB1XPZ!);uiA(j7K^T|K$> z#%A;o(@Cy%qC zjw=+ANv-zhveUMs`WljAK^&uhWR(>(NTr-byGY93?@ZS$VV%08z<7~cgX>M==H#t< zo^k8+g?!8ued4+^)#PQy4%0_+c2br|X-_QRr)L8yS^2{nvc%ab+g?72-n@}(aBqq) z$Q6(R&dEVb=&!SAg$6z)d5+GLjufNUB!5ZtB>k{6d>22lnY6<|n0NHQXl(m5%M;Ix zNYrZ|P&_qO$wh)N=zNnY(J3P1wC(k3ZH#0Xa8WovwCps%(2`xJQ=x8?oM%D3s3#b2 z^ZtSyG3i@tPwXqgEulY>=m||CwV{s%N5xG9gU|Y&OBVD8X63A|6L`XHq4@UQ^bb2J zy(Tls)6RCI#ju{ye3@?mxb7rqQeEigaYqhWPjzDPR40GY!5S`Wr|0vjB5*{m)#na* zVzC5Kc}Dw=Ws~nP3ZH)q`#zx9n4Gizy-4W`ibXmL?!czNm%Q-`-=>8A81`$%FBvbr zFE$?bZVvB~{d0Jqv8Hp<$ife8PSgWf3A9*hPErmLaE8&1+SiQn>zSk1TVd^r<4+V)QBsLc-B1c4qd?cQ zk>=bDcWxQFa+O+^L7V~5oMa(n1PACQTMNGx7AhBwVs|p$L5|ZHKptg5LN*>-q7v<4 ztS%y>@R6hKOo;a<$99;Bnw+|{5RK5jR|&XAh-Lr_=$1q7SP(}W_}18kO9sKNO@>yx zciWe)EP-}4gt%)==Vblv`$YylDBw%eRpQ@>)6Aq&cSmX z%v+{?DQ>huMSeH(#Vek+T#>Qw9m zcmSy-;&ewTD}bs~s{y2?=pYkN9Kn!GM55weTV{HVQ!BrC|79}j8XT#|THM8EDTQ|m zvc0mA^_o?B)m7rqN{*3Y6~?Nwn0YU(xt~`gfd+1n(;IXN=$#!_3StL5cjcwF+$-nc zPr$wIz*ku`!2+Y@8%SY_4tbKKnIOln$h0#IQJwD%kEg^IWy7oNJ5JdoumHFo_WwtEX!UDU#3mZW7;I{Y`ran&kkUIifNz% z8dEhu*(Ab`C3TA}6(nmJFQhyW8HhviOGR!V(gZnSMMnJA$SCdLEYayFN-Z>|q~ZU> zBxm;Tm}H*{+b#>D=MfH&3@B3{P)Ow%UrzI6(})1cQ7Fo4zbKEu+$zVx_uRxu9Ck_? zW-6#3ATKcibc88&u@;&DYT*ZuX$|5t8nRMtGyy{IXsj=-6y?nd$Pzb8Ne^@!>3Jr< zr0tT+4cJtuynxTXTY#pILbga;tvWfCMR!+p7m%g0xP8sd$4l+WS(i4iJi4ujNC^X^ zugjq4L1VUkfy|N$;>zm^7}_%h)CLV4)8_ba^Af4I1WiXdTw3XRKcz7IDTRpFFK9E% z-;d@d3^TCM^PE!eQm~~?+*Ulb2{UM$*q$AhTzVU+vTq?V8K)iY4ZQbU}W4v&X^0GHuwGH z!g1DfZ~NzmmGA93yND8oOgBR3I9d&7f4qwejL7ymczY42Ri_Rj5eZd~A`yWo4UmY0 ztu9%%8(L{mH(y8(oWA~8`vINsK3-=nv-S4$(Z$DYD%#%w6te-`lUMW7;-+e>%8a2? 
zvb?^Cy`dg?rmR~9#Mh{%xvHx1+Nn|tcag2LDUfF=x5}|;FK_!n%e%hY;6!BuR4R|; zW0fqS;U!7v{`x)Jo3ZZZbWfo0Vxi5ZN#{}{E^E?bRNow*;zu=4GyrCt1i)JP9$PB> zRjHUohSl`f63tXWwHMkBhi;HJkX0DzV({0tKDFfU?N)9EPFH)69~4#TSFpzda$WR~ zC)8(Mgc2{iT8?hV$dFtB6R7aQ-zK)Chdas!tvLAd1;dz`;(Mg@BDtpL*B^_Yn3H;~_~I|mn=&Im2w?0+ z=HVf^^iH6_3wK!*-k%0AtuS68N2x~L5fQNC6?;nMZyj-A%i>Naxn$$F!a|hx-ucF$ zva+sJ&%IWIGSl)aCb~dx49f4dE$5F^E*LR08!R^o?_k0U=kG%0I`Ve^*YTHDa&kkWogjbkga2iAW2!y4a_CzNAlX zvWW?uEXUc3lcvd`XRC7%n`tb1&)pN|Q=a2f-jnx6Gm+D4wdC|gYHh{cAXlyW5x7QK z3w3Lfu-A-!ly9foYx|+VBV?CR?|j?i$F3tYy?5o35BBKb!u{;z?Im1qV~j>`IE`ST z8-ah#A%Cgl5q~Md5K2<)(RNfPri~h~9Yg*GVYpupHI1NC8_s-ChsFW09*Nt6)~;oy zy2L7&Z*GP#+VT}OW9zf7IM~|27zAgo*~#btxRzV5cs7eGsJe|CzyoKVu&xF(`r_>~ z{SJ~wMDC|gOqxCO9pIPRaRs|@MFV(hHQ>xcJ|?xNzDP#Nz!A(xsD^V$THv}v8*RtT zM}>YuyEH-;LN;HY+)k2oikcDIiYv$(+<-8e@I`7pvYmw9gTTM;5Q}Q@sP%708>Rj) zM-Q7FYB(U-^(sB#7wnFIws>OJpl0NvX18nY93{4EQCHAA6b;kEnInucb+A%oU>1y! zex1+cBjrVUGY9E4rnv&TEMQJEE}99H34#bJPfaQ#fyLID3?O~odVl@)j(ArQozbk* z(e#z$Oo9FbqL}NG=wbnM4ae443W0LX40pFnNuI`S-^X=dkjKvUuo;Dp-tniGy3HCd zR>&2mU4lUV37HO<6%#&QO9;GSyv?0*KaTxn-uE>NCPJ2@spA*QG6~3k8|`L zBb`&QzpIXFFio2S0?N%?Bi2h)LwZhgjWCeiR||-->Pn@IYf=D5wD83ed{?5x2)=e77Cw_)2tmt94(}#iSeAd*TLcWbI1l)x?DYT z*aEoU2UB%~PM(`QD(WbDLyg5E+1LaAHd;geGj!-)>5m?nKR-h{J;_~+7O`R6LyTqj zL!6u3Of`I^f;$~8wfbV7HG-|jftUZ&Pfw2!w8g!hjgfCJ^)Q~Np|d^;-tzbD&X9IH z43~Bq-AHkV2Du}uV!U4f8%?PW7fEber&uv)K{hOGY}_%bRbDrfcainG*Av@Sj!fKb z599}Mw~5u?5l0KF$znX0d+YrX1?ycb`*>*_oI7GiKIVc%Z^5@E##mXxk0)EkKJpA` zBR^3`Zu=Z0y2Q7}#HvInqI8B51N?t{!qQ^q6!4Q@>yKHPrAcEk?9=)H1Qw{rKca@X z;V}c&dNL&AbmC_y!jD3_{gcYPyWWO^TVwH1+LwB>WZy@LIKaIv9kZMoe5RE#$CN{F z&2!X1VYuso^0glIE0fK1kW6)%+b>Icc6u9ryJyh>=P4gBvK(=eUeDf&XaZil1)D)2 z$ie1T3ve~(QAd0-ygfW&Tv4A#6%=^D-Mp6yMNugL!*4G{Le(0`v{^lwb8jVrxG)p? 
zf-O%YBe*bYM|V1S(Q0WGRKX%t3aT;+s=oO#X~t=rBvL88Z_OEs1}r`~_3*Bnu?Aa` z`di|Y3bh++s;yAh$0g&YP4J>jiF+!fAdNMmOi>;9P%7k8z3k+b?t$;*ZRqAr6c8b)T{*u_a-=;v?|h(qDu@A;?@O*z$v` zD0UI(1BGE)-X^SJnzm8eAer$j#T5Q1RVVMNFb}sPzt-Rl|1lM z7@;-{a8eiL@e$I2-C7j=3h+y;#QoE#gpqfvZzhfV$Zl@sT1@0B)b`+Fa<8Zmpko+V zd=c4BT7s9uGaUpx3Z&wJO+_M6wE2`>_qF&jE4PaUt|9?&r3MdjN?aVVutl=9nPnM< zhF)RqTy!cHeH6oYt1)?@if7I#EDUInEKX#2epkz%)5oNX9pb{e9Cxgi-n_AU`@t%i zAeX?>Q9;rTusLz1f0@`bu7XbFPK>f!uVmC#otK?uS(;2A(Hd;Hb(>%`wT8+tDxoJ5 z#JOA}2gJ|S2L=whT2ou#+TyEgZI9LUG9HL}vtX2?3#qgvMm~Yg6*3%>#*KJN2cJG3 zMQ*mWjKd5bI--UnnmKz>ga?k0B9yP{iBCh4yD{*d$(5^-BWlLV4Nr5}$NuHs1(L>- zymF|#4mGH}^(T3aoyZcHRbQHfP#$z%29`tRHA3Yz=v5Q3nzEO_Xv|Y0Ak>k8bPz0$ z=LdDT+Kmv(1MPg4CQ}6(_apOW@Rg0pGu;eGyDShj9s>M32t=PjCsm&-HFtLwadfV_ zB|6kfOy#YGKD6M|Pu8*ovF?KnJ6tP>bvj-SHQx-oe%i0Lhl@x(!Ib}wSp^*&ymFo) zmf1-y@8$9u(`i5s6m>?#4kp!cKC;KV`;sK$JcA1jI5&yC_n?OP-3`@btz7mOqbG@s zkPcUg4!)^!?F&;s7VfpT6JqubGbGa1^0~c&!s+3>p%Z9Tckr-5tUgr3>uL7+A>(5W zw2k}WA%W$ZLrLY*1+4~2NopiIpkg%CsxIdACbs&(HKVjdJPW4xeX|70ok@~WF0UE~S!e9ttto+*N_@U^_j0&N%cldLHDM9(K0gL{hje6*U zdg5EQ*EWif68pLQ{DOPYUnj`x2~nPI%)X;kMb3PGf#V6U%P(JlQ zm0qUT46oKS{)nXI!W{rY^yV!02UuHMgzMAOI7qFBwS#Pf#n(}!ISvYil3G?l0v_Op zw!@z=%=RofiW+ZULw3BT3{-kYf3Pa#nFDNtf#r{lVsV<G3RU zFmT3wz8^dOya;a5rGcQpqe=ji!Ss^rODCNJHeMhQ(;s-Tp(FGNGqL!a14-!D&(@9&5oqGUolD|3`50= z#}C_Yr$ekIqkpdy*f8B|8vRES+TLWN+A$8#~w0NZ^R!s_0pq?2EJxSbuXULM4C*wI%M)whN_-nr$UW*=4 zXK$v4BYf~fE3dh)@1IBK0H;J{p?BuiYE%v@x z7!&rf2m#+ij5U*_d$@yPQUt4M?BJPelu91JkD?ett7Z1n&ipX|&QReqiNTVYWjNZ>NTpx}-raIpe75$d-T6cFjEc z5BBLzO#*Xz)xX#o<9h!e*^8I|!EV!;AX+C#NZF*+!(Hpb5}ixfSJ5C82k!m0fJe}` zZ=nU1iQ*sbT=I?o1MWfa;+r0B2y3R=JLr*Q>OgU#735hBbZCSuHZ0Mu4T5Iu8t4P) zVKj&0JvFn?x+Um?)~yP8-N|Q#z5Je<2H(5!dn5NYgu8rg>~y5p4ZB{N&-|SGj!Oi_ zIZL<7BA6(**ej&tEj&_U2M!>dGdr2`>)FVIoY}s!QO`B*Q;` zFR{)m^DYoJtyqs#yG@YJ%~T$h6SD4dp55sG$N+46kMyinPRm*dj+?>Qnq$>N($g9d zNhMIEDgH}R&#=<>s>Wdi(`w0(LjtdDX4C5RL$zCCy4*tLYv@x?U3GH<4uYh;FI`tL zsJs$u=bR8;JSV}UU8SH=qe^Jy*P~n+&>J!`W31KCF$=8Kkfinwp_BtGunKysi581n 
zFekLCTrZv;x|%2sFN(h%R~EwZubunUpZ{{fT5I@F>KZbnpnNT? z(yy0W9IAYcHma9MhKkM}QmZkjN0!w}n#4^U@m#FO@Dr*5q)qg}{42@)QHoaEE;ks;j@6BHt=AVx-sD}Sb0&WKJ(+!G034||Kt%1Xbz24O& zPr1MgIdsK!h_0RMtku|37m<+9J%olHYF>S@XR(+}B7Vv8B%zy^p}egzj|Lybv^pwT z#=Ul~Hf16FkRUnr!{JPuJF$EI0&Csg{f_S#*~k?d>{^2J92aUSmAm41$^dr=!}A+0 z*85O71{z!c)cIP1VPT&Wp)S>-Ftd*|uV7~w(r_CZEEyt|qga>-8cAQ!*sd{Ed&QzJ zrQn?~Zlg+30Dq2p!n>JC#;+?;c9l%qGvaq7M~GwB!^DWQp~&h26CBUDdWMXV=DA*Y zv4@3SG)@L}V5n0gtf)XAOnDbWrWbkEFiZ*??SL3nm(H=2mqDyRi0Dsqg@$||FQh7l zp|q(chS3h|fjxZ>$|wO)Y_C^jwCa2zs52;x6(!`tMaXF(C$_od_0L-&i~1mUxS981-`A7}>!GyPBeg0Ncg-mO#FEfCKy7V*Fb@2J+J(kR6rm^g zj4L-;7zmHqn(}SShIy5U9xY5QFS(&1g(J>huD@7rm%f!bPuL6g2Y~Jp_gz&=27TI4 zw~=i73qdd7<=aRcOzE-!J^X&F93sTBV7-c(2>tG>bPuAk*<2z-cWBSX2YH9MXK{rByk15ON1T8^2Ie(6kkuJXe~>@ul_|+ya)T6imQaZ zu0P`ZN~XubKgIdFJz;MSNNSP#Xw|S^-SfUL49+Mq^>I=gYt{&cK8@v7v8+8+S+rie zuM}!)a5gK0c{o6!h8Xvk}&%6?JYjE9_ z?hr?5HL*6LofvWu)>7ypXltCs^9s1{LvY4>C(xpEN*TL}sIUvI%l0MO7h2VdXGK7e zc!f($zsD>7o*ZS~r~1S+nSs`43>&mQoqN73P`kg+CklytgWM{P2jqq37CPie-E7D8 zqnQ%EgbNuC)>{nHONu`$;8X%_YQNb>Qu8^hyaV{RT|GbJ|5Aa6O(i% zLu3hCBX?=^bjoI~%qzw90@_pgi~)%9h)z%<+glQF2(`WR=OuhywS|QZ zuC={7c?69L3_>kT<&jP#n@)(R zksPJR@*DXsG#|$h7>c?8YhvaEKO>zcQNkpqVlbRF6T%MK{j|Fe3rMb%v~1fT@3;w-ejHMTY|uaC8UOg z(rS=0D_lNyfhve~7mdD}DY)T}p|hx3ONkW|K+WL|6{HO2EO~9G&>A$O_R^Wb4XLyB zO^D1)cd!vCaVbj%#8)(juf`f#hBu|tw}zp3p{52gy=xxY3;VZzKi@HAV=CphZ76kEQ)b!dK)QLNmUj&mC21+jY-Ot)s?eki>Q=dF zY%h`n?=r)E)C$Mc=C*YvrIiHL>PxKF)_yE}4>7#bC|)5XRSbI5)O7!~(RRNrp;~ea z?VIPSAd$92R@C=>d-57FNZsuG5i)uNmu>kDBx3EhqGcN4rKvpTWF-Bv7Ev2&qo};} zlHp6}lO2$|;u=c=c7GZrd|Xj|tX`Y#$=%Y{kkC#G_)d$q|^ z2s4{+*sAoE+24)up_M&7EYM9NdTw89ItsuCCu;Rnenvp)_u z-676ANzM`e1h^=@>9N^h*aRTY;1Wg;X&Z%6;p z54x1?f3A<__@Vs83c5C$4!Xz}|KE$<+)a&C|K@*@F*Rb}Nrew{dLQ?It#~5mYNcR& zRcoVFTW#{(9lnU;=+eo?TXI*Q`TGdaUk zV=Q&m7}aL#w8f(;a;|auD<}5EOFk0D0X91l?JS_uIcO1mDDMS$))mC(vNRiR51E1||D|5L-@=TSE z-!qZ~j3koF6w+s6pq!9_@eMO(iny@g4NY7$<`OTN7e{sDxnhbhsxt3sb%7S#RR39lhw5GR3EE5(S 
z%GC@8oT$7aKI)43sn*2fbtRHNt%RdevChziW5^UJb=XKJc0Xpqznn&V7-K1V)_tJL+BLB0aL`w{;$gezKQI1MWUHbvAS z){2;xQW{ZJ$8dU1gjhD1h9eQ@22eURnuLt;;!Z52FU&E3y+l}=%exM%*i=rY*$n>J zb!rB4VH_Pq^KrVLbWuxCJVEN4CGQ4)O$7}@DUs5*VkT-QEUiQ;_!44B8>;Yat~m}m ze?d=J>vYj4;8L8>? zEG3iYy*4L5yqsxt!x~5Kj&WmWeWj-J6Zb0K6hv4&2dGO(;P%VP(p>X=;3AX8#X6k+ z#1kQld+oFOBi=%d&e5TO;@NhHPK`Lo3uR+3=cLMSSy$guKwos(r*)XIVN!tIfWEFo zYd?KkPCMaVP%DI$%6y*F(3D7vPtSE_Rj+!NG?6ap0fv=IJ8|adM~olmnzdh5+seB; zoy|<$txTIQ-y4EM)xrT^Jhc}h(8LdCvku&d{7b&Qrov5!ZX!ej=LrtXE9S@b6i%Fy zjSPD>Guf}Ts0=3kGFF=_PTIO{Dcj;bY2ox}?eysF!`NW9LkgAC*{2ArQ?c5rYD$sjJ>-)eV3 z(V92-1LD=qr_3x0=i%9TEmWU={nswoMlz<81pS5@+6{4_Zir9|#6LVEIF0R{{^)_w zT^Rq*0ug%M@QLeC`u60D!Fq-jds3Q-pow1yfa^x3D>y>y9+{W4N}Er5ML)9it-w}} zwcED$Voc!MT2+;RZM;t)_F|gQfLSD+L4n%hwDY;2W42x_T@IHV?`(U<<+FZ1v3pgZ zhTOr%z*}TWzF@jI1*XFEc=|k6W=tp4N|d@1u9_P=UskT7wj!ne#FBvRNZm<7Wu|~Y ztoCU|sai=EA?lSy0a|e&8fLu#*SJm6L=a)e=Z(J5nsdbkt3!&POe;i;R}8zrdDoQ+ zR)FQQSUzVA_OIUZqD2fnAXSS>tyC1|DTK0_$qydG7__sTa)o@*iSbvUrkzXg4@Ru*{W2P2#G zk)IDTBe+!{TJLc)-SvK>rzB<4YUj~yM^Lm$-R{+@Mpnd9bI+>{p&0;A!+v{w(P@2fUm3-?Fj2hy-|b)w?CW^< zpp6*^daZx<_r`4K;P7-c`Mr64$d9qFphRroMzs8g7iV*9STMxt183 zz|pr>rg(+2fhLiS`fG*LA6=GI?RlEZHx4C#Zo7`p;P@)fRvg?`l(OD*DV%`Xy`p*VE}N@;%hm&2?TWeEq3puh|7b zhotvP7E6&D+K&N)cOnw+5+*5Wzu?eLz8Bfew!Mk%lt z6I1*$2j1!WW&6pqNvF>_aUI4w6d=GsdCt3WFCy0~Ot_+&$^%*tM21<_1t~y9vo8^@o36I^<&o|h07n6s(hPB%B%$cpNXYco45ii=x znbg+=!AtU(lS~YMeJ?Y&xa7k5wbzT1O=&}`r)xAo0sXC>ezFXx;3hh6ej@22%gKKY zq@gzjpIy?!o9dipNo85rcdWR1BGuGcyoX(km&vEtG)V*a9Pjt)4DY`Ug~H_$ne>0&w|{h#fBlIZtARkbpG8`KSYPe1u+Lal9>gh@XLflh zht)(X15_{`(zLQ%Yuhejb=hSK|8);)ZVi#v<*qBX$SJi_CZ@m?RYH1q|6m0V>FMp@ zz@KQRAo;YiBjerOBZqo+h*zE`#m^N{U|+c~Ld>R-z?C#4E2PP6oaIt`Y{NLM@7g+h z`xSO2z7#7cz=xPr`Qg>ZOa7>Q_eNTGRN^h^5H})1wBnCHO<1DJK6^@VuOoJ?m6bwJ zo2;*<{8s+s{s9Ze4xRk`_np%J7U~~w|I5DV%JTmt_~)k0e<%0^E%I+$IR6g(eK*^` zptDfZ-2bt+?eFk^S~vd%hBj%sKjHsBM$W%8{qEEI7gq?wpG?2I_Wn-syQ|_~6r}8b zQvBUx@pt$?t;_xb!@yi}!~8cRv)>8+Y5etff*&t_C-}<-?01&m{jdJw;phD`ihsCa 
z{f__LOXe>;i_pKb@E>QH-@(7n&Hn-uIs6I!ZKnP^!|yZ5zZmKq|77^beDZhvKZn16 zp+^WWDt?%K9(+qP}nwr$(CZQFNkdpGyVFI7n@mA@dF7gIGe=S_E=?{xR4 z`^ZZHgP;I_0YCr%01yEDC618x0R#Y`00RI(27mz460);(HnDZqQ}(boanhl6x3R`A z1OXz?2LSrF{{LV94|||FdCGE#9zow z&+Ur16orgdrjcQs>2utO9p8-R7 zDtJ~*UI7Bhn2E5sIhJzkeMf{cb#gjfo&#vI)WEVMYay+~8fcw?p zN=J76GOwZqkQ5|k_@6VWo65eSpl zFB|N}=(Fjv1#NO{^_1UL|5L4= zqlvW>9qoVa|6i^D4@>laF}*r*;GbGy_-}*0gBE%eHV4p)Wa*6Nx6n7hA+#ieUG|f8?l4nTU?Y1VSKoDqv_8At0a8M`OBmc&?GItH zPF_sECy68|CWE3hP$TCs!4lrW(`WjTk12&;j?17e2r1^4jfk6yavfCGUekS<@-rA# zRxQP~yud4YM0!JUI%hF_p-lN2(wJ!C;|6ydb%vEMlG+(#EGeF;F|Fz8NU{?QtD!LO zx)&}yi~7n&B0{Jk3m_xtKD|w~ku-l!BTq2+SZ@h5n)MFsu(EXo9{jF;+397kQ|D{E3alA z0NXbskHsEKx4x_0)#d^~adeq>#i5IU$RR)ThTn?`5ik3m#q zCq0Hdd}TDq(Lx?YFcels9(SuJPc3FNU@5v>u_^t2`h8!vOmt^gr4%BUK@n2+Af@J!EqxBuOcF*w)4;6|NbR;FU;3GXGSPJIYq74m>g% zzE`T_G=)y-ys~QQBGv_D9r#-_x8YM(I)B^4RNe|0E*LW}a3J^u2lT^OR63ED-WIfz z2Ui;5zlZ|^mUD_m;@A*}O*n{QMciOh_%Nan!MsueS~vy)MgD1hRn zyqilX*Dg~Y>F-CD|1pIg3G@VxmT;Ug7#4h=M8pSj!- zh^2(vVu#Pyo)5%lv3t*xt6hlF(~H_&n~_pSElQ=uqwWyY?c`cRssUX+ic%4AUx8K) z4JrlrTAuU5eE(3;DZa`UqFs}c)z7U)&;zB#wvxsw@xY#+ZeOdJ3RPx&N32*JemPhO zd;0(s*;N0JS@Y+s=|^C(?8 z;0Of{QOaXE;JUvVCcf^O1!RW#z~AofCAFc_gQahN&t3*o`A% zlc7}LzM%&hJM|ADU9+H(>eD8eG2JK zl|&f_COreR0f^ayf{lYmFBR3H6#^gVPznH&7zV2U7m#R(?MUvt^PcA~-ep_|;TMq} zQNAR07Le?Ib#@=%9@Rb&mj)uL1Ln9q_tf3JH6^d9H?(v)R9^`)13{MY(*BmjzWoVZ zE(VbVr+`Vv^njX%g={sdiaSYeEE!Vteexveh2MihXuVI;#YmUi>hn* z>S08#+M41iRkCe=D`oj;lLwkJi!$n8!9F&NIaky_1@<%-^K961x1%igdM&GWms(Sw zXLR1ER=#Q&QdGM$>43d*89+pXwplf5!a4e}QAH{xCsi!0Bkv9*lsv!1u5Z|ic)+h` z42^7jnosNddJ5LsM6==Q+yaWvpJBK5Bx*QM5X>rRMlXW;oYSP4xmx0QQ919oB z$?6iDOKj>|j#kJVpJ8({s3f-jVN6ae`n zSyWi*{8FuU6M&^$Bskj7g*18>yM-Gmj!rrP6;M8rFauWz1a8 z&_(AZAHy#ia`@eyzn+i*E3!|dPGqYTLGDWa;6%03=w3yRkD7% z>dR~9922J*m{VTb#?eyP@HSX3kg4T1b&bdzfm|!;tuywX_orshX+bdZN~)DOUYgl= zpl01UUY?#Tg~Woh)OoM}@Iw#Ia#cgWV0AXa6ALAUIGy~?LJ&;|C2t_Eekk8wu|Pp6 zIi)gVc1tsZGWzYPr}Hmsj5l`4HV6kAF=c-HJPDvW*vgyZd?fgvc4lCO%01I9d2*0 zWAfsKxUx(bCXUNy>rNW-AEGzIw_G}`)>5ZQkTF6Ue~ciJ 
zexD2V{1o-R*LHCeD8?HzW}P%aqVGQ{3FLBPT*=2XOE?`7^T;}iClgPZojX(S4%NO^ z5X(Tprtn`g+96OYCyq%-I2u#VKv8V|vqd%0KE`W`L40Md5tDo?1@`;qanz=apIFmG zYODe!PZqiy;h@r5E|I#8O3UnH4&`LZFhM-Z(zd_~V1|Zy3s27lzVKxtZw+P42i~$juyFfJc26y_SHjx)ORa2AT3M|Ig>bzr-b_p;o36=B5in!M z$iNw9myryekZGqB{OVb^IrAeE)%=g=1KQ=XXZRRUV8%_0+0_7%^34WTBOLqiS$VOh z203K|#u5ZM2Vg>uW$?<7)O$-A85NKqU_L#&Hz|k2)^-=3JG9nk`BBBDqh)5Y9N~^! zdodKhe{e+yy4SWO1q zGtesiY<;Czl4#*u(tR`*L;cp88vy#ROXIi$aL9A{91pGz_yLH4oi1cIXeRFn(p94I zoQ;_7K1`lXr*wCwOCC>Y5faW?F4`-@JE&F=)BDw4mP_~bv;QsFrK^g1`}XpO541%Z zCh15wlK_2`{^Us*Goss1B;nhCz%PEJ(QCNMB5&j`_bBlc@=1E7G?FJ??MK&4^ukG z>%A?Oz}K2?KcvOy=nG25X=G#_ihdD%^6I6TTs@Lh}M?L z!4>&x2^fsRy-#54r=dYJ%@l-gPJ8d+1 z-iA3x;aIT*d?sA=QU^YvdvYBR*-*qbjj zO5!|H2Z7qk$4KVK`>_59f4o`QIfMjwC^g}d_zrz>%z=$}v?_}Ip~9V?4JX{f*>@#r z?gW+#HM}@)G=A-8LUm^G$mBl!St*PCY+VM6RT|__(L~J>J|C7d`reZCuO}#ga-M!x zIq+L3i;vaNAJo=}&r%YxomPXIQ5Fu+(90%_E40n|$ z|5Nq}vk;J=kL6mcNCHl4kH`H^c?L|9Xlw(#igQHXtJm)!v3WVH%^nfy24qta{b>%K zxsDv`4#x*v!7Jv5AR*Bifvl-kHj;b~O;0z!J*+EQ{wjc@*H+H7R1KFZ45xpK(@ zZof?N3df50pqu{g;x9{GpS<$HQUXVePDs!S$!vnjMV3>B{W+qt-7%Y06%}P@4y%X$ zZ-Uv9pYMmUygRhA_lzmDx2QC;w~UPQN4$Uv26X^D_7h*PpUPtLknUhu3acg&{y`^y z)DaKJhfO<}T@XS(qLjykSZI>RsHGXL%z^jpci+f2Xi}xi+$p&1>4*r(s{!h1#Ds=& z^$Ot`A)p1mf#>1%;HR=o^=i=vMHSFMaHn>;7@*aIsZwAA!+1v`qBZ|R_N7u7gQgNm znUvB~#U_<}=pb3(7gnCP#uJ7dJ{fI-QP_k4%N^E7h=|_+__x8)!}cIZ(Bt?t(i0LE z$4LKlzNH!bn8@z_w9fqTH5lAX+CL>8h@)A`5q)q*Q2q;qMyJDmX3bp{+J4R1zmo%rZjHY%sI7g*Bybrn0kPm}g-8W@BoxzOq!I_2p2BL)8O;>zPVH9dp#R|h< z^upwm!MRhx0K&8v$8wqt)i=0{>N6U(7f+UODIQa=fP3Ryf8KUd3w}0W>6hnt9!2>s z=5m@!Z$h>sx&)!2r?8Q}UP*D_F3Jx{Z*U0|v7a4PPo@Sk0GQnVs)spH_?CQ=%C zHsVe81K)CM+nNqy{}2lr=6)&$Kl*#cpuNR4F5%_$Wd8^wyhZ5~G9V>VJhvHbXA)8vQU>&Uu2S!FQhHwNm_3MPtt6D`q zQTW$nAP5^4J=D>l^h+`<8kA`=%3vq|k_$6vMuiF+zNv5AAaFTOdEFksw;Q}nD53&? 
zH+O86z}l7f#E_2Y+aMa6En92dm2bQkG@zX1IS;_`%aE(?c%05?-f@8Jx>{!Mund70 ziXW;ORy_3H<0&L6m&8}e{j?K%E$T_B)ll}$wdv`lm z*m!fu*u*%@E~k}F9==2jgUrQ^IQK|DVMi=FvoD)s3O0B!cjGij;O4 zo+z@domqQEV3{q494fb6AK-Liej675mydV-qVK5;dDT0>!2#xcuW5a(YLc7itV2Zk z!{K@)2=H{T_h9cUi_)q@CQ{D+At@*gR5=b-q{6B}AWTqNh1^7pDjrJQ9u>8_o7VG# z%Nq6bp@Wfe8BsX<>@qYnG4jJw19zi#f9lp4>3@+0jS1g{YIJ&4o<6NfKEKyyY7Umm zozvUmkKjC!O0=dFAOZ3FRK}W^07n3uFXc>hkoCARGnTvYt_L-}Bc_II_&JIaKY?RKP%17SyrDxre)h-); z1hgClOG(RR8ppCBXj$8K$^@@WMvG5LgPqF7ohayVvit30YrgG+#zeBlF@!fgG#(q$ znNhAs?CMV_+R<9h>t6nO@fhdPX*W9;=%>e~2K3EJ9d<0HPTh|PW;pH5Hfqmt<26U; zq^k%;wDm>A$O&GCZq)83R;|OH7P0&>$c1tESs~Bvwvjb1q^!WuNZ7~T?_?7k5rz41 zI(!D+&rlf0wgufi%T7len1i~Afv}e0(!A44=1p>}OlH&Q!%yQwn0hZP7QR|sNREz( zRtHg|Tb*2iW|T)_BPTER~agKJhJdj^pClRShbE8ldl)bUGRdl zl|QUcu)69$k8^#N(FUGiQk1Bs2A~Fut2R3rEabvF=d#7g4+Re0If~*8OI>7$l{Gb3 z28t_0-LMs*da7aZnYw2d=kwlgjaofrr~C6P7Rz9`wr}zZH>XP&;6O?M0SSoFp_$>> z8sj6{J}B%)>}6|%VgwplEI%cJesn39;M-7Rcf)$oZmJ3zi~rah`9ppt57Jxe?xOWc zN)5RQ%4Hy!YX#6#6@o{Ij`PgDuke(uKD`pTpX9JiLWMelg?RL7>N#j}WL8-nDKZ43 zDn?KWCTG*)zs+4`(k;uPw!s`Rpl* z0(UYP-LDPj4zcv6(LBW&Alry-|KOm(_gEI5A$2;ZbJlA4-B7!N(=)=W*7tm>!wtT+ za$B}|72TlBq-u^qbUirLnI_{Tm@N^Of(i?9jX^ET{@giD(sT}zRV?d@G2;IWOYPiP zW>a?|=gJ_ynjQ%j7u(YL^Gst+A9(#xTtx~0FHG}23(Mba$c4RArL$tONIqn#ww8)# z7z$8UEgfrgsLpEb2otS-idgi@tDqK5klY!=^F!38m_zVQKQMdM3p)JP%Ttyoym+3JP49(@%D0S@agpIyh2dp{2Wf zPS3G(2x%aALL@68OSjFxBZeBrwgne9L_Rl9@#&@^goJxxY}au?WY22JqumJjbw*MZ2>?iVK5_wyP^Ww{j#Ab zj1G0G6s=meK6+opR6g-26LfD@zVF?y2)-dzk z3c=DFvm_8yzwwHCMit6ZldTCxP{ZP^{Zu` zW_1eu>6uN%3;LRc6!GtuhR7~|8aX4`>nEH8UCdT^-3I&%H9rMDe@cP^834|U_Yn)s z61npXmQvpcm;eg^TCupN46p3`HgZZs-944!8>BfWn(lGOc6g}7lNd=;uAa_GZh;ra z@cqj1<5~j$dIXptjYFBVdszLxd^+u5_m#!@sFfOPA{ZJ|2UwQsB%e#5tOl7h#vMnM z>es#FvYz&It?91d2(4t_j7cpJ7o_sbuI3gS3*e)q zzp37QWjwPk?ZR(Sx($zCQf8D;>YG-E1@`1dbt%78Ykq0FH| zi3+*E$ZQs~jjN*$!Jjf(pP7SbNcOib**}TXPL$$Mxc)QdIz9d< zG^41IJ4vj*tT9ut>{`B&=9nw|nGc))PmSlV|yo zf!H#~biKO2Nx}^&bk?(c6yeu6WzF9qOQr@n5I_p5(bMNRi}gi3k$nysmy#ku--@ 
zb&b`p|EBZ)y{wycG7h5II^ncFyU$Y1tFXC?_SQ0rtV7K4XdF>?{T>}tfR4kI4vTf9KKM+`clauT&(FMlJcdja|bA%ot{0g_gV=N z&Io?0qfObbzGruH&y}i&5dze&boP_SkBM|0A{~9ufhy%d= zRgVR?j&N8J-sb@Pc#Er<^QcQotnz2@@5hO!(ToGZ>Mi@t@yaJQtO_`28_ zer`RSJj?A;8SPK?87}ViTrpa zvnJ(mL76HZlYhQzCE@QRh}TG^7NPvz{5ciYrp(#p*`@6&y*ocy1bPIE{|&)T|G}l6 z1xVcD=wfZ_yX@ZfO5di{Hf-tGv;Nt)U!1m*x?n9&sP>iK*H){4E-jb1U>NyCgJ=|jcx%87|)&LyQmNF+R<4SP`3h##UJ9n?#*`FvG5 z8|j*EawA<{$b&-d|EIEsYbA?pG1HH@K)xmLx9zC4`LV_#z0j@?Gg+))#}KM^22-YM z@;O^8P(6n$X3kWTW?zFFm*NnilrULoPYj^Z;gOjPGq?U#ifq4*0HrdZQ!}7Q`rV$t z={$}BtvbxRzL3)k7-{-3Txb)#DvRqW@YY`Y;iZ2lEerx)v?{@+&I~N?-pwmmU)n=| z{?Q?6jU)IDS*!Lq(zS-d)9+cOD+mD@A^~xp*6-+Biyc@{Z~0IQw$n#{)mwY!($kL`uwyf*0`&fTU|zlAm0k8Y z$c$`IW<0X9>MOWVCH_vWKjy4%kvy_$t9R>PtiRNYsvDTFdIJQh z?ds=LK8WGWgk+pGVPr5xr)3A}@4jGty?T;9`4Yps&yW&}qQQi=`Cj-3T5imQf(_jK z%OzJV1!#xjO#~`s$rUAfDWBQ&M+Es^;h@*#Od#Pm(+2) zwGI13pVHNDR_XXS>3oLiWwl=CSjX}|m+k3q`%3sV!wz?yDwGQ7!=yO`2!!1;%I^Kl z@<^?3(YxjmUYI9jz$D4oLwJ$GAsM(?Gt7^LI&Hc0w%1)FaJ6Fcw~_HfT64;bD3EEh zFWZI9vC*~4uJr}C=Ndn|H0b+ZO&i8kUiUA7~fI%=iLfs9=T2T(# z&l}k?TG?#_8IooBak52*!HYhMPb-(OYEl0UuW}@iV<(mrF;y-BxN1a=O`pvp{0W3S z(>a6IrBIb)E}M@opO&)xYp;51#8)whjmS{;KxY>2Sfh>&D%qa~ z_+)^jTba!VEzbvp{!2y6XbDbyarHRrPC6kkF|3=y1I04D?U7C}7p)oE4L`%eWN0fe zsi!GV%-|=G8kL(Ht`{VO-pWvD@FNd2@USctP@oUk-BPjk&w0QlQ~E$&+?i5$-qAvB zy_m){=jmCz{k*Wa?`N)nG^hQ@BO8Ur?J}psjS$;#WS&F)s$E*%*)P?ZD&JMeCg+%( zBP_!`6vJ84YmH)JTd^p*Z z?Yci<8;Nf0eQN)@E(IZsH8PJVSU3w}+vXDL+_mz0hhJ{Y!d=Z?4K+{@q{x&aZuui_ z?8wYS%gw!sGtPNQx20}>-z4<{pWf8xT3>-^TKa$>EWa_H^;{3_XqDKjD3ci0%KwQH zX8K_TrdJu47|zT8vGtY6=4ih`&x;}-eyclKc@@)FTL8(FpJ`(PNsoEf5ylAc5kbC* zs4Vs+PAwi!7bta@Ae?vdqk?wO;93YDXS(=`RgdGVz~&>Un6cQL!q^Q1az~k zw?&WUqHDWEk&2S2Fl>Lsq{J5QHobqhFLY~7+zNidrXbeYnG|CqrFf7FPag*w#KfY6NkZw$-VLd$4< z0B(SznNU^NL0F}G2h>!!A6H|7>f<_TZwc4a@$;x!9^2~btDYS!-l#7M3HL+WaqH-< z5q%y3iz^%cPTP*<LT*KQUg=*?{`oGRCytf%zZI>Zq1`eqsD09jtfH)s*(ALf12nMm4p3 z8X;3%INoXbTJTRXH7|>z3SuFMM@<#Zi6`~;DMQA22V|e^)-35FkmQuJ9{R+Lw8l-u zQMKhQkOt{5OL`ZgburONIqQd>%7?;5C-}73tKVm{yrNeaa##BDuSET^k4635b9fh7 
z1J1TMpjYUCsq|z`RDsiP!e$+d211AGtfeMJ9U8Qw;ucsjh72MezJ>C+Nkm4fCtSgN zW*;Lb+@ukGUoKZq1KD?SmO&&tfKlXlGWw4_L#9NkI ze5|UMo2!k!UMhRm@XO#yXvbsBd#eK57PAQK!4ch<^dgz*|Rls2!o3v+)g=nzF`-2@IOFI&sEX_ zpTA>_TB+80G0=&LdHK^|;X9NmBq_OfsF#;B2@!8K#KuS~MIxzvYr!rr?+)?WP!2T(=D zK7-($Lsb^>=nSO})w({r>ZLRNJP`RA(kg?5A|1kD)akA_UVh@%O(zbn1F~+y{^g)e zKXZ-@1QK{z)E$Tm_5a~v^l~d?0I=Wm>`{HYE2`d-6vF+7US6A0(<{KMHWhoY*V%Tz z^Blqm^8(;eJaBLXySAj2)T(9P8t5Sj3i7^snUI`h!AvAG>|g9MewYa z$1z%Rh<)t9*pE*-=}?f$yv5_bJ^=R97fSfsu#iy_2kY#f))5+@7VRdc#;RX1xAk^I z*H_^+6;h%Ilp}qAW?6t81mg#d%NMv+k*(Q&QSj<$_VIUq4(%;G1Ve@PUn%=_2p9;J zXe19C)w_%kUNYMYdp4(Q{%U8n_fu_0Ga#dS3Zdr*?7H^h6YFGE;a~+k@D3@ag6FcV zq8)gGT)`eCsn*iy(M5^LY{!=>G4DX@mHqG%w(foiF@Lb7+1b!q;P=Zk>#`pc#IzYi zqYVgxsn_)>yHc+GM=Wecky9rd$ux+Y#nq=9U*V zLqZP0FZ-8 z*UyqNMpDR9uDiEnS7%j+i0vbK?C1s`;fzIg_+*4f0ccxKxQZ9P65?cdtS0j8(=@qi zQ5Yg^AGF~S`?55olg=(sYDgW@>1`D+4dc7f;_Ci7!U)C67PUcO@aX5e*8Ux`@q04N z)HQIjj~34be&i~uuT4OOn>hyU?+qk`7pf!Bzn4v%$J zmk|B*Un_|@`G$4&UwrR`91-ogACI~e9?76iLXfA^;{NooX|}tU=caQC+nBDubiJq8D?uzz zB&qn6_4HA*db2k3z~aeLvvQX6GJ>Nv@MZROb^mVC(E+XM0!(3Sohg`AAxk+UD%}3Da#tUk$pa7=aovCw1~%EK<84#;SQRz2HuQa)n6)GlysP|@>Y^f^p%rlg zjsk}AK11gevJ(vDLvI;ko9e$?XUmMmDw)nC>G(5gYbf~rAZSr^bQQFqwhlsYUe7f8 zg?w+eVSLNK$mv;ZZN+?EQ_U-A2xL8{g5i!X-H>CIH3^ckEQ^${jTrwI2hPd51T3z) z+}Z65=(lzuwL+NQa5fjFIr+Zseu^?ksC-`18^YdnL!-Mq|8d= zn_{^gm$>6U9+1JCd`LMVNtD50n?p^m^i%*d8hHCCPN)Fn!_l1{AJMGXcIP`x|4D|4hyHJ5SmhrX_DTKU$?$3C ze<#Di%l{$6W&eW=Z@P5>G%8vT{u`Cz20c7{do2Tn7yXBacP#)7q^pwfVk4uX<4H82 zTz2N=v*BZWmi%p(pW9?3xXmFkixQnF2ZBMKbMim8^jynI3)*@#X4&b6kCY3+N4T|Q zlSoN@u97Wsa^aI1An-J2kE}T~A}8WeI)^QkvZ*T`2!>v?FZSsCz99V^GV?mT1Nz)O zLh~{*cz+XqRa1yEq&Z_c?aj$Aa3t5o5m_BeVjOwurQ@vmdJe#SHk2tx>>ZZh6$yN} z&Zh7g8kpxGu|=LCxL+*{Wd>nVXu;F8&QUVgkBkkE8l*EN=EtSS=%uefe~PZ2#LVc* z_z*#U3U4>l2s6;fM%$FH*_9muFl&o1tifH{26FY*Y86LDTBXUd> zrBh{`c*OtEnh0@Wwdasf@Jg9Ang{jsZf!W}zh`nz>E&kp!2dklY775K03$uORuw1U zw-j2FwbI*yBH((WE9)ocP0>fRb{qSK0!JXRf z06am1HY0k>s2q7$kdRB=3n`+a41+RP{JvbVN)v)fdosA{0#L&v9D=K|;`CfPaE6eJ 
z#zbP#*+aE9f;kFPICs5L4M!tBNkrUk_V;yiEWA?K= zl-s_)07nwfm-U?+Ytb`nWG0h}<0nQjhOOlr7zW~_jqP}_f-Iz}YcIHo@jE0mnb6Q6i`1#Z|Fc(i2@+y}u=++KK_U^T&P2;3Y zGJkq)ZL;>_WVEF|&4k4~w7nP*4vBWM`6vRGA@a9BXzvZKrCF__jCLtS{Frk+l7`K`y+Llmem{AMK_8h}kj=0ubPvlWSk zzKpOTYZ?M~|3>+i@Ih%4Bt0Pn)2_W`S*B0K5xV^sCU*8wj(Eyvx_vOcF zwF$-{_Tn%>3dmSN^Ar^*a3a$f+;hdAPX)soKyTr$wc3jaWj;m6d`#^4UCpli$jrWWJYTUg4?0xO>Yj@|q-Uh}AWZ>9^q4_%PuC{gEN>HLJmQ>t*sLda_I?PMgAVinuZS16i zabfJ|4R7nB=f0m7Xxjwt0l??8JS62{M=!hIXH|Ts&*!@Dz3a1#Yo@_srj16k!g=z! z`60)Jv2zax^!ee;+s^C2Pxc|cR-V?nXcb~5x#Q(Qm5DQX2U+T<$DrbRBEWJSlCy~G z3+~v5*oiDob*jSJ(@cEdeG_7nlQxkS1=Rw?WfJNjDJN(oR0?JQ@~K;sL**aig?wyc6uii4__#@^6kl9$C#fDs zDhD@%u72gHJnNLwu3jc;hj0-_xSXIUq0(*D0rKH;-qY?txAg$juJrYN&$!gCFeo0t z1v}C}`JGw%1`abSrH(YrEE3TXb;O;E_;t2+A*)zHmmQnsA9d>Si-i@Gkf2=gIucAY z&EQJ`MY1rF_75QCzFF`KpKMFQ!tOm)T1-&G}tjc=Wf34g1w|9m2~+K zULUY(rvfe0Ekw#@*O?!fLz80(p|aFL$(LA1{hNgl`5KpA;IvUM9;ZVf3Il^MDDDtF zNdsKBl>q2H8$!Hpv*V5%J%{CeP+A72N1yS)RzOXsrWq8E8yt;Dzy2 zP1Qdo_q%>X2{bEYA1Qsc8_B_FF={3-1oI0XwPM?(PB{5a+&>@Wz(cKN@nG_1;q0Yn zl*IS*UvkAlODq)xMh4F<&w0*35{^e*;%w&wQXH%SzW|mjM8=p%d(ZZOEQbJpwZFcg z2V*TZ<8b}9hohyrtz&c2LAQ>6>+L1tCkGuhK;UrhUM~ue#lTDk|H^ukP3WDF$C6kKxy|NgX7RveUgzg&rRkPNcJ@#@$AbiJG;phpgk zCBTdw6shIdYaN%?;Ma05cY%VfYc+Lt!$U?>& z^o;u>xii9+Y}PInw&VpvbJ>*M01xHLqcbZl*{Z8uShKaF|T+UWYt=6Nq4t;-q)M!|pAJ8h; z<>e9#?NRBzYMH(JVy*gZSv)>luQrGhk0iAM*idp>fqd$|BuZrX+#N9h&d(RI?G_sIPGh|F+W3rDsXos7))@T>ZEPx zR3mP;sl?va(+}wH{YpO_XYOoenVe@nYo6S2QUwSeN{d4`Wa-tmtVbu)W@DauQxYN> zg(xUrJ8*<1sdXthlf$3$`QLz{t0~nh;tmHH zzvX4NtPeM~4}Xp3m|YFpiIK*RC++$LFB^#-sICqeat>f?# zY*jLDtutMmvpC-~Ezyw&tw0%A z%q{3x?#W?!U3$NvGoSfg^`#G8RziLGC-Al>ohlVf@L0dSkFJ_Zbfopq1Zl@BL zvs(I~E#G~h{FNuMFILFa6@2aRKREq?u{U{hF4#CM;I~sVC5}InbaU96jNXD)w{-7X zAnb6PY+QiUECM6jYY2?7sjO>F7W`VxZeolSv^lb)K(Pb$oB}TRJBNfp$zu|vU907~ zC{kyEk0dIHA(6^DIzV}ek%n(|gCu~@(=BDz($Os3r)Imh=5cO0EK^q!QGhYr!AxUW z{Orv-u98+i@wIw2e}tUQW}{;vPU^x(LMf@%wWmEg+emw3`>$4To)J6+HNU$51()io zXhZHK4#CE6dZ}*(RB-Eo#?+sr7@V(nQ4!_+Z(VcXh1^`UOP0IP@W|t+yOfvHSh!Zv 
z^GeF{n2)iR#C**0)dlG5xdizG@!5R&edpkzgN-V`&A}7TM0R5IVrC$?;Uk>ya~J6f zU}G3Bmw5~_iG3Ie?PYX`z~5dS)4hRfF$yIY1PS+XWQv-U}85R-=7BrbX z_XB+IMYbDB$YW>u;)oMdAw4nG&7i>OXf}(Sm(dYgh!-Y76?Ifp=SM>gWk`{72)k_{b8Oh z_IUR1z`(!HlK7coc#{Ife;2wW%-Hi)5vd+X66%Ss5xJrAJx(_S&M0oD%htKEroecJ zW=p5Ae|jZA(0~Sg@6eM_BdrW4+U=jSsnd2er{ww4UH8;}CEbo)5nF~X!s>3d%uF|y z5@bvaC!~4nZNGI(HCoM&8?e-Zj>JEGbE@TOB2Ac2T3%7+4S?NjV59M}AYfLY*C$Wu z_lEZr`*G%9Y`I|0UfYx}Z@*_NcrNWFSce+^8x`lkomsT3>)8HcTOGS&qhs5)ZQC|G zwrx8d+qRvY?0wHTW89yxYR+1-o~pNAFFl?aHT=(BMSQXD>|lpVs()kxXwkEg<`uyA ztz!(_c$;?R_dYyYX!M#*ia&Sm{W9TGhN1u~C0e9pc2ma%D-ughmJ*v1Rh;677n)s6 z2WB_}u{Q)5xe@qdcr^Vva(9@$=v)p&+BUJxYEI_NY10D`=tT(*XwS0()QR>lobK+e zQ`awnU8*U65er*Oyh#n>S5TgV)A}ikv=S?0C$kw$R~TK z@@IY8<+Kqh55`s`VC#&V&iEv0*291!`9&6}Nv8glB>tdzQWKzk&j;fYVm5AblP|CS zf{ytNly-e>mnNWWOE@W?&@QXI1$bj>aT6E2M!hyh;wAX6<${ zmESI$&xN2WpzK0z*l8>Ap->x`A^bjtEjW#$pJeQ@^)*rkLsmHfvv)fLt0PtwD*uNB z?Z4ScJi_nqN6P&*6#tZO!tpCWNfq=lj_siSJK zbVo};ZKHyLwQ_ERYZqLL-@LW^LM4~m186MXwtycK5ux*IQyw*4+pmolxFpn;o(=0$DKPg@1VnM{@&eoBPe5FP)D~nPYH!svo!*kP#c1>s3?5Uv>jL?kvAi zDPr?O`;`Lb=AW<5xje<3&YyC*q5!!ZHus6!t@_KAag#~xE=>fsXWQRLIkgm6LVC2% z8`WZDm@NZ=mMYU)jr0qXA}Fs$bWHN!TdIAQEVy+EBj;!cHid0Gw7*wj$uAm*Xm)C0 zr9pxUrvhqp8=K#TCUU^{sptlVVEq@ho?+(eMd&`3VRSh%xz7EF zjNN~$lPns{qUOEg6%HcQCPr8Z>tls5Cn{x2!`hc~OS|;8pEQr26nP-+X_D_>>Xrj4 zxL(!w#z>pg^oSbEp#0^|*TLQI&CVsc|DHauCWh2wVvk}vaV{SJQE?^6Xtqs0j7WRT zN#JTTT>M20l)w$aUeK9Z%TrWMpX4NBPn3c-%KXgc#YDgh)!Q@>k{>L86U*;*5iRYb z!yt)e+P+@NCw4sG!6}OnR9CCYMB8Yt$kV~%{;`920<*SoreAs1QnCP>Q(&CK8@oFJ(A{jawdji2*wR?F67Fl)?F1<%Ae@o2V%;8H zYPVuG{OnhLHv0!z*eB$kvO)2EBf*5-z!)%As{MVA*Np!Q_d?rx?WN`nGmKZE>Crd0 zqXY{5vXRIqvzr7>k8b&XDg^Ey;E(6 zHmbCNPrR-+c1wnvD#E-eX_hGbc+SSQG3DExg$<`55AF8~0hK7uIAD4#aG<~1{wN%J89I051Q25-e1m?(M-c!#7Lq}qy$0zO5z?pokpq;x6CG>Qp_KKSb zY|g$;5+#f4f^V#`;mVEDa=1z-{pq6bdwPuNk_G~r&qJ-R58Zm-kC#G3GmH>|zTSeU z!-sDZ;$Dks+^B(E#Kw}ACTEX)&c(u_bNT1EMzQ!W$ON9F1*(us<))$(V71uq4%6?y z5P!m)X1-*xL%&asT<(?UI@|ENfNihSuj_srX*(w-122gvgYaRa|6+Zlu0$Lv+FU|| z{1DPvyA}wlDrlsmd}{dC?Tp-JZMt zG0jNX 
zB>!T4x7XZsz?Z0m?5*v9d~Yfdr6DMeD*n7#f9WWy;+uB` zRTJ}F3kpW<&;+J=-1PqZ+m*qErvU;7>-3qhAs0bRQp13e6w+?O^#oqoc#p%dy)Sg2 zu0LF;^;1+!-+f_y#EL?^Vo2W&96x%*t;V23 zIkSM)mK+-AyTvEXyEsdihjyI7rA@LyJY->6+R$>vhUa<5gu!@wvwC0lEae4;ZDr&T zB8uSfE4?F#u5BKZKC%t`AqU0kZ?`O+5eu0yjgnuwnrG9<+T`^rYxpX9yi`q7plBpj z30lL(4Q28Cy}oT>y^dd}z$U0+cEqsb036<-eaQjV#R#q6{gdDJP`{$7q5pQQ1St#U z2N~~?*|*iLl>yi#v^iIuFXW9|(=*x6`aMHDusj>7%ZG0YQIG@o7_$F1nCTW{NE|DM zkGd&S9C4XBgF=X7V)g)=&9d;{2+5=SD67ef482SMVSD%CHw%*z%upl>=$ftS-eE;z zKCpzVq9ZL3Lv7AGOK`Bd%Im!Bc+vZ>Gd$Z~++6>@lcK>joUVd)jt^g5Y+FE8*BSd2 z2fa|idNbuAO_Q3sBM=AOrpw^>>&DTtux4h$m}qt)-Vw52{GmI6ed#9Z#g{eeN{xwVUr*Vg+IT?YL2*AvdU#tkNv!`qgL zrDsFaZ=4VEn4G8j)wfJ*X|mVWQ(Lx;_hX9C^^d`LOKrYc7vI3(iEV_9WiKa}nOWySq6+A>q& zc#)H&a?3KCi4vPwNU`WbZE~h&(?@R0FL2BSBblzaq43hkQ8Jex45^X|e_cc?mxGnd z+C2m+7sdmqr*(i=W2yT50wWwA6G}@@EzBhRl2It^%#|JdxK5gBD z5=BFhXZ_G2Q1U3{5#Q0AQZTWX?N4b=nlIB0(TrxLR9sh2OpSE%vrqYRM{ufYPSE+wL|RqJ*afRGU$N;=w8Y?1}ED@Hv4EMv_?nqj6?32Ko>YP zMq#8+H@}UoMQgow57KL)F%zejKY`!|ToH9)4;wJ9OBKOt#)L)u!C>(#ycXhIR>L({ z{)|K3RLs|IyQ?!NL(+n-tZfhm%UL>}o*8VimbCnkGiPFOzHHKHJ)`cVuC9k*WM-}R zytVq;XLD^0*OlzM{Zc$Bg}1AiZ{@i+DPD~_`{-XI@{kQdKq)`(%22YHhU=C^Y;)b= z&bnr=^B@|hwW02NU)8GXlJV3P{IbzWu^Df+v`L}BjBQ$hO@Aks!6!7-EW-1xbyI)G z>iq=9+raS}q9*|4<{8-U#pCz}!wWjB1vXelp!Z3EnT6Wvxj8lnYn)usDhJks4a8ap znj_g{#JbU)tW@eMuW|MWt!9wRYjL5c5yf^*4-VBnf1EU5%c?ga(Mh7TFZ}R#9gPC4{9`wW=VV>>rNl? 
zjdy*|dHRldX;B``7{-I}r0^khpF4#I+z$_6HFb4-O_#sX4TL08V2UW={7DY4zv$R( zSR{QXz|4xj#6MwY=%Vonc*gWx%6=CL`NF#fU9#0Z_iQMhwhN|7IWI`GyTjX!BHRUY z7*lK;)S-K%(CsjdDb-4#O|6U3x*IhCX^WC?8`Py^{}#CFD z8TohEcQt)}gpF@(cDx-;b?d4--FP>Yu`t8~9_i)bxjsJeGmkHzLP)0fbvp5ixLSR6 zWLmP>F0zQ1g+-DF+b_4UmWR35{#|p|d9Op_^!A4qe~nHYk@1`L{(Z@_?7F2kIxSN_ ztrc-)@ApNcgRVC0FF}2*zQ+4vx~IWoV~@@G5|WVvxAOB>4IEdlD>nd>cNWpGrZhVc zOU)MI|6M7rnx;6p@tMc%4lH12tcxX6z5WSn1`m)PohT5K2Usvl5RQ-E*fEvBkH9l-*G3EXOK5h6@9# zzBc$-M-^g~p!6_*#MWA0gdO;YMO{8+uR^zKN@_5Z-VukSXlw&`;(vxllgl6+!q9`j zn86`#0_9;2LfR;i4oymsX@gL`V3FiD<$D3D_AU@P+^iJJsfVIXNy+~DDThonm=FPI zL~YEGeVBo{-rS4=ZF8(%g0{u?1sCakaR^ zrsTu3a^(3(77yf#2O#2MYYuULU!b|LTKO1$;i7UhWMWiyb)n&Z;Y!6wST!7V$5AyH ztB1U`xRm4z7W@@?juAJDe^xI~*6O!Tm#UVlq+0+kvOq7dBFFW7j}H=v3rEc6bc*7T z+BV^<;TFecWx1pciC`4!B9J6%K))*p=wtt}!qQ9%)+LTdnmdBTY$B~SDlL7|t?xyE(uQdG3k3)eJf9(lMr%b>)@7QO7`2ZO6 zNT#m@Wq@>Bh}d5`%*dq)=Wav0vahjrFhNeX{5SiVa0T)--64)vZ`gT783ISUPGUYi6FiJ6#sLm?J|It+zKRw6BJ4m^eh8=hoNwnZ&NPMqnf_b?YjNfa|6&gJdR*oCDdxrSou^xxEj^I*Y3K|ee zFK1{OCGotIe2$phxxXf9}mKw@PCmj#mmkWD=g`C-Q%V@SOoqPN@7^kohQ4!oINEL) zlc((Xv?Aus+g_h=y=*mwTtoE?w7jove%%8{XJlkD`L%xg!^oyQL!=ovD>7x=*5Yu<}~8sMASBeIrr_Ev~nBF1J4YY@fe2zaIO0 z5u3JOXWs(Jg95i1wokqTNI?6o&0q!QUV3=?)S4yheF7f8%3UGI{Pis6l1wS#r8Em9 zRfE_Lr8LxwD~4W3b0-Eje=mmiJ#&6dPBXRP|HHyYAYvK?!aInc^&G~xlOQ691=E(2 zJ;bm{k+6$}(?6GqW=5QuR|vFgWb^&!)94Ze)|)na;dUfRr#V{mqNGR5nnW*lBIaC@j=G$}z|= zON|&y`f$&Gv)DR9qeym~`^;5$SAr#d;!d890Cfl9?UN&7i^XB4mr`pU|Jsax)5ajf z8scfIx%-L@;L-g1;`=%CDHdZzu%%_+=X{f6HaKN4LQXRgt?oN&HZ$cG%~BMg#hVf# z%L(c5ktC(+_!7xVOT&7}&m2#Ga5u*oOF_j41~B{5^TC0%`oiMQjlJoSy0ScpadY^eL zarq3tLGyLT<^|9eIi@Fdygj9tFg`peEjk@v>NN4-u}a^w@Itd`PY6HlYSZq8hLUMY@acoOK%IcvGvy9m6nZE9{I z2it_>dQ1$V7P56K3?feHqlns~l(mcHRp|fc-h=5S8ZBMyMu#4n5}N^x9Rc#7VS zNpWZ35T8A4%!?G6%N-=@kwA!eaVJh2@2&~tcs?QGUjs-?Ezf>E?B0R%%I^f^d`XMU z7!YR*5^x|x5AxapouXs_D3n!Gdp#B8o&M;2-bwWsd#0SwyF0(UiC9+jcnsqNzQU9k zq>B@&>Y3*_|NCJ5YZqb4oNmN@b1LY>2l;$IwjBCiI9f7e7ZGcMVr9YEMz8UL;>GcJ zZ-Gtu#wU`v;q 
z3m^V`h0BM77pI~)T$%e5p8V~!!GV?BCp_sNay7cle7qh3$AqCj?TcP-Q9%gR;Po-x z-Jn)l=O#dCFl?zBp8LwV-i38mZ0cz-%)XuK&Pt|RrpX%qGZe*2P2$gvcYW$K2@)8s zaI{isA0b<ob1~=A>e93NTLuV4U9rNmk5ke1l>g zf5Lq^UDmzn(-A2bT5jPBNyXn;z%A`0>z61xS}47HTs+Gt=iMpRNXToDa$6D?}%Uk6_z4FI%AO zuo#-A&WSV~X~V5j0ztxOp{W21C0vXii459qeVr9&hW)Y9wt&6?s`aI9xj47R=rBp~ z&fLc)Sd;TTwA{-6xvKYj)zguNWe4Bj{V>kgv<~s_lquP0EdgfX)(4@wolY6ty-vYV> z&OiSTdEf;P`BrATSIsv)^RK(DH{H2Fa$g+R3SVhv5IRtk>c`-+xqUZjUb!uhr{oPujC;1d z$a81@!V0W+eSNZBY4==oN%i+*@2gth?2029lRZGxGUg3c!9 zdw-5KbsM8wD@z9)7xJ%yZ5k3QOU`N{t17vT7*uZ8OWDh~!0|+}pXD~`Mh{j!ltZcZ zQV)d`gegCE)pyb)(R%I_PTEita@kFn)r}*S2wi9m2lteK27GPHHXb3qO;s&zzyA$N zi`H>cM42ZtINwqY?YY3R_E~$Y!ONJ&tThJr?hUtNi&c~&9RrDYie(}Lz2i6+nD|*D zB<&vwpmRELr3|K;R<13PqXYO&?D|KVa@pwd8@}q2fW3HMh#)*u9I)r4g?$Ikp}Q!N zH*<%uya(ku&et;^bsFaJKG5r)Am*NTtmsUd8fj`|YQ6qk zk&g{2CQ377=>UWEx-G*$-@pEam3rHFzb^!5WY}!sTUkFJ%71Rf%E^?(_pahQk#&ZI z5yRQE^;vSYuF9Hm)?sb(?3RIX2%x4JgEzK=+XG6eh>*i7iVMU5)dJ9;yAx}u(4`I) zjFYaqlarJVH6aT`*L^;#JG;8`|HzUm2c|LCuqlNua~Y;iO(SQebrB{w<+ll}Jf+Wj zzvGw8t*?P_M|4N(iYCgRTLp>Jr|sniq}8v}IXO!^wh0rgt7@L6D5rFhBjJ@*KIEE+ zwV*``D!ZQH+dlcp*krWpO-pFA25eVJ<~RrRp|R8CeDZg{2zh9s`M!s z*dV9DJvu`hA!)=Q9rD<#6#k5(;(gA+5|SgVV$&SFFd`YOF9Fl!t+&@~)=h`Pz_hpB zy0)|xc3I(H8CHNbeZqhKY)5}KH}mG3-#P-1jviTGO0Ey0%4cN64g%7RZ0a-CbkztN z3a(j+5M~SsG+T@y!rh00p(MN|yIkPg8(UA}}?e)~>Q^nX?6QpgVUsCV&;o)ZuEs+L7x)vnqj_ zz|HzhpqX%?6)BcPROm1Reg7KKc^ma5yqxP&y2$=u#%V2WDCWM{vlS==UzSOu#q}54 z7w1~+OjTOT^UTF9ge>_CUFv&S7ZT&UVjoT}gV_?00b!t;pipvj%cUq}KD{^P1jf2; z9d5?&S!Sp_=C_e$v%K<+^%LFXDJmfA?h}zC9uew3-|7I<+7N$%tbOUqH=P&726|`T7`XOwlU85%P!6HzP=@xiLjv=%w_Jz9G2i zD|;4MUW_rLK&};66{U~>0YA8_ov_&v7)CaXNEr{LsarsAH6ts#lJAY8WdmRR*}5EQ z=2jy~?v7!wxQ)0;UxN!vb1=ee@fxGs<>*jDx*!pD+Pj5#5|tW&l1)bh?=CWR=&)U@ zld?P#gauq&Ymk<-u^Z9h*Qg(}7{m9og zIoST}m01CuLK7_#<0IBjQ3dWhy%=iG?Ot0@PVv*3XFHr3#FA&Usn$ERH)gSbLC{5IGQ4Y5Fg43!wk&JecdHYB; zI^!h6^Bgt#d=VoM7%2U`t$C7WQC{vKM&5`6`mF`1ikCrzI((9(`q z!I*!nSTsQY^!%etV$Yi@O%Y%tP>8~(hu^51xz=^bPu{D~{y1ZPR_!YZ*mn%aNcG_DHLx&5T`1}dfK 
zLk{X{bTh=~gC+a%QVjUmz3c|E;vSn1{DHYvGqcW=cZ>qZ?kGj8@!fqrppe z)7p0R=Hu1;5pC*Eir`210gCG76$Z*m?e+pP<$-ef)aZ+dQ_sbQoCc>-bQwpW4Sg)! z4C$V;PI)nfKvE;$ZDrmBey2b#peD^3pNfxSP59wbG5aqINEksOcncnr8y=+pw5YMA z!Bc{A8y{;CxehX@m5XM;loKjMNvujHuPbkt77Aj`QjGW8wB#O&=0eAI1Idl9+q>VL zEhnCs+;UY89ud^4nyk6<*HoqPYDvGnEI05?zFintO}JBccTlli=?ZtDSs9qw8RC_Q zb9Z5c&e(4l8PNXQ45JYnX@9^;VlF5bA9{0|lwi>WVhIQ5iOQn(6L4fIf>}4^zV!mF zdYYGWM$C;$w@S%AZnYw2J)*5U;tG4Nnm8tr=LI}Ss8z*Q6&x+FDz=))75Lgnmqx2tcXs8 zwR@iKT*btUSkRbrwxFEM;Pm#F=^sW&CjGEh@Y$5ZC=&YKybxN_TjzF*43XDiFK_w0 zX%37Ix3%U8Amv@GeUbBKM80m73D0rAIuUQ~8e%kES4p)VK3?xlXL5~Fc4mJ4uZa3- z^v8t4f94(+1HRP?V=J<&ko_5CM}-2M5hiwHYffiv3-{a+Z=@o}E5*;pl*?h#^$8+w zbcRzNQm_Xa2>V-=R#wyN>%snO-|LE0G4UKlDvha5WD6gI{vRNgG+|7Q=>&O4p$Uvv z6CZ4V0d=Gt?*0zeNlYFZKyEz-cHDOl>lG>QQ&)x09t-^aT;r50Jm2k zzZsj)fH6L^vgy#*WrY1g{)ZSo<+geoB;4MmYCVkCO+8pxWD{su8vi3hD-eTY*-3epCW01yfxCSbAH5G zoJ%`7fokJi^9dZTQ0}MFKZQmt@Vbb*93yIa6X}A+j*w!S zcR%NPE3Aq2nRu@NO{-m_apg&w@Y{7OTLxac%tl zx(X`8yuuw#z(8^0N8u=zI^m6~9hR18_S1M|FhfwBK_bu3A0J+K5$c;w2%Xmgw_w-(-J(bOh>fX4fq` zYNwp=c@BFL_CA9;DA5Boa;bP~iNE%d*nj#lW2$;(3>NE2{xVUI$m|w8ki>L4*ZN+t zuYQeS`GjU;SWnxTj1paQ8*LWQ8YBGXcFgJVylOk${ek-BVI9&hleZ-!KOZS&fR^xz59@(9Art#4IDBE3rtd|!{qsZ6{J}I8N@L=`* z*8Mu)w_=t;j>3hLz&gaidj-lJw={{NxQg6jHfC1x>Li%ZulFWKK~R`}*OJ{-4L}~5 zANe@vpKac%$d8iHYiB-?U1(ZIa+TDlh(S&SDq(V91<^~OuaAI|>tBi9_m|#4gBV;r zh}|11oMT)k+Oh@Z$rVoRC6bFQcm1h{T@1zs;`wd4d#(*9>u`Ub<$dVWiCGEHYwAEa zUNMOARGDQNXt#dD9!sbeOwPX5aCt4VVomJ+P_(Oa%SvM}(>3((->V@iaE8uaW9FtK z@mQ>|G;G7IPNS=k8OBH)EKrG$B0t3xwfa7DF|`k54q8xB8Ffi&Q8aPV-A4EgkWDq{ zj+0Qy`8^`+I2a0KLzMwa+29+wE4MH*bBQ4=vt;CHSq`PM$B3L;cp1Oi^f$sSIpgTlvuCQlpIVwdV?QPobLZCR zuQ05-M+7ypP$Y8p1v>Q$PzeGW+k`YC_Ur`;h-|y)s0mIFA6*K=lX0ZGjQUm<0*QVl za**Ax@-G*5y_HBBL9F5i$Wh9K@N@#kmRclU!8m&k=m0-B>dmM%sDw!+D;5S+-nls#5F23NWkGAgf zGnVuaAQFUj9_0edbreaAn=!hCKve?f*eqiPf+zn#7l{X1Jxnd$O|(D0uOIyu_BZ+1 z7rdoV|3OY0&N7xy%H&EycX?H|Wt3A{PQVI?=`|aBfHjc!kx5VM&sAI-td$7o2>%6! 
zXRAPI8_I8bm6r%YGr&mNNTQy@NZW=prPgF1@mE`W5$F7Jfdp%6H>w}R3<#SHnEMhY zh=L1x_K$W(5amR@IEi^N%@pN{L6@HL@5vkkwRdOd9 zHv%KAII*~hUN47jG?OC~=^;XbT5^3FSWVUxb1}P9)vC@?6I>j!bOpDbd@!O_GPZpq z*cLT1RvyWpUGcop6B_+-lxTi2r-~$CUSc8Ri6fe9K=HHR_FKt%CyzXUC#cCqFa z3cbFj$SM`AlW`u~b!-gLx{9^MdEgj03V|Y~if2aRb;uL32-#H(+%*p5%M`}{MG4!K zNv4?y%f~TO`}3RALNS!}JPvN$UrWirCSnR01635bn9svt^%Yg>p-=&3L7+6sp#Mbm z^f990ICQbRV(}qSlS6uzVGdt|%rk;@&jvaUm=^?W4A^Dkw-4!b(v3Vi5+-C(dA%^3 zW``J84J&7*KDD3G))}TR>~&iroS8R3qcBC*!K!~s>U}6X9ck7eYgaftjTZb`+rHn8 zaDaY4iF&+&wFh4?EyrMGvW)Z5ZsKiKbbc@KaPhLn!j!i3@XJy>Q7a^>(oS4qR|M;6 z_%mb13wXDlERuTMuIsnAjO8gNfXl|=Uq01O(oB0~F8m?rImzgdeaoI5bVi0^aYc-} zw#D@AA>b>d*2VsQ_ozKKYP|(gh^O@f^;8|P<^JhQgT(@Ex}|{=qZOk|9B5{LQM`Z7 zbTbeHKR)2zW}wK7a)Wj_>Y)?MEc)Z6Hd$QuTOu9UUr%i9H%9E37FU0-7H{_N?S!=9 zu!lIJW3R5Nx4v(+E1VnR|KSn&ZW%w+$BXHO@?hFpD z7q#a~7Y&@k3*a7=1Cpm%n(o{se&x=~nmb#I*vi<#r_15xmyPGS0#o_|5KtpYeO zIoe+wVjNeGuArqn=Eh#(tDLWrnxF19-cxN^$2Fvv*4}WR6mXY%nsRFzebuwX>?vFk z@x^mY89mdEbJy6KssL%v3JsJM}Qw`l!f3;9)E+SuEBsRkNL4pJ^Nh+~uSB<3@fo&9=+Y zW02{IrU&dx-(}EUCq5*5-@k^mZ8Lsl%;3J(_0vI@$fj&9r(SaMc4-5}IA7zaUX#oq z_u+X?3e_4??Zu4X6~5O#@B7!iZ*+bRzHrhBc{Hv%nNs*(NuBjW%E{$Gf{LdhLQ+g| zwGP&+o#dtT%b9=G&c8F#fhTc=i`3_Yw|R6}x;(*18}QCrGo8V}pOYKg)x0w@{c5FQ zE4wWaVo(4k5b%~f-R5hqF+v?WmeS`d` zJ=*?$64EvwPERQul^s&&+hgmcee$rhJ7qJ7aX>UIv#&jON1l8*mC}aeF$+fo8m9I2 z+K7LJ3zGUhK;L4)yNdZlM^Flg*~$9ZKlH$B{8cfU?CurDl_hSTv}LL|8GGx zE0D^TErr4(FDf>>d)(4X>4#udBeM&52f+tcW|0E|RQG)~PlAl{op+Eh>c9FGq^ozf z%xz1aYtKpQP`CaF$w6f#)tl`Y19omHLOLcvpz2+i1_5>U9MJ7)3_#LSBbV3FJw7@> zoH<(z&&>(5jI+o!VnvU8R0|GM6tnLAmSZN`ZUm0)#RyGOi%aY9&kkzOmdBE|tk}np zsdj&$8d<#Ox;W)s&pixVMa;W)ADQt^wr&PGw<=XG_*5%Z&+)GSaz*QOOncVP=z+E} z41hRgQh&hk!&i2z``d)fP9H?=ek-dX=n6)v{QbkgJol!&hLv8NA!IEwi`YF}BO`v! 
z_o?mYz)J^T#UUqVXq-H>j0BG)b3m`6xvUP42LvYEO61X@^}V{9tGH}ljJ3=Wo`i}# z5N(Y7Zx#&@_mPb*5|VwGsjynEPEP~u*ke!=j@#!iq+p8dB(04%@U4&|nh6>M6Br?E zzhY!RGvFPj=^xJqQJ^AhKe_}KpFe{!69s%tjMk_Iv{5o@&3N>`Ss+nF@;3^RhJbXC zev|1a0}Px(|8(ihWqKSoHWx%nYja&Iv|+CK{;n=_NMPyq&V6Bffxc$TI1?Y3SFG$+ z2nZ@x?Prht*qIeMbN*G-S>|0j6%+2UseLc6RalesG(bKmwEjL^DD!>faYc581X4njnvcT#~;V7|xF6v%<(0 z2=4#e%0<{e99R6*_7ZbB0LeVS8)JqKJlbHb5u9olhG>e-(Nc{lPS#O+0X@Z5d3@!C zh{iyDX>OHZGeEMyam(9d6Mlbu^TyInF#D0t-|+?W+drkBa^VpFOf9b*NHwpjfA8XU z>RCo4&FFhwQ{sZNU*I9fCS#kN{UVP|x+7=EU42h&1}*Q=%7T$tiwf$-A^B+%zo;fY}So91FN7jnRJi0zR^b zAUOeWv2~e7{XG?8W4K z29Je+A%N@XDs_0Vb?iAseDhDfxn~wweL(=@;!gUN zg`9i`GCFp*N89!(_H#d5xagB5rGf&OLdb^^_4*M^(G%Gz|yEm>Uu zu3aU^v}=|oKq!|zt%yDfR-8azCH001t8V4I%>^$3415{IuPs?;de*uBA;=tvX{-2E zR;OdDi9w}j3FBh5h&-V17~4()gS}$AgsmY`1NF1YNlQb6`|ddRHxMr%Yr_{UsC$U- z9f~Sg5KcweKLEC2kpK}5-Gj25JQWbu|=q4f&7>8>Gs+c&zt$vJLWbI1HH8< z3#Y93KbI2UUo0X>Z9J}TSxf&I--rPs+OAv?n)%7jz8o3awm^FQ?OQ+w87-RxS)F1C zzo@Bc{~7)82>$QV6OP~lLoxF@|JRsb$>217fCx@eWx(?&^mBf-n1dM>+FlZ(-#rSv zI8fCXqDA6WDlFVh4aE;W&&t+BAD> zzst;@b9esLj5+CiKvaA*V6@io>|zdBA58BKk^#5Gq0zt>l-#clQM@bP(z5JbjDNqJ z5W03Iix7Z&8@nMA;I1u2&^z1sBc{8MrXrp7r{XsWi>0rcgb5SKiA9JP zu*;!Rtj|6spn%V)S-1`#V~wR1ISSiVVTslS4USv~CkF=Hiye3WNq!8|}PyY>4IxxNh z&@W`AwMVT{t4kZ=Pg(XT`u3x%M4u5~Cl`%0A%~~}yS=nUmOmeaX^C8u`sy${>$ zL))nho=76B*l(ti`+@?l*ziZwr`@I0Y3u<8s5ND3ynZNjlv!3RzD3-{>*(0B8zeNu z;nGXQ1@%%>o&#z2h~nC|%`Gqn?uobCjXL{oyR4Ie_{& z5YOq?9s{^gz^gt8LllgtXAruv;pjOx-b-iVxCHiO=EHv#Fen)*KP`TK>GFnxREbL@ z7$r9oKpa*U$Fl^es0;l1V*osIYgeM%f|2^5Qn6NQxEMi}?Bc9!TiK%M+6h;!VI)2a zQ!^W}G+&K8GO<&&@OKPELk|o;xvq$2s#MX+>5Z5iDxOUJ-xmameY^PSYL5LgA+%Hk zkc6jZpFZ$>z>kSBbQY#+RM?&Df8iF&UlmO>s5#Bmr$1kzM)jds=XX`riGkwsbcYgM zcfRG+Q+@T{*mS!D;Dr8uo2jTu$3bv_TZOQZU#4A_DH?C#{5R!2>Y((0fY+e_E%9u?ZAfX;#45vO>!W zNO6iEbzBfsn%i#S0t$%uD=4XoiXB?$QOQqMW}BB5?x!o*HzoY3V@Zsz;Q41c_qt#_ zc9-NANu6LkULVCTYNcit^-|gP18ZyPAFzzsFp8?bdc%q1(inlU%Fy6gmKduy&+vk# ztE8YtyH{D-b<^1SB2^IIhrDNSZc$d)<;^qdzt|I3x_-&T!JDBOK0koesvcVMW`hL1 
z&*xw*vY47A*gy-KDL3ijac!r4Pj%!_X3Yd?zP((FM2uvQ42(=y=A1mGOmL(b?YzS^ z3g$7^FIym&o;!0L{hj)%YQ4Q`LosuzK~t&E4joSJ2>v^*;q$eq`=$OPPCb9{X}#)@ z9Vx61yaK1Itt?<&D=R)Wf`wabLF`+m!+j;=cuf=9)F?>vsA) z#i5wtM;a*1jpE`ulJ4;sAW=ZqpG!mWXI3;N;@v+F75O{e*=c=co7RyZT~CN~!iVr1 z?YnyGnkPo{578vDn4YiWyv>V!q2UE@3t@4arkYBICqZUQ&$X>}Cc7y_Z{(Lm=ie|p znm-_eiIG^MF?LwJcEv|MPtnub_YIA>{TESij@X ze=M1McX{#7>a(7OCV3c-=Z5cm+rK_ou6%31!GIijt^~&wYSK)3{U|fcPOmCkB;c_L zoT6>)^#@DVKXEdLyBho9P^Folg*>Iu{C$VFjQ8h02go)R%zy@z$92G=LfQSWVxOi= z*N3|%K4bSc6!Z`hCa_b2mzWeo%Dh1nKGyEf~uDx1a^&uU^MqCG0~hj=v$j)Ocb zUucir$D=g{5Ts;Gk{yk+b_6V=lU2s39@2D|&JP_cH10nF9*-{GvJC_g1huH2*y0oE zSQI9xy4oJ5*O9naE^&^QWJM11UoV|4(>2x3=xj5eyR-f`dV!Es!2bIjD~4)Ig3V4= zAl&Te13W&!dj0=C#>g%P(2pkV5D!f^ax_rdNLN1%NHf5R8x7?Hk1_F9elI#c*jj7m z^f^?TP|UH+L0xfDW5$DJ2;S<8RO^4{FffS7OZwf2UGqpS+)?b2Luxzvl8kxBfZ{UT zbEJ)|Ql{f>)FttLtXWbBvi=Ww?-*v;^Q4QGU0t?q+qTtZySi+5SC?(u?y_y$wry9P z-M^XtIdf*_&U5eQyFTn^ugu6d-i(z`Mn>+nBbUoFYV>%E=$5>bxD?#mDi;Hvn2MjR zN^YgK?KsxQ4AI&qlQ*qcClTm(5}d59OPEi^25j=e)&Y^%ZRzIqXdZ;T$+MxJ-FKSD zY?{S#7lbG1j9aE)K`6U60nX=4=%B+|(nwfTtSd5ddW4^vXSfyII78aQYUZ8#5(hhh zM$7e?kg8HjQs|GgTz8w|Uot<}?KfgYhD*ST`2$2~c++2NTJp_*HC7Mqv0~)oMK9Ne z(^w7X^>NBmf#j6d^C%D=XgCvjs;0m6*Ik5{$o@j7-A3>UhN7AaVO|k)%H(qz%1(Y5 znfG-N*nl&AuSOwskTMcY2g6hxPlU5F-IfFrVk%LDv#$j=h0>U0_Enivq{u-4^rjdxW4KE^oRL#eS?=XIj3k2qhw08)VC9nGj^_wVRd=`&{qnyGY zRvB$&vO~%^1>G{%kvO1X3v$^};D+A3-p7 z8jtOyDC^CJDJ7O5zl~Z%n1n&XM$4v0sr0mzdmT1&vvpaYxDN|?5HFop6lAIK3DiXc z4Gh(b2<;e=hx{&9L+8Q6R7fjhO3j611TmzvbKFlsz<(7&86j523eKUFwA7jVB5Q4+ zw*-81TW2Wi$Y;(QVJ_b|Tioca0^|U=z<5%bP{hO1E7;>D+a1M36Vc=Z`)~~Q6$D|W zi_|V84fNf4#B6au_xeRgH2Ajf=kfM7?++EKQoGTMkG?kDyQ#7tOW34SDI%0h7i^K?T2-7ESNa!@3XzG1TdyRWzKZNrx z`_>9wc)#lA*+LCXLXm+(S*dK~!Y%{Tt}7bndg!&7&M}@krWc6eCo{%gc^!lL=LViCYDrM&_T&PGK=G#00G3}rdO78GUA2At3vg9_=z zoS`qr$}o3gc~ei53T9U7-Lpv`vPenS%Pw8)k?jLwT z<$M|?y)S<_iTsh!HE%@SvL!8Z%o6X48T3^ky5YdzId3bB?3SI9jqZvndVdm)C7Xka zAC8uk41HYO)n`;A_sh*ixZBY6F~YXG&M2wdd>@oX&W0^uvV-mi8Y6t>NTw~NRIV>` zT~Fl&yv#?6d$}Z5Amta8&cu-#usY1gCsgW@<#Hz6R`M@F?X{ 
z=RMzARd}?4EJnaD(CK}7&SktwszFK_alk1H%2+91P_zzT416Ssf49I>i@x(StZelC z+nzK1JRpQ!q{l_j9Vdeq415ISlG3Eh_L{m;r8qOk3B~6+4b9#Rk<_Wj=zEKUgwdc( zBsXE(aJIF#iDA#5gN6Kha^&;g4q7;h@1Vd+Dvp6iRDvy+{S9L7c;kXKO`gg(0qaUdY*h}E? zql5Pud_6&UF!M5*-|HssEmjwUp89-XSv+lIm9!FVjgE475V{9C>8G66@)SrC( zE8w`&G6J_^so5{ziGneK^8PzM!sFP@`ZIk((Bizm1A0Wp$zc8OaxI6E?bf&L4+n$b ziXQFf@#Bmy_jO4h6DNs!_+(IoMR>5Y&lZu=>vmQV2-##bLxiWDgG`wpX&Oe zv-+19JVn<6FH= zKN7v~Nb6;@TbvQpVai8hLt>$Tk$e*(2+-(>vBTtR`XS}i`_gxL%SXDkNVYSd(GSVG zR>w*Dm%Vj-{IN*awwl-5T6M%@UA|&K=xjMaxXr$3!MZB)rK4%b^L6h#=_t~;bo*@f zbI_x0mU69c?!rs`?DfQ|&00ySGo|v9zxnRSG2gN0rSEn0wM@^B;Zpzz2jwTHHj;7X53?_EQE;gpN&h+j!*7Sc% z2=l;zDRY1T(*OUyD&i;P`xp@et|Z(28{Bpa1cgfE8K6qlp^()%R^CIFqt4-DnuhYM z;=-s;dxr3SnDcveq+di^tzc*^VbT$X8X1&0sj#*pUuSi!^MW{$x)5pSD|P?+)_C%; zaEPWY)j6^tca-8bBQGd7&*yM?i=F5kd!qOjKdV7DlH9nPIWAxt=#4h?!}bV2Cd<86 zm#F(POxs{(dWUSYh&CNJTW*tP-kJP254P4%;3O$+B52O}*e76L{!bzk-WR=$*{@Om z6k4VVVdi7Ki<#FOdEESZu-JfrVJ``BpXYic;xO_=jsUA0sYN17>3pjAH&^yFC9U7h z2+%i?ZycnzDk&dRsm)-H7|s%(O1Oz6AOb5N_Za`(;3Cq>3owEJ0$RfZ0zw0fnSTbC zvxTXRDZ@XXOn-;gxu%RAiUe+#`l%b{1>SoIUK|wUKoqGS(F zt+0v9aMScUk+KxSoBSK=59K_@pWQ7)H3 zT05>>SVhSKkHMR~yHrhI5XaKr?qUwsw`*FPctGC#bzaRBtY7|zz44T+9eBmiN$>0N zdvTpsFHQ{JpUEAs4WcMONA6^Vsak|uEW zqz(FAmX!o_G7ZA<*xM*SZrcFUb%VEaw`wat6C#r&4zz)-hwdWVCp#@3_FB$GC8^!_ zQbKR^4wQtf4LuK&wXvXVT~Y?>6wxPTh9u0d8zVa6#syFoJUtT;eXHe3NP2IT@#!HW zpmq+i8%KDkV-afQ{ zX}rtIger>6D2wy@?PB^vQLuMLV>{7$k^yFMx5BBszh+83uf8Ve)Sm&Y@TCn+wQ~8z zBRKj{$w1XL4e_K=Yg19$)Kh&s_IiTJAs`7BP5JP|hSjc;z!$2ZB)H&yb=dDkbJo$( zs7R1X_7M=)q>oI;8`UdliqM&x`)OHF!!~uGb&fk`4NPyN{NH*(Tg(8Ier`of&-`ime*;Ja6a($kDiY8YWryq3!2GEGTL{G*T8&( zj|o+cLm>K)I=mn9Ivp+7`>A|ZD(<6#udvu0X#DAV3UA&7dC|8{_(H^b)4FozhMk;1 znHPD9MVgw>c3*;#9jb>*xYZZb*ASmLiC*4`!iYWxfpDKZD zyyTl5*$so1IM*^AcVAcfxdLP1=y)jD6&&axtf^OaZ4&O#Q@z5->~?F(65QIxQ#;05 ztwRMng2v`Y#&nZB%66rB9rM60Wj|Ygzc)n4j@o{L{&!C42K3~<46xdXhX(>e`j=C3 zGIe&gv^977>yYZzZS2QgbR?Assd#K_ zzB)<7w50dY9h!VYOT8|$c;cRV+r7TVE$o5=N!v3#djhW>>{IW4C#07?oj9xzc=44g 
zY7&mh@B&`+_Pk>F`EjtppQu5l7|)kOX*zma+N4${4GEfR$0Tz5TfG^~ega$shJV1U zJI1}N+;>tl5OoBK?35A8N8pAQKHIZ_QblIQoD@Unwqcf>O20}f>LR)h(e7p~h1Ok< zMTdINfsio})19iRATBEMgA(VLwnXjQCb??_cZbh*mtmV1eOUg0cJCZS;x<SYzyszY4j0earTHGp7_S=hStatx9_E6p;J8d zJF(Z@t2_x+ecZ7N@XivHtXBTrY;kc*Ruen!VpztP$Y^)>TU;gE4bYN9uyP)EPZ{+R!I+~bP zu~q6YR3Q|JXhrk&OBqIBJ6Cg&f^gXR@tEl#RZ4OTB%|NM)N|@Z?4|RkArkCj1-Upi z)F>evxfl|+=TFeMCL;fTGADx`{lmA_d=&Ncs^xd{oZ+?UGrWiSh51*EUxc5_el*Kl}{Y_5$8k_WFdv zdU7whMkZ0~tN9=`KQv29P<;Zs6gy8tVpcL5=i;kgJVEKb;PVj93{qbEO z<2Wb%`*jdyf%ZG}%9X0mPxNT<*a@A;#t?!5shA8=WP#Ab;uwSD=-m}6Qe4RJ{Q@~o zsl1V0ROG-BM@QyHLqzc6j|j8;8&2*(9bNG78!3S;e*c$o1!QevhS&q%R8bkP*+*~9 zGJR}>-b&}1dkteI30f4ehsy_TlN>hY8&;CSJ{CB5G`Co_Os?uJJD?H7LK#U6$dnvS zEksSa>(z=AKIHVIC99Bl1u ztEs6-uaKvBCRjI8Pn*GX!nhBlMO&k2(XwhR8ytcjc-H0EU{umN*GB2>n<&wQXVTi5 zicsJ**7lDe)2#`kpfDO{<&a@Go6^n>bXEM3|w!HT!=Ejga!tvMFFA&GInutwzJW&v1YJzHnsU@txcFm3PhPh2oU<8 zuZk!km|jLy(JJN7&7xLBD!D?!KcWGY&2qC_>@oYvq-b7VBzipu?S}YsxLcFx3~)0s zN7ZyRl+!;>BMG>8FOMdG!%_;a3dy2zzem_xLWh`V}iV)+=jjt)d z^0|b7kuSpS8@9lV7(_W4$W^dbSMw^0s0|tpceDLm_X6d&fe+&lN~Z*daME%5rY8vRsF~La1wPJ z!X0YLd-Y^sy(R`rfa7CHlaglL>woW$4zbfO@q{=0kTjA`LX<3zRsKP*B-)_>s$dLjp$21v^nXEb8?{D zYEX@J*_lVP5ZGZ8={L)?;vcR!MWPBzXPlz1^o6muSVctH%xOWCQ(Q+#ZJt{?psd z>4CG2)KmryCW?W=n^}O=OQ@W1FHd8^TzPJ1<+bx#Utg8B3bNxnc6$JV*TIs(Vk-02 z|A#fZ*icBZ*JR!~j4|3cZ9ko}yc`rude0vRe8m;qK&CpaX>2^J)b&sOgbWt80l^c^ z1r$Bs6?vLC0vmW<vyvE%F+K3(X9ZNDXIX)Q zNEe_+Ej{+wix0AlGp0gLR@+&)7AYN3GV|G%S|;#x_!3@L(vR}%S@Z^K&_6W-ZXG6h z?F(AmN7S*2-1`wflNUir$Ay~~YY#UqD1UlHr_qZ` z`f0w?tP;Q1PQ|2=bRvk?wq*=T1}$upFh&mK+a!{>(?A$)mJAfISk-Dy?k2C0mGY3F z4uXBRuzl%`W@dYg_t1X&LN3#bSgbgo^evtx!VfD`b9%<+#h;@+5Kikf40-WnVUZVw z0xQ8j%w}iwNK|*&E<1^<8apucLcIOC(1)8l?VzX-*L z?*E{{pOS?0YbEi1IBNxeiN=2lb>x~y47qO!13CiHl1qbhd#&+=dx_d;h?s5uEPP*6 z7dv$3OvVHudAQ1mLy&lm#XQ=8J?~E?OoS5J1`5;X=UvrTSH(q9y;YKBuOrekZ^TRC zv4_HUs-z+o{JH!nPD7)+N}^~B@c0D3Rc?~{%Ey-&Y-sel18*f=;Xx}eWn$9^0ht>I zwp$v?%P7TmZr>o(^_CV5`Xlwx!+6iP)UaskW_=+$yMWD4gN~!Y@do_STlWW0bl;LE 
zmv77BJ<<2*`FNfP3Bv?{7)^ti=2|Dd{2q%6T7ZR2Lh7EN(1h4U{+k}$K-(89^*#gW z?L|L3?;oIR)9Pr~fY({(59d%zrh~Bws<{T;K1jmrNtTUoMeX6)%$eQL)}YUm9z}CK z3CTk$MbIFy&q)~dz6TaLw!n7B8dVY>!jQrD2yS&xmnd?5hl zwg2in;V^o3y4it%y4b*h{!08a^Eo+tSeyP65}f_S65q7HFK%*Alqy|olpEjwlP|!)qt@Xu@mTbGLZwB0z8Pnc02$CiusVvUH;`i zLP!-G0mUqTeeH7#lIlTqBiirrGdKd}=fl#Lz@^s~SMlAYyZeXHb@*p^meVqBQ(NEL z5<{ntc}GUaaVEo~;g73m$fJjFf{eTHuFr$6FSnl$H+6Rh!5vPQEnwe{rtmqc^m%pY z6|y@#T{d|+qaR-|hA57+Oo-baJYLv#-%_1RHrb%Qs%{%^TE{gkZV91d;PFWJ(B@Ou6;`VGgADWW@ugmHy)tIuK_1iNaT zC*Rps8#8kxancqiH=iRF%I|_Dq<Z13%YgQ?C($(J?nd&*Nzoo2YT{Fm!DTeRPfHejDnhWPc-k23K-rs4j0WDB_Kqi(iv=@Ta>w_RKg(AZ7iMPCMQ ztlRKVF7QVd(`gp#$?rc0$YoveKnG7h5Y%O;y zJemy+)?6_}ou%k>Z6j@5@d|Ke$oJ}ZdOY?U3g}1m5Jvp^x=K8!i8}ey<=C;Ly%pM; z&Hbd`e7bABrMHft*JWVbg3X%e^Yta>{YjBn<=s%9vr-}_;TUz z^!#{kZMXC_Xw-_Svy7XKaQOxA_G*oMRQd8T=y9u4{C3jwXmXqA*0=fgcz4Op+4dn2 z+z;VLD*7bA*@z+F=}!vH?t>Vb>o&wytHm^N)W5pYBnXOS3eQUYBga7#lu3lrILA^G zbYc`cB$moCXT$I;2;|Yn7cY3NYaB*B~iTqBA|tq@Hj^ zBuXkknPUzlbG;$8ILI~%#p!q?_s3?zV0^7BNP^L}jeSe`=@(lM$7XF#fTB%pKv7-L z+=EKx=Fy*2Jo=W(^H2A7+$fABkSP%zWE1dL? 
ztV*JB$%nm{fj-}kJ)bMzr_Y!D3@3$kF7KoBXa)JHO)>SkrwRACRbv^7Fi~Z4mH`7IK{0ySV)5aYgy{zC&J; z`qe+Ls{3$xospf(WTi3aQW$Ka zr|u~U;aZry)!*qnXTl)l|9r^>t2B|57>8xxZAM4y@5OhcyA$ zZ-T`7Ov8}*&>y<`BCK(+^3_-4HCB{m4V=5 zqcb>JsNLh~0HecghyuaPGopv6nOBc~NP;y@i0oS6eX->&niEB*neh1cr-4cVZ z^9O=3d;jsrApb>sD`JvThX@fm6;4+6mG6=DZCh(q6ko#VbX7J(#pkfHt}6$A%e$p; zgw7|bpVS;YLe?iSExS1Vmyz*)HG13X+R>f-E0kI~{-UFZ=knPE!Y7F5(8rDQSNo8$ zfh1z2KamhCI{wpCbTG?d7#3eYI14~ZOchHUA(wRM_~W#GEf3lssV@$UB!~<`98Lxz z0<#S0wksEfDCvtW6RPI@>h!rkM)?;M6D^+z{FkJH2XTM(eFRe9EDJz-3KLMwvdDMz zxhV%o`yd-YnlvdUxeK1wH{jvTopnBkAW1sW0eI7hGUj!@lOOM@V}f?b{|lJ|M5;=> zLqfLE$5BK0v(hP;oh=&uupN;)@6q=sFY&^nEpYG#78P#PQEF37CT>gd^~2^SC&JLj zBA3)6Y1h)gnB9jB*2qbnC1oOC*K)_t=KD>4o$?PQ=6K)3Od^BmjU>LDkfV5ef$-x- zhRlo07p4r#u2dtVlC5!8Zr3}?w?l1mq&BAtcM5?+(zi+NB-}QqvcEU7H}Yo1H9T|Y zhBn#!P-uo}MFQ#(6iS_zY3RCk=-<+dB$s%^s5#~4@={9g(bFTJ>VG9irA-W9sk7j} zd?3=qsraSDXN1-2jz|??Eptq|@i5eiWRT-ry*`RMa{80aNN3&IWkvgL@IQ7-`KT-% z+3q@X^6(MiiG1{^jgJk#l$_h{lO}vbWdziM#>Fe%iq5VH(M!pTt#FJ;QSwl6L032J zl7NhB-#qoTqy6lPYT7M5b}z@({FM>l$W2HA%&wXRwyfx60^^8GC~{!C#H^YHv#jWW zY#?$V^$R{xq1zy6bPIt+_0tzmBzlUf1Xx`Y?HI`@{eJ7_c>2!~n$#JVJ{rQ_a} z4j=$%@-Ixy&XhjImp4N|`zqrSUbMgM|3mfvRzU&Gta|yM8-G*=z5c&Z%nKcp>)7-I z>;I;OkABDqV4RYB`*a3iEH#WQ zly~VBUR{SOPptqJkC|V?2{L=)mX8=M4(oRjOde016Ys=s3V*Ip98pmz_!Ctm>)N3L zg{Goe{aze&@C#e!q3}Fxd=d_oNKp3+AC%HWBEyvAfk_ppu$m1H3c8V#;%8CV)ZxLx z(~5!6*c5=Nt+IY6m2fe?<{e5|hNn>!Qds@T0z?0|8W7caUT``kCzE@ziTGJSp|rxh zH>H2rzuKN=0BcH%>(BF2*(lu=+>1^aYE_qbz$hx+q2}ZrQdxwik>oR2WhFG0cqn`3 z9V#Me0PsrwU7}ueB6bD?fT=ytTgs}$GLMVGCfOUiW+XN)eB0OWOci=IG&r_uhxyj~y3MJe-{N2UW0T>}d1l>I_!*X_o8C|AAt!Nb0tI`M z@7st`zMwe_ooo7#Fq{~0v7Y-WA8iRFL+uEK5>W$}?RZ>=x_WxixNUb|dM`Aq?w6z) zg-lr5PDKId-w&;HidZC^+Ri&Jw&QdmblOgWk-&nNk<410%_!B=y2mW3ClrM&cawCh z@xg;DO;41Lbx$iLNPe(u{4w=Rn8wx@LDuSHl~Vr!*Rlv!)oo!FIipx<*f`5vX;?LK zVl^yTM%c6FmIlMDtQeyWp``6&_+44TXXWMR(Nfklaecix?LyY}fQ;M)R+EUA@K}Z4 zV?V12Cvyd~wx6&oi>deQ0^(P-Qfv?yRa{Y!>}oF2xa?^z5fy%+s86c&16fIu#pzf* z-Yc@1|HERk($<^QX}Fu!yqxytMf+Tc&v8LHElX)FSZaMy?f7ag?YNN@rmq3Cbagu> 
zB}_P!mKIC0l^mg#TxDvL$d6uTu{IsJBxxHRthMxO<|H@Mf4Y1 z2E9;Tn;-~|K8-%-RAibTHwU&?XOVB&QwUkeKj9{0pD3C?HXo&0va2#aK4sRv9&e;R z7do7-RHgq&&%%K9Ch_9V>L|9U>$V`- zG6d=HHMMty}4J=0il~ zIrxWkOyJ`xoByLq;Nzuu>&bRqwwB-97R|5&56FsZ&r}DZ92#Nex4#>a6|!Xby{MAH zYkS`N*_O|%-51IqO5i{ALQ3ux=qIyOsiHabIkd$_H_y=vaWWq-1Ygf5U!U2VK;)SQ zFSGB6&(F05yyuf&ZNDH*dnNG05JpuGjs$k%hV7u@x2j!00{oa}oa6>5RLgd63FxHy zO}qO|ofULiOO_AAsXhm7iK@_uYA9$+p&*0`AO?OHA0&Nv$vv*9cliJCcgpg;N33`v z;IQD-gWlY$n<7Ko47_hiF@8SgA~|(8#^XhOwZ8I~#GJ0wgg0Y!;o}&BwL3gdG|ye? ztPN0tLY_sKJSjuxu=D*;Hb_tk-qG)K!t9Kb!iAu9gE?eaTJU@gFOvGc>-XHd{t&R< zE>D?KAy~}~T^j!O@pbr=<#i>U2$6NYnOe6ugAG!fe$d&@iy9T!%47- zD=|-`Gb(Lrg@c5qpUBB&%e;JM9_H6kHn9s)2A0S*!aKFBmqxIQk|@@%ZBb0!8h;E( z;@W~zhP4kt>M!&~#MhJ^M0Oe*n`6}(9F}nmHKf(P>5zpCl2*Z8eBK%mY_@ssd`)(S zW90|6`7JL_Y=*6rB?Dp=m6SNx zBwsO!%b)yRzHGqJF$=sd7ll@B?hiza^d=ADucu?GUN@(GO(n3J9biAa2eKyAJMXW- zr;O(LZB7O(KkWIL4sW{2^}I1F!)IRS2QZ56N5c`eI%&jDnsyhwBoZ#aO`U+VtS^iq zYxA!eVkkXi|7;>33{;jAj6?`jC61uwPq2iy@;2Z?;wG^UHtq)mRp8NPaW2U@WdUv8 zJv`K#VI7vWOA3Q?Kug1&jFWB(TDC{zDWMxNg_qL<~Mkd`3q6!hc+KFIt02$MZmamWWcdpgP%lBc!q|O zC4u08G57~CRBziBwkOh|a-i!Z?o8^(KaMTL4yAM3bxJHxc7eY{$wH1_d>&fa0LAe7h)f{>_1U*)D=f(d#2XD4#)a@!@MK z0KM3G8vRBxvf3vVum!JUP9Jp5L-V47S%E$kUKOy2$Lk z@x0da8FCYn+aV2=h=ZGY7x~Y4;l+DmEAUNe6iW;e`~uyk2{R!XxJoZI6#knHLl*eT zbT|6h4Q{*JjZd78HT{8=t1lRJff;RySkO*CY|KtvrFsa6DC)zuIef@K4F6Wkc47>5 zizM)$PVde22y0Dk1Xb418M4pt$?fkr)Z$1R>sX0)gDREIK1-z33x#W*U60S_;Tgk650rV&!0&y%JPWpB>}_xDp#I3v9itYcC6WMY4)bH|#{( zCj2AyNS5L*w+7h@%hl0KERu?2U^6n5sZH`RnEsS_CzskO>#@32X@TS_-p*k zZqgAsdfvtVz_GJ&ktLoA9AnVVuXt{%k`W7R)af}>xR+PL@z9fRYYJ;z{H}0$Y~68$ zygj>4r{cSHa-Idzsvc*&b?_Z7c+y_SKrA2n0oOl9^Z*!<$k5w@zT!EY+0)?1lv++6QYMcVVCP_nBDyeJ1}0 z4%l#AD?Oi01&lsot3G$k;~TX__&uZ)U%`Bik)R$CjZYL=%~NaO!qb z4Su^QZ7*Hu#8gO+lu=TUO6VdaQnLew6Vb@G1d?xRj-vMAAcj%RT8|X*+nJM(2O-^Z z(|;jEBSjM(`vDMKM!#KdCfag$+?XUt%av=XN0CO9mP87}CXl3DC2K(sLV~2Dplq+E zdvKAO{cs~KuyCehOO&JBjlc6s$X3&L-f*$L(;tSIs7Zep<09rd^(;}X;UXUSCgqxg z>rfC!@>|>3nNb4Fwo`&cYw*yAv@C&SGX-r538f<34jTc!E1Zj1Gcit0DM~V`6`xwo 
zI!dxuBo2>K0=IlWWV?|VZyd=l5_UQyY33Oyl=>iq!;EZGBBGAIbBF)G=WRKY7+l$HHm+$ z7DuxBghMXNFY&%$*#wVSkbxakW#mf9R-pS##fox~qgbk!E|sfTY5M9}8x=gZLgM&$ zlf2!d%lF3O>(bq`&fa5N$zgrCo%8DaIM~d7;_bcXE$@1AB9)1_xqaQn&Cd}Ugr~Ev zDt!eGaI$rNsr6|g^lFfs)r01eMUtfuR2QFNSRS1pFYUUfa#%W%WFpO%0wBD$* z-{9i7yg!{dfeR~<#!7!Ai~$#{0jlz<$v0J)d;?K<{xJ`%G!w}94T)~5 zu)$U437P}<_><#>H1r`!4(b6Fw#AlR7}EkLwi&L3xD~}h69`+A^`1Da2MyJnGBswE zB3RZYGj=^&Ch5|AXek*pmzrE}w%^2FtI8v^<|7fp(&^)V(q^a;C0ia;WGr!bimn6; zV;DsZ?2Eg49yjG|bg8kns$s=e6;1<*&TbMN-=S&&eM%bt-+huc`&XYN|0+8y^Cx5B_7m|d`#vI@NQp7hygt9N6I1b8|B*|Q9B(F8jPLFK z^O5%VR=Qq)O&I#ugy4_NB$?&Gler1w2>PO}b-&L2IB-NRS6BWuG3IY_0UodyB0xab!xZ{gx zz@{BL`M+2&B5oyT%9&J)>1}r9GK;{=j^Mm9*tWM1BVo*RCuoVj!*KD*H|}?Mw)dI# zU7Tj!cO@*|pzo1bykYVqSiB*dYnI)HR+>a@cVZ*gLe;e~u;_2`I$N0?FV~;W-mY^| zzYLV3cce~a)I2qr2%G<8Mw0^mDRLM4Of-S0sQX6>RlSP%;ZKd{(I2%?XURJ20vYOZ zs~qd@^3Ep`*6rg4M?z~U`SJ)h8j%uWR;+~nuKjE;in{0P6OO{Wl}_tcoqP4TtXcO- zeREX2rz(~xaiD1|ahl3t1kq5wQUQw$tNE@HjZ{uGW-a@3*D!^!O(3yi)vnGVwdDP7 zD^~-@hm+43ITf-EgekvVXZ_2W?~Be{iT8c&7cWyd5YFN=(4gFV76HJShV`>P&w>e; z@vr`arQ#pqG?4@H{bAx`YknFrq$PAG{8XNxsF|}LulNm`aiHZZhS3YfZxHE4(yauK z)y032lLoEW;_q);Gb3U+;Ep5mP#~NKXP`j2`z#9IuKg%CC>|gqRU%(8(CAQ#Jw)>l zE*KFck>`_~vr#*M!{kqVeWIahXf~>rgwz1YBGHRXhdlO1ONqq?Ispfx*iH(U;+*;JPvk?cA3fV|KNzLHo94A~Z zJ%z;A>K&FY5#a!kjsFc9Px3FaoO$`@Ymp>Vv@2+;N8hhWO06qrtBCwN`QpL<1RmJY zMsSsf9huZbUIs7-+tp=!-je&AC!dUU%?O)0 zIQUT;y`=h{ykeXk&UWb@JY>nO*3m(; zi42~bCD+WUwH2?UjJ4k{RBIG9p$}%!2hGSP*^c@>4?har1CJ^6ZV%m`jvc7Tyeikc z5hh1h-WF$H?m-5dqtypPi8%{h(SFsOvzLlou$O|3K_$gr97guSTdPTR!^u2@jSuTV zr=j&~ftZi#5ZjAyM7XUF&*j59YWY$L1@4;#YXZl-W(&mwG54zTk%-M&`@5fTB4q_fyoz$Jm zoD7wL#F-CP1!AAH!uSd^*$tl$4JaX5o4^UgS*}m+#!9+|&WPcWWM9Bz9{O zRgl>ijgx_xgH2NQ(BNgj7ffJxEoZV}vqC5=g$)`LKS(*Rz|T{xnsJnIDxpS$4)3hZi@p3bl@a+vAC zb)}-lH7Rm|<4cSSvbCsvGt0}Dn0kf3UAcldVy-Nh*6Y`&DK}ZryR>}ZBr>UKgK)x_ zaMyDpjX1}PkUz|g*wL8v`&3y*l;I?djoXaf6Jb^lFgFD|_Xe&*E&mJwl!E3XwQ2cd zMscZWz!6Xl+S?NlgzYede?$}r(ZMqWhis8L%a8@q$PM5)U(0haES 
zzhEQsmlv?-XPJs&!gq`ZFu&P82{PfDsf2D+Fr~w!R^2Vrgwqb#v$8k7d$pF_Ns>Z<{L{lT^a}C2Huko zzal(Xu(w7LD~Voxbztl*Lk=_X4n6TQWG~z&crYdKhZiqWXDre0*Hhy+O)3VCpE6s+ z(Ho1zY1D>8nlVyR1>fU8p#xn}sKmDhGDPBZ;ur8jF9Q32C6#&gX$Prx#G*iTuMK8N z21oMRgJ_pdS)vLvG;vu3wNjPkop ziGj;@u13m5%y<S2YcC6QH8pA~E3`A-TU7iTo194KR?a6UuiQ!&jceoR<3+qV)Jei#)=h2}COL7Z zS(+kX4hk~aIUw2m>1RazzOvJzlD|;x4MHgpqsG0g?t@|}5SmAK2H4;$NJUiC_=rWN zBt@m2O7;^>ayM|qvU`;*pmT6R*5n|fcjfWnV!!(}@rGqV*Jv<@=b_OfUN;qHjfZfa zU!>bjq$|`;K%#RW2*Chr7+_Rh$zBpPp7DhqG!|KG!QZV#ypbgN_lQs|7B+ zejud=AJX!S^s&H2lC_y-8HR?QA>E9m$`(U-eze^`Mv3FF>F)M$S2_P~S^g{N+mA!Z-LwD%#^p<65 zP*%eTDmBCd4CKB+aTqAq%rm8aodtJ@B169CLUyoYuo7-h9Vc*+0BORQv z#Vq0S(R3alK_Jwh1yg$1^il9~vwuj~FQCAJt2@A$(-nogBXP+A@c#V8Qx{=s{{2Sj z!q`k7xnSjrqA~7m$8&NAq5hX%Ie=b|8i3yRUwX~GkizhPT3WfFK6H3~mjmcE1L!sA zRbkMW(v>qe=gFg?>qvz4psi2m`}ermPN2(3*?OyN!!T?mY`p{G)MJBz@0qv#E>4Y++*Pz{H5m&(wC{o$aNje4IGN%`A!hA-*rfaE|m zV2b0yt9)F)bl8YBo+4H)fQ%syY#s2%hxy=s?)iWz5a4i0u0;Q4gGl5)_jseh?fVW# zCJLdtg0#BiCIIyp=O|H`#mi&rKE6{Ybv+GB`W2N65@qAZsOt_$h;N8Ra1Srt@+m`( zj^O!PHxiMtIE$fNoNhV6E;ko((i&DpO;y(;QqU&e9J}anD~B0I!dg~ITzyq<&H1cg zE%Uo`kPV$r&{A;8`K-!I^Owr0X~$)rBnq(G~d|FX66VcEub{Xlb9=A!)8>!bz+ zxi3viFlp$&f}M!B$AeGbh%Lx$Z+!2`>_NMIqipn zxQVaa1z6h$B#5@rZ{(JNGBGf+a4B50o~Nf<;uQg2U!A3dPMCL`#uBBjz$UBlF9es8?YVcBcJLK8Zm~UG==U>>X{Z++r;l(9D@E2;$s)1GMT#h)7!r4p3mX68pv>hUl_ zwK9^FY%fW!`TQoURqk!y5n-^N^HE5D%jy%`$n}Xp637 zjS^4an8wW)z^{EbjUzym%!s|)=BQ22&lHenomK9&wFk9oi+7ci152WOJDES1P2ai< z=`9bdO~ubT1%W4B%%)-?j{-~HxHi3Z=q{&jdwhu~OMC(hLE(PPr^eBIrX0;DUmjlR zO46p5);(JuD7J$+C=#8mz&cuqtLH`r5G znf3ZqgJg{GM$HMG@T|H@isE(yIPM&iev|0ULE&c495x;gMGWQM*{=v8@O{Fi&!nQz1zS#Q8s*?^xl$ruLv5$;}RiS2(=O`3M6g`RtW#Mw4XEo zpyjLF&F_$4Nn_FO-3V$2r z$YQRH`vO0utx%I0E!oCUQ_p7NM~iq0Keg%MSL5>PbkppAOoLo4eR2gAyx3L;}C$yadr;e@-!NP6K!->Lr>QgjaNtFsB)$ zN#Nbm=xdk;)|PbuSXt%fn`0EFwhYlefqz7eI3cz5|1+}M_ zFNie`q&vvCaIf*#X?TjH3Rs_l^~O`*et)+0bAJgBosE9ZqVn9J0Uha>_a!aRR36E< zml)4dY=^=%;8eml~ALyqK6rO$>* zBqV%j9ly~ECuj99$d^N77WN%W@dU6@YrTs}U_(i?2S9(2`m=+L;`qX;t7V$$R%fbS&uv?DXyl 
zBmOZAD6GL{ZLVELWP|u-11`>%lF&>}fL8TPnS25C{WhrxonIYe?^E?qOR*=}8Zb(i z;lvYh|)Yf9xa*aWAL!b;I#ljq*OVK_>&(0E-11NT_|vCsMkTi z^Q>YRWcPA16P+$L#+K!w#~pSuY@BUPL(-Mrj|HLZ*y$2>G7_HM%-X~h9Vy2EW%;X; zHgzB4Y?r>tbj<@7cmEu`MyRp@?mlFp8fIBLvxz)_?>y0uQZgOc?Yhn{GQps34s~LA zZyj1$pMHAX>0q>+CPmcwV0yG!+^?4J8|Hpcz8Z_IY2{)uG^+V*ZdRzo#hQ=V>pegG z{EO38Qhq{8vom{r1_E}=|K@?;sPDTukZ{5wC;!{H75%gWk-#lc)hc}-ZfvS;=XD(_ zLfmaJLDxh*Xafgmw`=?)?w5O*-V$ny%V0_SP-7XDE zf|I8M-`J3^vL&>SwWD>KIV|RIfHZ(PQlZ`Y9aqK*8T2)qG5jsEyR)2G-l~4fsy!hKUR|HoP7NC)+Q6f9MW_S75;QY`O>( z)&nFbp-_kN?BTtFaE9Z5q|R!Hc8rIx9>Vt`HwsI%lehS)!V@-yPzewWChsO(L|&Nc zjdOtIJS)BcldGIzL5L+|nJ26V7HN$>AP{tA z#0W&i2x4ZDYgz`P}U2UV+@q3$z#72wVe z)In8)JF7%AuQ3Zq?FxOr*V_2&Y63F8+l{2#ts`$3$p$mvmiXqje}EicnXxXj^MLkG zTkFa^V>C9j5Vz8>f#yk0n@x9%YKv~bH8q1U9Plm`H(`uCH)bt-z}k(uf^=%-qIlgE z6N$*ja@5$Pn+1AL>O?enQ(^_g*(5M*QA7$0FU*FkV`H%;0GRV1aNkcb!+6a<;EBNq zH;i}-ohCl6uapci8DPI^=Dp9kWC$#d`G!O z??d%|pVzYBiXO!jh%Jc?o}?bjB!K}ceHQq@5&HTOy105RtP2=*^}LzH7vT!9nwdnz zQ2c)1iYoBElXSZ)7HvjGHkIgL;w)&OiT56X2N14EHYb|K zdh7^AOiE$c{#pZ*Atul@+?NwLPs5KU3^1${-2V6++KfOp^)Bkwm2Xl1vDq#6!Si%G zphKDsX!T|Jr`avU*}?2L5sbnKO`mRV7NVuY%&#}&AD9+_C!U5`wrn+u&+d^h+Db5m z(k)@XN1EVpI}RZtX)W7BBwVxwlLO@eSa^k#D&Zi)~>KCkTYyB2CsLmKR-})?#zl_a){$`Wu zg3UQ81ye{=`Gl-ZdTPTbVT;_%o^$UBlNIg3lhNvh)=8^<|X4 z`cjYP;@-QFus(4=Rk3%scr}iFO8RYtp6uG$5z#fQX9K*a*Ye_dIkZ;CW`VTV#2WiM z%!`vA#RP9Us}`6hwM~9pzk!vaTF>X+61wHljOa3%axk zj*9Y&yqT$V&ty5nB(rMDx^F~>hNgvUqb#upoK&eA8RwWXEud3+o0yc)l>w?xcNwK! z!OmeUO!8R`g#tVV?Qt|*%W$J``M>ILnjsASJaTrw)l_v9FD!f1c- zP)^CG-=R@X2ThV3c*G!<*=k*)K> zU3QtJp@vtmn%($Cl`xM#y=4wJ35EmJL8bh;#FdFgzPvAiFj?8cv6F2aAmhL~*Qt_1 z+X+f=)M^-|-_Z}p>2;tI0*!lWh^Z(d<+BT?bS4Xmjh)W)du@LT3*fa$XnlO}7L+l4 zU1zc^XT9cPd6L^1y-QkJpojYUynE-;r8#v$^jniH=aTe6dl3ttvfL2-e!k_W8P|~3 z4H~d8%H{Pt+jUX%HdovguBgpN5Qc^D;|daX0!r*LvXLd_D z+o0zQfOaJ~hd=i#kr=mC&eWSvX;D&(o>6rj&m#}4SBh*_i@ff?#K*-W67}w1vzkq! 
z*4UaZ-VA1s#=Owv1{EOWQg}~XgU6Ou$%C#b=S=7)CooQpKeU`PfrIalG?r+932W~9 z?cw8Y{@*&7VJ?{|Nn+VQ_M_CvK^{S4A?kvZ9?&^(Fj0J-D(x zGwA);^cpuR%li9fAq7^D>C63BOrQ!*RABFm#@Db^i+z?sjrs@gL`0$Q6K|KcbGLj4 z4t@XL0j&MWcq)U

rI<;TXb5o zPb47?cKjr`LirZU%qzH?UnmB74RFqz;^Kou8S@D^Dg65!f8ydB9~eh9hCze<2A>5i z`3_LynCa2MR(D>&NJFguWREmAO(#1waO{a#5Ckrm9X zgd{10Eh)CRnX&|U%qld8;n9<&rDe1A21Q)R*l>5pHv4rY$iz3YuOD{z1KK>UZoSSq z!Rt{gD*H4Rvn~RK&3AkuK_0$Pf;3QbkC81?SgRCxRt0CRg{moc#FT_+4oqIp708dr z#O|OMV6t{R`lLk!+F60^GGC{bvnys{@61UKf*I3@X2C5jn)mZ7ttSU^iF{nXNXK48 zDb+H(-3~E+k>xqH@WOF51bzCYeVor;8kIGp}q3K!ZPukitO9}y&b_h`2F^*uVB z{4bb)#4=H#S=ZHP(zFDIr|KLX3XNH^G$(ZXavfgV+mQ5nk6@uf-A<77$vd+%X^reg zv%%Pl+ZnqP%#g8yd$z0>d~Lx5$xR4??GDntVxfL$U~O}%Q9NR)ZS|3xMV`>RRl39d zn5#t%OR=O^?!@hFzGsZJVOr@U4tpzvo<^@HZ(h^b*y{}}V)6Ct^<+>^cSlH2wv&xY zG@Hv)<(92%a4d+;fti5Lk2NP)+8B8lE|h3b^;@-IEtRT(hyf>atA`VVSNyWh>)h?G zCU>L3gx5`ewXPr5x0nU^Dho(LtVy?c3Z86wLYvv<{o>wcE?O@DFD!RVZiT@8BV$hkxYX{n)x z!lp1`4PQ&e^e&PW!;l`TZzVkskl)d@0ZlhPg>K~Rso>ls#l}^ymf=slLu04)O8PXbG%JV*J)TNPA)rA3 zs2twws2XglHoGQ9G5ayy?hkpg(O2H>^RzvHHcv=ZV~^%PZInQDrLb-w7KDM#!Ub%U zU!qBWi{SoCI_ZB3xPR}MAD_u14X7SzTJoi@OAB3YK85opkC>_qMFLd}=-a>-AW+94 z3lI0jjOy#Lp8?g*7Bj9?M_xLq+^2T-F^9N$wsh{0zLJHAV{u^PTQ8x>D?x;d+(Z-%o178BDNYu)#mweBI^0S zgpY_%NC4H(KRU=S`4genffx`0U-SXE`o|^^2#X_@@_th+-Yb9~YDPg}+_)&n)~) zTIM+TcyayW literal 0 HcmV?d00001 diff --git a/pom.xml b/pom.xml index 38b07b6..269c946 100644 --- a/pom.xml +++ b/pom.xml @@ -77,6 +77,12 @@ spring-boot-starter-aop + + + org.springframework.boot + spring-boot-starter-webflux + + org.springframework.boot spring-boot-starter-cache diff --git a/scripts/deploy-only.bat b/scripts/deploy-only.bat new file mode 100644 index 0000000..f15a228 --- /dev/null +++ b/scripts/deploy-only.bat @@ -0,0 +1,219 @@ +@echo off +chcp 65001 >nul +REM =============================================== +REM Signal Batch Deploy Only Script +REM (Build with IntelliJ UI first) +REM =============================================== + +setlocal enabledelayedexpansion + +REM Configuration +set "SERVER_IP=10.26.252.51" +set "SERVER_USER=root" +set "SERVER_PATH=/devdata/apps/bridge-db-monitoring" +set "JAR_NAME=vessel-batch-aggregation.jar" +set 
"BACKUP_DIR=!SERVER_PATH!/backups" + +echo =============================================== +echo Signal Batch Deploy System (Deploy Only) +echo =============================================== +echo [INFO] Deploy Start: !date! !time! +echo [INFO] Target Server: !SERVER_IP! +echo. + +REM 1. Set correct working directory and check JAR file +echo =============== Working Directory Setup =============== +echo [INFO] Current directory: !CD! +echo [INFO] Script directory: %~dp0 + +REM Change to project root directory (parent of scripts) +cd /d "%~dp0.." +echo [INFO] Project root directory: !CD! + +echo. +echo =============== JAR File Check =============== +set "JAR_PATH=target\!JAR_NAME!" + +if not exist "!JAR_PATH!" ( + echo [ERROR] JAR file not found: !JAR_PATH! + echo [INFO] Current directory: !CD! + echo. + echo Please build the project first using IntelliJ IDEA: + echo 1. Open Maven tool window: View ^> Tool Windows ^> Maven + echo 2. Double-click: Lifecycle ^> clean + echo 3. Double-click: Lifecycle ^> package + echo 4. Verify target/!JAR_NAME! exists + echo. + echo Checking for any JAR files in target directory: + if exist "target\" ( + dir target\*.jar 2>nul + if !ERRORLEVEL! neq 0 ( + echo [INFO] Target directory exists but no JAR files found + ) + ) else ( + echo [INFO] Target directory does not exist - project not built yet + ) + pause + exit /b 1 +) + +for %%I in ("!JAR_PATH!") do ( + echo [INFO] JAR File: %%~nxI + echo [INFO] File Size: %%~zI bytes + echo [INFO] Modified: %%~tI +) + +echo [SUCCESS] JAR file ready for deployment + +REM 2. SSH Connection Test +echo. +echo =============== SSH Connection Test =============== +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "echo 'SSH connection OK'" 2>nul +set CONNECTION_RESULT=!ERRORLEVEL! +if !CONNECTION_RESULT! neq 0 ( + echo [ERROR] SSH connection failed + echo [INFO] Please check: + echo - SSH key authentication setup + echo - Network connectivity to !SERVER_IP! 
+ echo - Server is accessible + echo. + echo Run setup-ssh-key.bat to configure SSH keys + pause + exit /b 1 +) +echo [SUCCESS] SSH connection successful + +REM 3. Check current server status +echo. +echo =============== Current Server Status =============== +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" 2>nul +set SERVER_RUNNING=!ERRORLEVEL! + +REM 4. Create backup +echo. +echo =============== Create Backup =============== +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "mkdir -p !BACKUP_DIR!" + +REM Generate backup timestamp +for /f "tokens=2 delims==" %%I in ('wmic os get localdatetime /value') do if not "%%I"=="" set DATETIME=%%I +set BACKUP_TIMESTAMP=!DATETIME:~0,8!_!DATETIME:~8,6! + +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "if [ -f !SERVER_PATH!/!JAR_NAME! ]; then echo '[INFO] Creating backup...'; cp !SERVER_PATH!/!JAR_NAME! !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP!; echo '[INFO] Backup created: !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP!'; ls -la !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP!; else echo '[INFO] No existing JAR file to backup (first deployment)'; fi" + +REM 5. Stop application +if !SERVER_RUNNING! equ 0 ( + echo. + echo =============== Stop Application =============== + echo [INFO] Stopping running application... + ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh stop" + if !ERRORLEVEL! neq 0 ( + echo [ERROR] Failed to stop application + exit /b 1 + ) + echo [SUCCESS] Application stopped +) else ( + echo. + echo [INFO] Application not running, proceeding with deployment +) + +REM 6. Deploy new JAR +echo. +echo =============== Deploy New JAR =============== +echo [INFO] Transferring JAR file... +scp "!JAR_PATH!" !SERVER_USER!@!SERVER_IP!:!SERVER_PATH!/ +if !ERRORLEVEL! 
neq 0 ( + echo [ERROR] File transfer failed + goto :rollback_option +) + +echo [INFO] Setting permissions... +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "chmod 644 !SERVER_PATH!/!JAR_NAME!" + +echo [SUCCESS] JAR file deployed + +REM 7. Transfer version info (if exists) +echo. +echo =============== Version Information =============== +if exist "target\version.txt" ( + echo [INFO] Transferring version information... + scp "target\version.txt" !SERVER_USER!@!SERVER_IP!:!SERVER_PATH!/ +) else ( + echo [INFO] No version file found, creating basic version info... + ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "echo 'DEPLOY_TIME=!date! !time!' > !SERVER_PATH!/version.txt" +) + +REM 8. Start application +echo. +echo =============== Start Application =============== +echo [INFO] Starting application... +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh start" +if !ERRORLEVEL! neq 0 ( + echo [ERROR] Failed to start application + goto :rollback_option +) + +REM 9. Wait and verify +echo. +echo =============== Deployment Verification =============== +echo [INFO] Waiting for application startup (30 seconds)... +timeout /t 30 /nobreak > nul + +echo [INFO] Checking application status... +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" +if !ERRORLEVEL! neq 0 ( + echo [ERROR] Application not running properly + goto :rollback_option +) + +echo [INFO] Performing health check... +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "curl -f http://localhost:8090/actuator/health --max-time 10" 2>nul +if !ERRORLEVEL! neq 0 ( + echo [WARN] Health check failed, but application appears to be running + echo [INFO] Give it a few more minutes to fully start up +) + +REM 10. Cleanup old backups +echo. 
+echo =============== Cleanup =============== +echo [INFO] Cleaning up old backups (keeping recent 7)... +ssh -o BatchMode=yes -o ConnectTimeout=10 !SERVER_USER!@!SERVER_IP! "cd !BACKUP_DIR!; ls -t !JAR_NAME!.backup.* 2>/dev/null | tail -n +8 | xargs rm -f 2>/dev/null || true; echo '[INFO] Backup cleanup completed'" + +REM 11. Success +echo. +echo =============== Deployment Successful =============== +echo [SUCCESS] Deployment completed successfully! +echo [INFO] Deployment time: !date! !time! +echo [INFO] Backup created: !JAR_NAME!.backup.!BACKUP_TIMESTAMP! +echo [INFO] Server dashboard: http://!SERVER_IP!:8090/static/admin/batch-admin.html +echo [INFO] Server logs: ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh logs" +echo. +echo Quick commands: +echo server-status.bat - Check server status +echo server-logs.bat tail - Monitor logs +echo rollback.bat !BACKUP_TIMESTAMP! - Rollback if needed + +goto :end + +:rollback_option +echo. +echo =============== Deployment Failed =============== +echo [ERROR] Deployment failed! +echo. +set /p ROLLBACK="Attempt rollback to previous version? (y/N): " +if /i "!ROLLBACK!"=="y" ( + echo [INFO] Attempting rollback... + if defined BACKUP_TIMESTAMP ( + call rollback.bat !BACKUP_TIMESTAMP! + ) else ( + echo [ERROR] No backup timestamp available for rollback + echo [INFO] Manual recovery may be required + ) +) else ( + echo [INFO] Manual recovery required + echo [INFO] SSH to server: ssh !SERVER_USER!@!SERVER_IP! + echo [INFO] Check status: cd !SERVER_PATH! 
&& ./vessel-batch-control.sh status +) +exit /b 1 + +:end +endlocal \ No newline at end of file diff --git a/scripts/deploy-query-server.bat b/scripts/deploy-query-server.bat new file mode 100644 index 0000000..320e63e --- /dev/null +++ b/scripts/deploy-query-server.bat @@ -0,0 +1,47 @@ +@echo off +REM ==================================== +REM 조회 전용 서버 배포 스크립트 (10.29.17.90) +REM ==================================== + +echo ====================================== +echo Query-Only Server Deployment Script +echo Target: 10.29.17.90 +echo Profile: query +echo ====================================== + +REM 프로젝트 루트 디렉토리로 이동 +cd /d %~dp0\.. + +REM 빌드 +echo. +echo [1/3] Building project... +call mvn clean package -DskipTests + +if %ERRORLEVEL% NEQ 0 ( + echo Build failed! + pause + exit /b 1 +) + +echo. +echo [2/3] Stopping existing application... +REM SSH를 통해 원격 서버의 기존 프로세스 종료 +ssh mpc@10.29.17.90 "pkill -f 'signal_batch.*query' || true" + +echo. +echo [3/3] Deploying and starting application... +REM JAR 파일 복사 +scp target\signal_batch-0.0.1-SNAPSHOT.jar mpc@10.29.17.90:/home/mpc/app/ + +REM 원격 서버에서 애플리케이션 시작 (query 프로파일) +ssh mpc@10.29.17.90 "cd /home/mpc/app && nohup java -jar signal_batch-0.0.1-SNAPSHOT.jar --spring.profiles.active=query > query-server.log 2>&1 &" + +echo. +echo ====================================== +echo Deployment completed! 
+echo Server: 10.29.17.90 +echo Profile: query +echo Log: /home/mpc/app/query-server.log +echo ====================================== + +pause diff --git a/scripts/deploy-safe.bat b/scripts/deploy-safe.bat new file mode 100644 index 0000000..e25cb08 --- /dev/null +++ b/scripts/deploy-safe.bat @@ -0,0 +1,237 @@ +@echo off +chcp 65001 >nul +REM =============================================== +REM Signal Batch Safe Deploy Script +REM (with running application check) +REM =============================================== + +setlocal enabledelayedexpansion + +REM Configuration +set "SERVER_IP=10.26.252.48" +set "SERVER_USER=root" +set "SERVER_PATH=/devdata/apps/bridge-db-monitoring" +set "JAR_NAME=vessel-batch-aggregation.jar" +set "BACKUP_DIR=!SERVER_PATH!/backups" + +echo =============================================== +echo Signal Batch Safe Deploy System +echo =============================================== +echo [INFO] Deploy Start: !date! !time! +echo [INFO] Target Server: !SERVER_IP! +echo. + +REM Set working directory +cd /d "%~dp0.." +echo [INFO] Project directory: !CD! + +REM 1. Check JAR file +echo. +echo =============== JAR File Check =============== +set "JAR_PATH=target\!JAR_NAME!" + +if not exist "!JAR_PATH!" ( + echo [ERROR] JAR file not found: !JAR_PATH! + echo [INFO] Please build the project first using IntelliJ Maven + pause + exit /b 1 +) + +for %%I in ("!JAR_PATH!") do ( + echo [INFO] JAR File: %%~nxI + echo [INFO] File Size: %%~zI bytes + echo [INFO] Modified: %%~tI +) + +REM 2. SSH Connection Test +echo. +echo =============== SSH Connection Test =============== +ssh !SERVER_USER!@!SERVER_IP! "echo 'SSH connection OK'" 2>nul +if !ERRORLEVEL! neq 0 ( + echo [ERROR] SSH connection failed + pause + exit /b 1 +) +echo [SUCCESS] SSH connection successful + +REM 3. Check current application status +echo. +echo =============== Current Application Status =============== +echo [INFO] Checking if application is currently running... 
+ +ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" 2>nul +set APP_STATUS=!ERRORLEVEL! + +if !APP_STATUS! equ 0 ( + echo. + echo [WARNING] Application is currently RUNNING on the server! + echo. + echo =============== Deployment Options =============== + echo 1. Continue with deployment (stop → deploy → start) + echo 2. Cancel deployment (keep current version running) + echo 3. Check application details first + echo. + set /p DEPLOY_CHOICE="Choose option (1-3): " + + if "!DEPLOY_CHOICE!"=="2" ( + echo [INFO] Deployment cancelled by user + echo [INFO] Current application continues running + pause + exit /b 0 + ) + + if "!DEPLOY_CHOICE!"=="3" ( + echo. + echo =============== Application Details =============== + ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" + echo. + ssh !SERVER_USER!@!SERVER_IP! "curl -s http://localhost:8090/actuator/health --max-time 5 2>/dev/null | python -m json.tool 2>/dev/null || echo 'Health endpoint not available'" + echo. + set /p FINAL_CHOICE="Proceed with deployment? (y/N): " + if /i not "!FINAL_CHOICE!"=="y" ( + echo [INFO] Deployment cancelled + pause + exit /b 0 + ) + ) + + if not "!DEPLOY_CHOICE!"=="1" if not "!DEPLOY_CHOICE!"=="3" ( + echo [ERROR] Invalid choice. Deployment cancelled. + pause + exit /b 1 + ) + + echo. + echo [INFO] Proceeding with deployment... + echo [INFO] Current application will be stopped during deployment + +) else ( + echo [INFO] Application is not currently running + echo [INFO] Proceeding with fresh deployment +) + +REM 4. Create backup timestamp +for /f "tokens=2 delims==" %%I in ('wmic os get localdatetime /value') do if not "%%I"=="" set DATETIME=%%I +set BACKUP_TIMESTAMP=!DATETIME:~0,8!_!DATETIME:~8,6! + +REM 5. Create backup (if existing JAR exists) +echo. +echo =============== Create Backup =============== +ssh !SERVER_USER!@!SERVER_IP! "mkdir -p !BACKUP_DIR!" + +ssh !SERVER_USER!@!SERVER_IP! " +if [ -f !SERVER_PATH!/!JAR_NAME! 
]; then + echo '[INFO] Creating backup of current version...' + cp !SERVER_PATH!/!JAR_NAME! !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP! + echo '[SUCCESS] Backup created: !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP!' + ls -la !BACKUP_DIR!/!JAR_NAME!.backup.!BACKUP_TIMESTAMP! +else + echo '[INFO] No existing JAR file to backup (first deployment)' +fi +" + +REM 6. Stop application (if running) +if !APP_STATUS! equ 0 ( + echo. + echo =============== Stop Current Application =============== + echo [INFO] Gracefully stopping current application... + ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh stop" + if !ERRORLEVEL! neq 0 ( + echo [ERROR] Failed to stop application gracefully + set /p FORCE_STOP="Force stop and continue? (y/N): " + if /i not "!FORCE_STOP!"=="y" ( + echo [INFO] Deployment cancelled + exit /b 1 + ) + echo [INFO] Attempting force stop... + ssh !SERVER_USER!@!SERVER_IP! "pkill -f !JAR_NAME! || true" + ) + echo [SUCCESS] Application stopped +) + +REM 7. Deploy new JAR +echo. +echo =============== Deploy New Version =============== +echo [INFO] Transferring new JAR file... + +scp "!JAR_PATH!" !SERVER_USER!@!SERVER_IP!:!SERVER_PATH!/ +if !ERRORLEVEL! neq 0 ( + echo [ERROR] File transfer failed + goto :deployment_failed +) + +ssh !SERVER_USER!@!SERVER_IP! "chmod +x !SERVER_PATH!/!JAR_NAME!" +echo [SUCCESS] New version deployed + +REM 8. Transfer version info +if exist "target\version.txt" ( + scp "target\version.txt" !SERVER_USER!@!SERVER_IP!:!SERVER_PATH!/ +) + +REM 9. Start new application +echo. +echo =============== Start New Application =============== +echo [INFO] Starting new version... + +ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh start" +if !ERRORLEVEL! neq 0 ( + echo [ERROR] Failed to start new application + goto :deployment_failed +) + +REM 10. Verify deployment +echo. 
+echo =============== Verify Deployment =============== +echo [INFO] Waiting for application startup (30 seconds)... +timeout /t 30 /nobreak > nul + +ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" +if !ERRORLEVEL! neq 0 ( + echo [ERROR] New application is not running properly + goto :deployment_failed +) + +echo [INFO] Performing health check... +ssh !SERVER_USER!@!SERVER_IP! "curl -f http://localhost:8090/actuator/health --max-time 10" 2>nul +if !ERRORLEVEL! neq 0 ( + echo [WARN] Health check failed, but application is running + echo [INFO] Manual verification recommended +) + +REM 11. Success +echo. +echo =============== Deployment Successful =============== +echo [SUCCESS] Safe deployment completed successfully! +echo [INFO] Deployment time: !date! !time! +echo [INFO] Backup: !JAR_NAME!.backup.!BACKUP_TIMESTAMP! +echo [INFO] Dashboard: http://!SERVER_IP!:8090/static/admin/batch-admin.html +echo. +echo Quick commands: +echo server-status.bat - Check status +echo server-logs.bat tail - Monitor logs +echo rollback.bat !BACKUP_TIMESTAMP! - Rollback if needed + +goto :end + +:deployment_failed +echo. +echo =============== Deployment Failed =============== +echo [ERROR] Deployment failed! +echo. +set /p AUTO_ROLLBACK="Attempt automatic rollback? (y/N): " +if /i "!AUTO_ROLLBACK!"=="y" ( + if defined BACKUP_TIMESTAMP ( + echo [INFO] Attempting rollback to: !BACKUP_TIMESTAMP! + call rollback.bat !BACKUP_TIMESTAMP! + ) else ( + echo [ERROR] No backup available for automatic rollback + ) +) else ( + echo [INFO] Manual recovery required + echo [INFO] Available backups: + ssh !SERVER_USER!@!SERVER_IP! 
"ls -la !BACKUP_DIR!/!JAR_NAME!.backup.* 2>/dev/null || echo 'No backups found'" +) +exit /b 1 + +:end +endlocal \ No newline at end of file diff --git a/scripts/diagnose-datasource-issue.sql b/scripts/diagnose-datasource-issue.sql new file mode 100644 index 0000000..105cfc2 --- /dev/null +++ b/scripts/diagnose-datasource-issue.sql @@ -0,0 +1,139 @@ +-- DataSource 문제 진단 SQL +-- 10.26.252.51과 10.29.17.90 양쪽에서 실행하여 비교 + +-- ============================================ +-- 1. 현재 활성 연결 확인 +-- ============================================ +SELECT + pid, + usename, + application_name, + client_addr, + backend_start, + state, + query_start, + LEFT(query, 100) as current_query +FROM pg_stat_activity +WHERE datname IN ('mdadb', 'mpcdb2') +AND application_name LIKE '%vessel%' +ORDER BY backend_start DESC; + +-- ============================================ +-- 2. 최근 INSERT/UPDATE 통계 확인 +-- ============================================ +SELECT + schemaname, + tablename, + n_tup_ins as total_inserts, + n_tup_upd as total_updates, + n_tup_del as total_deletes, + n_live_tup as live_rows, + last_autoanalyze, + last_autovacuum +FROM pg_stat_user_tables +WHERE schemaname = 'signal' +AND tablename IN ( + 't_vessel_tracks_5min', + 't_vessel_tracks_hourly', + 't_vessel_tracks_daily', + 't_abnormal_tracks', + 't_vessel_latest_position' +) +ORDER BY n_tup_ins DESC; + +-- ============================================ +-- 3. 
최근 데이터 확인 (마지막 INSERT 시간) +-- ============================================ + +-- 5분 집계 +SELECT + 'tracks_5min' as table_name, + COUNT(*) as total_rows, + MAX(time_bucket) as last_time_bucket, + NOW() - MAX(time_bucket) as data_delay +FROM signal.t_vessel_tracks_5min; + +-- 시간 집계 +SELECT + 'tracks_hourly' as table_name, + COUNT(*) as total_rows, + MAX(time_bucket) as last_time_bucket, + NOW() - MAX(time_bucket) as data_delay +FROM signal.t_vessel_tracks_hourly; + +-- 일 집계 +SELECT + 'tracks_daily' as table_name, + COUNT(*) as total_rows, + MAX(time_bucket) as last_time_bucket, + NOW() - MAX(time_bucket) as data_delay +FROM signal.t_vessel_tracks_daily; + +-- 비정상 궤적 +SELECT + 'abnormal_tracks' as table_name, + COUNT(*) as total_rows, + MAX(time_bucket) as last_time_bucket, + NOW() - MAX(time_bucket) as data_delay +FROM signal.t_abnormal_tracks; + +-- 최신 위치 +SELECT + 'latest_position' as table_name, + COUNT(*) as total_rows, + MAX(last_update) as last_update, + NOW() - MAX(last_update) as data_delay +FROM signal.t_vessel_latest_position; + +-- ============================================ +-- 4. 특정 시간대 데이터 확인 (지난 1시간) +-- ============================================ +SELECT + '5min_last_hour' as category, + COUNT(*) as count, + COUNT(DISTINCT sig_src_cd) as source_count, + COUNT(DISTINCT target_id) as vessel_count +FROM signal.t_vessel_tracks_5min +WHERE time_bucket >= NOW() - INTERVAL '1 hour'; + +SELECT + 'hourly_last_day' as category, + COUNT(*) as count, + COUNT(DISTINCT sig_src_cd) as source_count, + COUNT(DISTINCT target_id) as vessel_count +FROM signal.t_vessel_tracks_hourly +WHERE time_bucket >= NOW() - INTERVAL '1 day'; + +-- ============================================ +-- 5. 
테이블 크기 확인 +-- ============================================ +SELECT + schemaname, + tablename, + pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) AS total_size, + pg_size_pretty(pg_relation_size(schemaname||'.'||tablename)) AS table_size, + pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename) - pg_relation_size(schemaname||'.'||tablename)) AS indexes_size +FROM pg_tables +WHERE schemaname = 'signal' +AND tablename IN ( + 't_vessel_tracks_5min', + 't_vessel_tracks_hourly', + 't_vessel_tracks_daily', + 't_abnormal_tracks', + 't_vessel_latest_position' +) +ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC; + +-- ============================================ +-- 6. 샘플 데이터 확인 (최근 10개) +-- ============================================ +SELECT + sig_src_cd, + target_id, + time_bucket, + point_count, + avg_speed, + max_speed +FROM signal.t_vessel_tracks_5min +ORDER BY time_bucket DESC +LIMIT 10; diff --git a/scripts/enable-sql-logging.yml b/scripts/enable-sql-logging.yml new file mode 100644 index 0000000..e33ce7a --- /dev/null +++ b/scripts/enable-sql-logging.yml @@ -0,0 +1,24 @@ +# application.yml 또는 application-prod.yml에 추가 +# 실제 SQL 에러를 확인하기 위한 로깅 설정 + +logging: + level: + # PostgreSQL JDBC 드라이버 로그 + org.postgresql: DEBUG + org.postgresql.Driver: DEBUG + + # Spring JDBC 로그 + org.springframework.jdbc: DEBUG + org.springframework.jdbc.core.JdbcTemplate: DEBUG + org.springframework.jdbc.core.StatementCreatorUtils: TRACE + + # Spring Batch 로그 + org.springframework.batch: DEBUG + + # 배치 프로세서 로그 + gc.mda.signal_batch.batch.processor: DEBUG + gc.mda.signal_batch.batch.processor.HourlyTrackProcessor: TRACE + gc.mda.signal_batch.batch.processor.DailyTrackProcessor: TRACE + + # SQL 쿼리 파라미터 로깅 + org.springframework.jdbc.core.namedparam: TRACE diff --git a/scripts/fix-invalid-geometry.sql b/scripts/fix-invalid-geometry.sql new file mode 100644 index 0000000..71fd0e7 --- /dev/null +++ b/scripts/fix-invalid-geometry.sql @@ -0,0 +1,122 @@ 
+-- Invalid geometry 수정 스크립트 +-- "Too few points" 에러를 해결하기 위해 단일 포인트를 2번 반복 + +-- ======================================== +-- 1. 백업 (선택사항) +-- ======================================== +-- CREATE TABLE signal.t_vessel_tracks_5min_backup_20251107 AS +-- SELECT * FROM signal.t_vessel_tracks_5min +-- WHERE track_geom IS NOT NULL AND NOT public.ST_IsValid(track_geom); + +-- ======================================== +-- 2. Invalid geometry 수정 (DRY RUN - 먼저 확인) +-- ======================================== +SELECT + 'DRY RUN - Will fix these records' as action, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as current_points, + public.ST_AsText(track_geom) as current_wkt, + -- 수정 후 WKT 미리보기 + CASE + WHEN public.ST_NPoints(track_geom) = 1 THEN + 'LINESTRING M(' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ',' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ')' + ELSE 'NO FIX NEEDED' + END as new_wkt +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND public.ST_IsValidReason(track_geom) LIKE '%Too few points%' +LIMIT 10; + +-- ======================================== +-- 3. 실제 수정 (확인 후 실행) +-- ======================================== +-- 주의: 이 쿼리는 실제 데이터를 변경합니다! +-- DRY RUN 결과를 확인한 후 주석을 해제하고 실행하세요. 
+ +/* +UPDATE signal.t_vessel_tracks_5min +SET track_geom = public.ST_GeomFromText( + 'LINESTRING M(' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ',' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ')', + 4326 +) +WHERE track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) = 1 + AND public.ST_IsValidReason(track_geom) LIKE '%Too few points%'; +*/ + +-- ======================================== +-- 4. 수정 결과 확인 +-- ======================================== +SELECT + 'AFTER FIX' as status, + COUNT(*) as total_records, + COUNT(CASE WHEN public.ST_IsValid(track_geom) THEN 1 END) as valid_count, + COUNT(CASE WHEN NOT public.ST_IsValid(track_geom) THEN 1 END) as invalid_count +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL; + +-- ======================================== +-- 5. 여전히 Invalid한 geometry 확인 +-- ======================================== +SELECT + 'REMAINING INVALID' as status, + public.ST_IsValidReason(track_geom) as reason, + COUNT(*) as count +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND NOT public.ST_IsValid(track_geom) +GROUP BY public.ST_IsValidReason(track_geom); + +-- ======================================== +-- 6. 
Hourly 테이블도 동일하게 수정 (필요시) +-- ======================================== +/* +UPDATE signal.t_vessel_tracks_hourly +SET track_geom = public.ST_GeomFromText( + 'LINESTRING M(' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ',' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ')', + 4326 +) +WHERE track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) = 1 + AND public.ST_IsValidReason(track_geom) LIKE '%Too few points%'; +*/ + +-- ======================================== +-- 7. Daily 테이블도 동일하게 수정 (필요시) +-- ======================================== +/* +UPDATE signal.t_vessel_tracks_daily +SET track_geom = public.ST_GeomFromText( + 'LINESTRING M(' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ',' || + public.ST_X(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_Y(public.ST_PointN(track_geom, 1)) || ' ' || + public.ST_M(public.ST_PointN(track_geom, 1)) || ')', + 4326 +) +WHERE track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) = 1 + AND public.ST_IsValidReason(track_geom) LIKE '%Too few points%'; +*/ diff --git a/scripts/fix-postgis-schema.ps1 b/scripts/fix-postgis-schema.ps1 new file mode 100644 index 0000000..c5f7191 --- /dev/null +++ b/scripts/fix-postgis-schema.ps1 @@ -0,0 +1,24 @@ +# PostGIS 함수 스키마 명시 스크립트 +# ST_GeomFromText -> public.ST_GeomFromText로 변경 + +$javaDir = "C:\Users\lht87\IdeaProjects\signal_batch\src\main\java" +$files = Get-ChildItem -Path $javaDir -Filter "*.java" -Recurse + +$count = 0 +foreach ($file in $files) { + $content = Get-Content $file.FullName -Raw -Encoding UTF8 + + # ST_GeomFromText를 public.ST_GeomFromText로 변경 (이미 public.가 붙어있지 않은 경우만) + $newContent = $content -replace 
'(?/dev/null + +if [ $? -eq 0 ]; then + echo -e "${GREEN}✓ Full backup created: $BACKUP_FILE${NC}" +else + echo -e "${YELLOW}⚠ Backup may have failed, but continuing...${NC}" +fi + +echo "" +echo "2. Stopping application if running..." + +# PID 확인 +if [ -f "/devdata/apps/bridge-db-monitoring/vessel-batch.pid" ]; then + PID=$(cat /devdata/apps/bridge-db-monitoring/vessel-batch.pid) + if kill -0 $PID 2>/dev/null; then + echo " Stopping application (PID: $PID)..." + kill -15 $PID + sleep 5 + if kill -0 $PID 2>/dev/null; then + echo " Force killing application..." + kill -9 $PID + fi + fi +fi + +echo "" +echo "3. FORCE resetting batch metadata tables..." + +# CASCADE를 사용한 강제 초기화 +psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME << EOF +-- 트랜잭션 시작 +BEGIN; + +-- 외래 키 제약 임시 비활성화 +SET session_replication_role = 'replica'; + +-- 모든 배치 테이블 강제 초기화 +TRUNCATE TABLE $DB_SCHEMA.batch_step_execution_context CASCADE; +TRUNCATE TABLE $DB_SCHEMA.batch_step_execution CASCADE; +TRUNCATE TABLE $DB_SCHEMA.batch_job_execution_context CASCADE; +TRUNCATE TABLE $DB_SCHEMA.batch_job_execution_params CASCADE; +TRUNCATE TABLE $DB_SCHEMA.batch_job_execution CASCADE; +TRUNCATE TABLE $DB_SCHEMA.batch_job_instance CASCADE; + +-- 시퀀스 강제 리셋 +ALTER SEQUENCE IF EXISTS $DB_SCHEMA.batch_job_execution_seq RESTART WITH 1; +ALTER SEQUENCE IF EXISTS $DB_SCHEMA.batch_job_seq RESTART WITH 1; +ALTER SEQUENCE IF EXISTS $DB_SCHEMA.batch_step_execution_seq RESTART WITH 1; + +-- 외래 키 제약 재활성화 +SET session_replication_role = 'origin'; + +-- 커밋 +COMMIT; + +-- 통계 업데이트 +ANALYZE $DB_SCHEMA.batch_job_instance; +ANALYZE $DB_SCHEMA.batch_job_execution; +ANALYZE $DB_SCHEMA.batch_job_execution_params; +ANALYZE $DB_SCHEMA.batch_job_execution_context; +ANALYZE $DB_SCHEMA.batch_step_execution; +ANALYZE $DB_SCHEMA.batch_step_execution_context; +EOF + +if [ $? 
-eq 0 ]; then + echo -e "${GREEN}✓ Batch metadata tables FORCE reset successfully${NC}" +else + echo -e "${RED}✗ Force reset encountered errors, but may have partially succeeded${NC}" +fi + +echo "" +echo "4. Verifying force reset..." + +# 각 테이블 개별 확인 +for table in batch_job_instance batch_job_execution batch_job_execution_params batch_job_execution_context batch_step_execution batch_step_execution_context; do + COUNT=$(psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME -t -c " + SELECT COUNT(*) FROM $DB_SCHEMA.$table;" 2>/dev/null | xargs) + + if [ -z "$COUNT" ]; then + COUNT="ERROR" + fi + + if [ "$COUNT" = "0" ]; then + echo -e " ${GREEN}✓${NC} $table: $COUNT records" + elif [ "$COUNT" = "ERROR" ]; then + echo -e " ${RED}✗${NC} $table: Could not query" + else + echo -e " ${YELLOW}⚠${NC} $table: $COUNT records remaining" + fi +done + +echo "" +echo "5. Optional: Clear ALL aggregation data (complete fresh start)" +read -p "Do you want to clear ALL aggregation data too? (yes/no): " CLEAR_ALL + +if [ "$CLEAR_ALL" = "yes" ]; then + echo "" + echo "Clearing ALL aggregation data..." + + psql -h $DB_HOST -p $DB_PORT -U $DB_USER -d $DB_NAME << EOF +BEGIN; + +-- 강제로 모든 집계 데이터 초기화 +SET session_replication_role = 'replica'; + +-- 최신 위치 정보 +TRUNCATE TABLE signal.t_vessel_latest_position CASCADE; + +-- 모든 파티션 테이블 초기화 +DO \$\$ +DECLARE + r RECORD; +BEGIN + FOR r IN + SELECT tablename + FROM pg_tables + WHERE schemaname = 'signal' + AND (tablename LIKE 't_tile_summary_%' + OR tablename LIKE 't_area_statistics_%' + OR tablename LIKE 't_vessel_daily_tracks_%') + LOOP + EXECUTE 'TRUNCATE TABLE signal.' 
|| r.tablename || ' CASCADE'; + RAISE NOTICE 'Truncated table: signal.%', r.tablename; + END LOOP; +END\$\$; + +-- 배치 성능 메트릭 +TRUNCATE TABLE signal.t_batch_performance_metrics CASCADE; + +SET session_replication_role = 'origin'; + +COMMIT; +EOF + + echo -e "${GREEN}✓ All aggregation data cleared${NC}" +fi + +echo "" +echo "================================================" +echo "FORCE Reset Complete!" +echo "" +echo -e "${YELLOW}IMPORTANT: The application needs to be restarted!${NC}" +echo "" +echo "Next steps:" +echo "1. Start the application:" +echo " cd /devdata/apps/bridge-db-monitoring" +echo " ./run-on-query-server.sh" +echo "" +echo "2. Verify health:" +echo " curl http://localhost:8090/actuator/health" +echo "" +echo "3. Start fresh batch job:" +echo " curl -X POST http://localhost:8090/admin/batch/job/run \\" +echo " -H 'Content-Type: application/json' \\" +echo " -d '{\"jobName\": \"vesselAggregationJob\", \"parameters\": {\"tileLevel\": 1}}'" +echo "" +echo "Full backup saved to: $BACKUP_FILE" +echo "================================================" + +# 자동 시작 옵션 +echo "" +read -p "Do you want to start the application now? (yes/no): " START_NOW + +if [ "$START_NOW" = "yes" ]; then + echo "Starting application..." 
+ cd /devdata/apps/bridge-db-monitoring + ./run-on-query-server.sh +fi diff --git a/scripts/install-postgis-in-signal-schema.sql b/scripts/install-postgis-in-signal-schema.sql new file mode 100644 index 0000000..5bc71fc --- /dev/null +++ b/scripts/install-postgis-in-signal-schema.sql @@ -0,0 +1,59 @@ +-- PostGIS를 signal 스키마에 설치하는 스크립트 +-- 10.29.17.90 서버의 mpcdb2 데이터베이스에서 실행 + +-- 방법 1: signal 스키마에 PostGIS extension 생성 (권장) +-- 이미 public에 설치되어 있다면, signal 스키마에 함수들을 복사하는 방식으로 접근 + +-- 현재 PostGIS 상태 확인 +SELECT extname, extversion, nspname +FROM pg_extension e +JOIN pg_namespace n ON e.extnamespace = n.oid +WHERE extname LIKE 'post%'; + +-- 옵션 1: signal 스키마에 PostGIS 함수 wrapper 생성 +-- (public 스키마의 함수를 호출하는 wrapper) +CREATE OR REPLACE FUNCTION signal.ST_GeomFromText(text) +RETURNS geometry +AS $$ + SELECT public.ST_GeomFromText($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION signal.ST_GeomFromText(text, integer) +RETURNS geometry +AS $$ + SELECT public.ST_GeomFromText($1, $2); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION signal.ST_Length(geometry) +RETURNS double precision +AS $$ + SELECT public.ST_Length($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION signal.ST_MakeLine(geometry[]) +RETURNS geometry +AS $$ + SELECT public.ST_MakeLine($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +-- 자주 사용하는 다른 함수들도 추가 +CREATE OR REPLACE FUNCTION signal.ST_X(geometry) +RETURNS double precision +AS $$ + SELECT public.ST_X($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION signal.ST_Y(geometry) +RETURNS double precision +AS $$ + SELECT public.ST_Y($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +CREATE OR REPLACE FUNCTION signal.ST_M(geometry) +RETURNS double precision +AS $$ + SELECT public.ST_M($1); +$$ LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE; + +-- 검증 +SELECT signal.ST_GeomFromText('POINT(126.0 37.0)', 4326); diff --git 
a/scripts/list-failed-jobs.sql b/scripts/list-failed-jobs.sql new file mode 100644 index 0000000..617510e --- /dev/null +++ b/scripts/list-failed-jobs.sql @@ -0,0 +1,85 @@ +-- 실패한 배치 Job 조회 및 분석 + +-- 1. 실패한 Job 목록 (최근 50개) +SELECT + '=== FAILED JOBS (Recent 50) ===' as category, + bje.JOB_EXECUTION_ID, + bji.JOB_NAME, + bje.START_TIME, + bje.END_TIME, + bje.STATUS, + bje.EXIT_CODE, + LEFT(bje.EXIT_MESSAGE, 100) as EXIT_MESSAGE_SHORT, + -- Job Parameters 표시 + (SELECT string_agg(PARAMETER_NAME || '=' || PARAMETER_VALUE, ', ') + FROM BATCH_JOB_EXECUTION_PARAMS + WHERE JOB_EXECUTION_ID = bje.JOB_EXECUTION_ID + AND IDENTIFYING = 'Y') as PARAMETERS +FROM BATCH_JOB_EXECUTION bje +JOIN BATCH_JOB_INSTANCE bji ON bje.JOB_INSTANCE_ID = bji.JOB_INSTANCE_ID +WHERE bje.STATUS = 'FAILED' +ORDER BY bje.JOB_EXECUTION_ID DESC +LIMIT 50; + +-- 2. 실패한 Step 상세 정보 +SELECT + '=== FAILED STEPS ===' as category, + bse.STEP_EXECUTION_ID, + bse.JOB_EXECUTION_ID, + bji.JOB_NAME, + bse.STEP_NAME, + bse.STATUS, + bse.READ_COUNT, + bse.WRITE_COUNT, + bse.COMMIT_COUNT, + bse.ROLLBACK_COUNT, + bse.READ_SKIP_COUNT, + bse.PROCESS_SKIP_COUNT, + bse.WRITE_SKIP_COUNT, + LEFT(bse.EXIT_MESSAGE, 100) as EXIT_MESSAGE_SHORT +FROM BATCH_STEP_EXECUTION bse +JOIN BATCH_JOB_EXECUTION bje ON bse.JOB_EXECUTION_ID = bje.JOB_EXECUTION_ID +JOIN BATCH_JOB_INSTANCE bji ON bje.JOB_INSTANCE_ID = bji.JOB_INSTANCE_ID +WHERE bse.STATUS = 'FAILED' +ORDER BY bse.STEP_EXECUTION_ID DESC +LIMIT 50; + +-- 3. Job 타입별 실패 통계 +SELECT + '=== FAILURE STATISTICS BY JOB ===' as category, + bji.JOB_NAME, + COUNT(*) as FAILED_COUNT, + MAX(bje.END_TIME) as LAST_FAILURE_TIME +FROM BATCH_JOB_EXECUTION bje +JOIN BATCH_JOB_INSTANCE bji ON bje.JOB_INSTANCE_ID = bji.JOB_INSTANCE_ID +WHERE bje.STATUS = 'FAILED' +GROUP BY bji.JOB_NAME +ORDER BY FAILED_COUNT DESC; + +-- 4. 
Step별 실패 통계 +SELECT + '=== FAILURE STATISTICS BY STEP ===' as category, + STEP_NAME, + COUNT(*) as FAILED_COUNT, + MAX(END_TIME) as LAST_FAILURE_TIME +FROM BATCH_STEP_EXECUTION +WHERE STATUS = 'FAILED' +GROUP BY STEP_NAME +ORDER BY FAILED_COUNT DESC; + +-- 5. 최근 24시간 실패 현황 +SELECT + '=== LAST 24 HOURS ===' as category, + COUNT(*) as FAILED_JOBS_24H +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'FAILED' + AND START_TIME >= CURRENT_TIMESTAMP - INTERVAL '24 hours'; + +-- 6. 전체 상태 요약 +SELECT + '=== STATUS SUMMARY ===' as category, + STATUS, + COUNT(*) as COUNT +FROM BATCH_JOB_EXECUTION +GROUP BY STATUS +ORDER BY COUNT DESC; diff --git a/scripts/mark-failed-jobs-as-abandoned.sql b/scripts/mark-failed-jobs-as-abandoned.sql new file mode 100644 index 0000000..08922ab --- /dev/null +++ b/scripts/mark-failed-jobs-as-abandoned.sql @@ -0,0 +1,75 @@ +-- 실패한 배치 Job과 Step을 ABANDONED 상태로 변경 +-- 주의: 이 스크립트는 실패한 job을 강제로 종료시킵니다. +-- 재시도가 필요한 경우 이 스크립트를 실행하지 마세요. + +-- 1. 현재 실패 상태 확인 +SELECT + '=== BEFORE UPDATE ===' as status, + COUNT(*) as failed_jobs +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'FAILED'; + +SELECT + '=== BEFORE UPDATE ===' as status, + COUNT(*) as failed_steps +FROM BATCH_STEP_EXECUTION +WHERE STATUS = 'FAILED'; + +-- 2. 실패한 STEP을 ABANDONED로 변경 +UPDATE BATCH_STEP_EXECUTION +SET + STATUS = 'ABANDONED', + EXIT_CODE = 'ABANDONED', + EXIT_MESSAGE = 'Manually marked as ABANDONED - Original status: FAILED', + END_TIME = COALESCE(END_TIME, CURRENT_TIMESTAMP), + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE STATUS = 'FAILED'; + +-- 3. 실패한 JOB을 ABANDONED로 변경 +UPDATE BATCH_JOB_EXECUTION +SET + STATUS = 'ABANDONED', + EXIT_CODE = 'ABANDONED', + EXIT_MESSAGE = 'Manually marked as ABANDONED - Original status: FAILED', + END_TIME = COALESCE(END_TIME, CURRENT_TIMESTAMP), + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE STATUS = 'FAILED'; + +-- 4. 
업데이트 후 상태 확인 +SELECT + '=== AFTER UPDATE ===' as status, + COUNT(*) as failed_jobs +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'FAILED'; + +SELECT + '=== AFTER UPDATE ===' as status, + COUNT(*) as failed_steps +FROM BATCH_STEP_EXECUTION +WHERE STATUS = 'FAILED'; + +SELECT + '=== ABANDONED COUNT ===' as status, + COUNT(*) as abandoned_jobs +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'ABANDONED'; + +SELECT + '=== ABANDONED COUNT ===' as status, + COUNT(*) as abandoned_steps +FROM BATCH_STEP_EXECUTION +WHERE STATUS = 'ABANDONED'; + +-- 5. 최근 ABANDONED 처리된 Job 목록 확인 +SELECT + JOB_EXECUTION_ID, + JOB_INSTANCE_ID, + START_TIME, + END_TIME, + STATUS, + EXIT_CODE, + EXIT_MESSAGE +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'ABANDONED' +ORDER BY JOB_EXECUTION_ID DESC +LIMIT 10; diff --git a/scripts/mark-specific-job-as-abandoned.sql b/scripts/mark-specific-job-as-abandoned.sql new file mode 100644 index 0000000..f6fc0d5 --- /dev/null +++ b/scripts/mark-specific-job-as-abandoned.sql @@ -0,0 +1,75 @@ +-- 특정 JOB_EXECUTION_ID를 ABANDONED로 변경 +-- 사용법: :job_execution_id 를 실제 ID로 변경 후 실행 + +-- 변수 설정 (PostgreSQL에서는 psql 변수 사용) +-- psql -v job_id=12345 -f mark-specific-job-as-abandoned.sql +-- 또는 아래 :job_execution_id 를 직접 숫자로 변경 + +-- 1. 해당 Job 상태 확인 +SELECT + '=== BEFORE UPDATE ===' as status, + JOB_EXECUTION_ID, + JOB_INSTANCE_ID, + START_TIME, + END_TIME, + STATUS, + EXIT_CODE, + EXIT_MESSAGE +FROM BATCH_JOB_EXECUTION +WHERE JOB_EXECUTION_ID = :job_execution_id; + +-- 2. 해당 Job의 Step들 상태 확인 +SELECT + '=== STEPS BEFORE UPDATE ===' as status, + STEP_EXECUTION_ID, + STEP_NAME, + STATUS, + EXIT_CODE +FROM BATCH_STEP_EXECUTION +WHERE JOB_EXECUTION_ID = :job_execution_id +ORDER BY STEP_EXECUTION_ID; + +-- 3. 
Step을 ABANDONED로 변경 +UPDATE BATCH_STEP_EXECUTION +SET + STATUS = 'ABANDONED', + EXIT_CODE = 'ABANDONED', + EXIT_MESSAGE = 'Manually marked as ABANDONED - Original status: ' || STATUS, + END_TIME = COALESCE(END_TIME, CURRENT_TIMESTAMP), + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE JOB_EXECUTION_ID = :job_execution_id + AND STATUS IN ('FAILED', 'STARTED', 'STOPPING'); + +-- 4. Job을 ABANDONED로 변경 +UPDATE BATCH_JOB_EXECUTION +SET + STATUS = 'ABANDONED', + EXIT_CODE = 'ABANDONED', + EXIT_MESSAGE = 'Manually marked as ABANDONED - Original status: ' || STATUS, + END_TIME = COALESCE(END_TIME, CURRENT_TIMESTAMP), + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE JOB_EXECUTION_ID = :job_execution_id + AND STATUS IN ('FAILED', 'STARTED', 'STOPPING'); + +-- 5. 업데이트 결과 확인 +SELECT + '=== AFTER UPDATE ===' as status, + JOB_EXECUTION_ID, + JOB_INSTANCE_ID, + START_TIME, + END_TIME, + STATUS, + EXIT_CODE, + EXIT_MESSAGE +FROM BATCH_JOB_EXECUTION +WHERE JOB_EXECUTION_ID = :job_execution_id; + +SELECT + '=== STEPS AFTER UPDATE ===' as status, + STEP_EXECUTION_ID, + STEP_NAME, + STATUS, + EXIT_CODE +FROM BATCH_STEP_EXECUTION +WHERE JOB_EXECUTION_ID = :job_execution_id +ORDER BY STEP_EXECUTION_ID; diff --git a/scripts/monitor-query-server.sh b/scripts/monitor-query-server.sh new file mode 100644 index 0000000..6ce7a83 --- /dev/null +++ b/scripts/monitor-query-server.sh @@ -0,0 +1,212 @@ +#!/bin/bash + +# Query DB 서버 리소스 모니터링 스크립트 +# PostgreSQL과 배치 애플리케이션 리소스 경합 모니터링 + +# 애플리케이션 경로 +APP_HOME="/devdata/apps/bridge-db-monitoring" +LOG_DIR="$APP_HOME/logs" +mkdir -p $LOG_DIR + +# Java 경로 (jstat 명령어용) +JAVA_HOME="/devdata/apps/jdk-17.0.8" +JSTAT="$JAVA_HOME/bin/jstat" + +# 색상 코드 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# CSV 헤더 생성 (첫 실행 시) +if [ ! 
-f "$LOG_DIR/resource-monitor.csv" ]; then + echo "timestamp,pg_cpu,java_cpu,delay_minutes,throughput,collect_connections" > $LOG_DIR/resource-monitor.csv +fi + +while true; do + clear + echo "=========================================" + echo "Vessel Batch Resource Monitor" + echo "Time: $(date)" + echo "App Home: $APP_HOME" + echo "=========================================" + + # PID 파일에서 프로세스 ID 읽기 + if [ -f "$APP_HOME/vessel-batch.pid" ]; then + JAVA_PID=$(cat $APP_HOME/vessel-batch.pid) + else + JAVA_PID=$(pgrep -f "vessel-batch-aggregation.jar") + fi + + # 1. CPU 사용률 + echo -e "\n${GREEN}[CPU Usage]${NC}" + # PostgreSQL CPU 사용률 + PG_CPU=$(ps aux | grep postgres | grep -v grep | awk '{sum+=$3} END {printf "%.1f", sum}' || echo "0") + if [ -z "$PG_CPU" ]; then PG_CPU="0"; fi + echo "PostgreSQL Total: ${PG_CPU}%" + + # Java 배치 CPU 사용률 + if [ ! -z "$JAVA_PID" ] && kill -0 $JAVA_PID 2>/dev/null; then + JAVA_CPU=$(ps aux | grep $JAVA_PID | grep -v grep | awk '{printf "%.1f", $3}' || echo "0") + if [ -z "$JAVA_CPU" ]; then JAVA_CPU="0"; fi + echo "Batch Application: ${JAVA_CPU}% (PID: $JAVA_PID)" + else + JAVA_CPU="0.0" + echo "Batch Application: Not Running" + fi + + # Top 5 PostgreSQL 프로세스 + echo -e "\nTop PostgreSQL Processes:" + ps aux | grep postgres | grep -v grep | sort -k3 -nr | head -5 | awk '{printf " %-8s %5s%% %s\n", $2, $3, $11}' + + # 2. 메모리 사용률 + echo -e "\n${GREEN}[Memory Usage]${NC}" + free -h | grep -E "Mem|Swap" + + # PostgreSQL 공유 메모리 + PG_SHARED=$(ipcs -m 2>/dev/null | grep postgres | awk '{sum+=$5} END {printf "%.1f", sum/1024/1024/1024}') + if [ ! -z "$PG_SHARED" ]; then + echo "PostgreSQL Shared Memory: ${PG_SHARED}GB" + fi + + # Java 힙 사용률 + if [ ! -z "$JAVA_PID" ] && kill -0 $JAVA_PID 2>/dev/null; then + if [ -x "$JSTAT" ]; then + JAVA_HEAP=$($JSTAT -gc $JAVA_PID 2>/dev/null | tail -1 | awk '{printf "%.1f", ($3+$4+$6+$8)/1024}') + if [ ! -z "$JAVA_HEAP" ]; then + echo "Java Heap Used: ${JAVA_HEAP}MB" + fi + fi + fi + + # 3. 
디스크 I/O + echo -e "\n${GREEN}[Disk I/O]${NC}" + iostat -x 1 2 2>/dev/null | grep -A5 "Device" | tail -n +7 | head -5 + + # 4. PostgreSQL 연결 상태 + echo -e "\n${GREEN}[Database Connections]${NC}" + # psql 명령어가 PATH에 없을 수 있으므로 전체 경로 사용 시도 + if command -v psql >/dev/null 2>&1; then + PSQL_CMD="psql" + else + # 일반적인 PostgreSQL 설치 경로들 + for path in /usr/pgsql-*/bin/psql /usr/bin/psql /usr/local/bin/psql; do + if [ -x "$path" ]; then + PSQL_CMD="$path" + break + fi + done + fi + + if [ ! -z "$PSQL_CMD" ]; then + $PSQL_CMD -h localhost -U mda -d mdadb -c " + SELECT + application_name, + client_addr, + COUNT(*) as connections, + string_agg(DISTINCT state, ', ') as states + FROM pg_stat_activity + WHERE datname = 'mdadb' + GROUP BY application_name, client_addr + ORDER BY connections DESC + LIMIT 10;" 2>/dev/null || echo "Unable to query database connections" + else + echo "psql command not found" + fi + + # 5. 배치 처리 상태 + echo -e "\n${GREEN}[Batch Processing Status]${NC}" + + if [ ! -z "$PSQL_CMD" ]; then + # 처리 지연 확인 + DELAY=$($PSQL_CMD -h localhost -U mda -d mdadb -t -c " + SELECT COALESCE(EXTRACT(EPOCH FROM (NOW() - MAX(last_update))) / 60, 0)::numeric(10,1) + FROM signal.t_vessel_latest_position;" 2>/dev/null | xargs) + + if [ ! -z "$DELAY" ] && [ "$DELAY" != "" ]; then + if [ $(echo "$DELAY > 120" | bc 2>/dev/null || echo 0) -eq 1 ]; then + echo -e "${RED}Processing Delay: ${DELAY} minutes ⚠️${NC}" + elif [ $(echo "$DELAY > 60" | bc 2>/dev/null || echo 0) -eq 1 ]; then + echo -e "${YELLOW}Processing Delay: ${DELAY} minutes ⚠️${NC}" + else + echo -e "${GREEN}Processing Delay: ${DELAY} minutes ✓${NC}" + fi + else + DELAY="0" + echo "Processing Delay: Unable to determine" + fi + + # 최근 처리량 + THROUGHPUT=$($PSQL_CMD -h localhost -U mda -d mdadb -t -c " + SELECT COALESCE(COUNT(*), 0) + FROM signal.t_vessel_latest_position + WHERE last_update > NOW() - INTERVAL '1 minute';" 2>/dev/null | xargs) + + if [ ! 
-z "$THROUGHPUT" ]; then + echo "Throughput: ${THROUGHPUT} vessels/minute" + else + THROUGHPUT="0" + echo "Throughput: Unable to determine" + fi + else + DELAY="0" + THROUGHPUT="0" + echo "Database metrics unavailable (psql not found)" + fi + + # 6. 네트워크 연결 (수집 DB) + echo -e "\n${GREEN}[Network to Collect DB]${NC}" + COLLECT_CONN=$(ss -tunp 2>/dev/null | grep :5432 | grep 10.26.252.39 | wc -l) + echo "Active connections to collect DB: ${COLLECT_CONN}" + + # 네트워크 통계 + if [ "$COLLECT_CONN" -gt 0 ]; then + ss -i dst 10.26.252.39:5432 2>/dev/null | grep -E "rtt|cwnd" | head -3 + fi + + # 7. 애플리케이션 로그 최근 에러 + echo -e "\n${GREEN}[Recent Application Errors]${NC}" + if [ -f "$LOG_DIR/app.log" ]; then + ERROR_COUNT=$(grep -c "ERROR" $LOG_DIR/app.log 2>/dev/null || echo 0) + echo "Total Errors in Log: $ERROR_COUNT" + + # 최근 5개 에러 표시 + if [ "$ERROR_COUNT" -gt 0 ]; then + echo "Recent Errors:" + grep "ERROR" $LOG_DIR/app.log | tail -5 | cut -c1-120 + fi + else + echo "Log file not found at $LOG_DIR/app.log" + fi + + # 8. 경고 사항 + echo -e "\n${YELLOW}[Warnings]${NC}" + + # CPU 경고 + TOTAL_CPU=$(echo "$PG_CPU + $JAVA_CPU" | bc 2>/dev/null || echo "0") + if [ ! -z "$TOTAL_CPU" ] && [ "$TOTAL_CPU" != "0" ]; then + if [ $(echo "$TOTAL_CPU > 80" | bc 2>/dev/null || echo 0) -eq 1 ]; then + echo -e "${RED}⚠ High CPU usage: ${TOTAL_CPU}%${NC}" + fi + fi + + # 메모리 경고 + MEM_AVAILABLE=$(free -g | grep Mem | awk '{print $7}') + if [ ! -z "$MEM_AVAILABLE" ] && [ "$MEM_AVAILABLE" -lt 10 ]; then + echo -e "${RED}⚠ Low available memory: ${MEM_AVAILABLE}GB${NC}" + fi + + # 처리 지연 경고 + if [ ! -z "$DELAY" ] && [ "$DELAY" != "0" ]; then + if [ $(echo "$DELAY > 120" | bc 2>/dev/null || echo 0) -eq 1 ]; then + echo -e "${RED}⚠ Processing delay exceeds 2 hours!${NC}" + fi + fi + + # 로그에 기록 + echo "$(date '+%Y-%m-%d %H:%M:%S'),${PG_CPU},${JAVA_CPU},${DELAY},${THROUGHPUT},${COLLECT_CONN}" >> $LOG_DIR/resource-monitor.csv + + # 다음 업데이트까지 대기 + echo -e "\n${GREEN}Next update in 30 seconds... 
(Ctrl+C to exit)${NC}" + sleep 30 +done diff --git a/scripts/monitor-realtime.sh b/scripts/monitor-realtime.sh new file mode 100644 index 0000000..24be36a --- /dev/null +++ b/scripts/monitor-realtime.sh @@ -0,0 +1,154 @@ +#!/bin/bash + +# 실시간 시스템 모니터링 스크립트 +# 부하 테스트 중 시스템 상태를 실시간으로 모니터링 + +# 색상 정의 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +# 애플리케이션 정보 +APP_HOST="10.26.252.48" +APP_PORT="8090" +DB_HOST_COLLECT="10.26.252.39" +DB_HOST_QUERY="10.26.252.48" +DB_PORT="5432" +DB_NAME="mdadb" +DB_USER="mdauser" + +# 화면 지우기 +clear_screen() { + clear +} + +# 헤더 출력 +print_header() { + echo -e "${BLUE}========================================${NC}" + echo -e "${BLUE} 선박 궤적 시스템 실시간 모니터링 ${NC}" + echo -e "${BLUE}========================================${NC}" + echo -e "시간: $(date '+%Y-%m-%d %H:%M:%S')" + echo "" +} + +# 애플리케이션 상태 확인 +check_app_status() { + echo -e "${GREEN}[애플리케이션 상태]${NC}" + + # Health check + health=$(curl -s "http://$APP_HOST:$APP_PORT/actuator/health" | jq -r '.status' 2>/dev/null || echo "UNKNOWN") + if [ "$health" == "UP" ]; then + echo -e "상태: ${GREEN}$health${NC}" + else + echo -e "상태: ${RED}$health${NC}" + fi + + # 실행 중인 Job + running_jobs=$(curl -s "http://$APP_HOST:$APP_PORT/admin/batch/job/running" | jq -r '.[]' 2>/dev/null || echo "N/A") + echo -e "실행 중인 Job: $running_jobs" + + # 메트릭 요약 + metrics=$(curl -s "http://$APP_HOST:$APP_PORT/admin/metrics/summary" 2>/dev/null) + if [ ! 
-z "$metrics" ]; then + echo -e "처리된 레코드: $(echo $metrics | jq -r '.processedRecords // "N/A"')" + echo -e "평균 처리 시간: $(echo $metrics | jq -r '.avgProcessingTime // "N/A"')ms" + fi + echo "" +} + +# 시스템 리소스 모니터링 +check_system_resources() { + echo -e "${GREEN}[시스템 리소스]${NC}" + + # CPU 사용률 + cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | cut -d'%' -f1) + echo -e "CPU 사용률: ${cpu_usage}%" + + # 메모리 사용률 + mem_info=$(free -g | grep "Mem:") + mem_total=$(echo $mem_info | awk '{print $2}') + mem_used=$(echo $mem_info | awk '{print $3}') + mem_percent=$(awk "BEGIN {printf \"%.1f\", ($mem_used/$mem_total)*100}") + echo -e "메모리: ${mem_used}GB / ${mem_total}GB (${mem_percent}%)" + + # 디스크 사용률 + disk_usage=$(df -h / | tail -1 | awk '{print $5}') + echo -e "디스크 사용률: $disk_usage" + echo "" +} + +# 데이터베이스 연결 모니터링 +check_db_connections() { + echo -e "${GREEN}[데이터베이스 연결]${NC}" + + # CollectDB 연결 + collect_conn=$(PGPASSWORD=$DB_PASS psql -h $DB_HOST_COLLECT -U $DB_USER -d $DB_NAME -t -c "SELECT count(*) FROM pg_stat_activity WHERE datname='$DB_NAME';" 2>/dev/null || echo "N/A") + echo -e "CollectDB 연결: $collect_conn" + + # QueryDB 연결 + query_conn=$(PGPASSWORD=$DB_PASS psql -h $DB_HOST_QUERY -U $DB_USER -d $DB_NAME -t -c "SELECT count(*) FROM pg_stat_activity WHERE datname='$DB_NAME';" 2>/dev/null || echo "N/A") + echo -e "QueryDB 연결: $query_conn" + echo "" +} + +# WebSocket 연결 모니터링 +check_websocket_status() { + echo -e "${GREEN}[WebSocket 상태]${NC}" + + ws_status=$(curl -s "http://$APP_HOST:$APP_PORT/api/websocket/status" 2>/dev/null) + if [ ! -z "$ws_status" ]; then + echo -e "활성 세션: $(echo $ws_status | jq -r '.activeSessions // "N/A"')" + echo -e "활성 쿼리: $(echo $ws_status | jq -r '.activeQueries // "N/A"')" + echo -e "처리된 메시지: $(echo $ws_status | jq -r '.totalMessagesProcessed // "N/A"')" + else + echo -e "WebSocket 상태를 가져올 수 없습니다." 
+ fi + echo "" +} + +# 성능 최적화 상태 +check_performance_status() { + echo -e "${GREEN}[성능 최적화 상태]${NC}" + + perf_status=$(curl -s "http://$APP_HOST:$APP_PORT/api/v1/performance/status" 2>/dev/null) + if [ ! -z "$perf_status" ]; then + echo -e "동적 청크 크기: $(echo $perf_status | jq -r '.currentChunkSize // "N/A"')" + echo -e "캐시 히트율: $(echo $perf_status | jq -r '.cacheHitRate // "N/A"')%" + echo -e "메모리 사용률: $(echo $perf_status | jq -r '.memoryUsage.usedPercentage // "N/A"')%" + else + echo -e "성능 상태를 가져올 수 없습니다." + fi + echo "" +} + +# 실시간 로그 tail (별도 터미널에서 실행) +tail_logs() { + echo -e "${GREEN}[최근 로그]${NC}" + echo "애플리케이션 로그는 별도 터미널에서 확인하세요:" + echo "tail -f /path/to/application.log" + echo "" +} + +# 메인 루프 +main() { + while true; do + clear_screen + print_header + check_app_status + check_system_resources + check_db_connections + check_websocket_status + check_performance_status + + echo -e "${YELLOW}5초 후 갱신... (Ctrl+C로 종료)${NC}" + sleep 5 + done +} + +# 트랩 설정 +trap 'echo -e "\n${RED}모니터링 종료${NC}"; exit 0' INT TERM + +# 실행 +main diff --git a/scripts/quick-check-invalid.sql b/scripts/quick-check-invalid.sql new file mode 100644 index 0000000..20f116c --- /dev/null +++ b/scripts/quick-check-invalid.sql @@ -0,0 +1,50 @@ +-- 빠른 Invalid Geometry 확인 + +-- 1. t_vessel_tracks_5min에 실제로 invalid geometry가 있는가? +SELECT + '5min table - invalid count' as check_type, + COUNT(*) as invalid_count +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND NOT public.ST_IsValid(track_geom); + +-- 2. 어떤 invalid 이유인가? +SELECT + '5min table - invalid reasons' as check_type, + public.ST_IsValidReason(track_geom) as reason, + COUNT(*) as count +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND NOT public.ST_IsValid(track_geom) +GROUP BY public.ST_IsValidReason(track_geom); + +-- 3. 
실제 invalid 샘플 확인 +SELECT + '5min table - invalid samples' as check_type, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as point_count, + public.ST_AsText(track_geom) as wkt, + public.ST_IsValidReason(track_geom) as reason +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND NOT public.ST_IsValid(track_geom) +LIMIT 5; + +-- 4. 에러 발생한 선박 확인 (vessel 000001_###0000072) +SELECT + 'Problem vessel check' as check_type, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as point_count, + public.ST_IsValid(track_geom) as is_valid, + public.ST_IsValidReason(track_geom) as reason, + public.ST_AsText(track_geom) as wkt +FROM signal.t_vessel_tracks_5min +WHERE sig_src_cd = '000001' + AND target_id LIKE '%0000072' + AND time_bucket >= CURRENT_TIMESTAMP - INTERVAL '1 day' +ORDER BY time_bucket DESC +LIMIT 10; diff --git a/scripts/quick-test-real-data.sql b/scripts/quick-test-real-data.sql new file mode 100644 index 0000000..63ef138 --- /dev/null +++ b/scripts/quick-test-real-data.sql @@ -0,0 +1,269 @@ +-- ======================================== +-- 실제 데이터로 즉시 테스트 (변수 없음) +-- 최근 데이터 자동 선택 +-- ======================================== + +-- 1. 최근 1시간 내 데이터가 있는 선박 자동 선택 +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +) +SELECT + '=== AUTO SELECTED VESSEL ===' as section, + sig_src_cd, + target_id, + hour_bucket, + hour_bucket + INTERVAL '1 hour' as hour_end +FROM recent_vessel; + +-- 2. 
선택된 선박의 5분 데이터 확인 +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +) +SELECT + '=== 5MIN DATA ===' as section, + t.sig_src_cd, + t.target_id, + t.time_bucket, + public.ST_NPoints(t.track_geom) as points, + public.ST_IsValid(t.track_geom) as is_valid, + LENGTH(public.ST_AsText(t.track_geom)) as wkt_length, + substring(public.ST_AsText(t.track_geom) from 'M \\((.+)\\)') as extracted_coords +FROM signal.t_vessel_tracks_5min t +INNER JOIN recent_vessel rv ON t.sig_src_cd = rv.sig_src_cd AND t.target_id = rv.target_id +WHERE t.time_bucket >= rv.hour_bucket + AND t.time_bucket < rv.hour_bucket + INTERVAL '1 hour' + AND t.track_geom IS NOT NULL + AND public.ST_NPoints(t.track_geom) > 0 +ORDER BY t.time_bucket; + +-- 3. 
string_agg 테스트 +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +) +SELECT + '=== STRING_AGG RESULT ===' as section, + t.sig_src_cd, + t.target_id, + string_agg( + substring(public.ST_AsText(t.track_geom) from 'M \\((.+)\\)'), + ',' + ORDER BY t.time_bucket + ) FILTER (WHERE t.track_geom IS NOT NULL) as all_coords, + COUNT(*) as track_count, + LENGTH(string_agg( + substring(public.ST_AsText(t.track_geom) from 'M \\((.+)\\)'), + ',' + ORDER BY t.time_bucket + ) FILTER (WHERE t.track_geom IS NOT NULL)) as coords_total_length +FROM signal.t_vessel_tracks_5min t +INNER JOIN recent_vessel rv ON t.sig_src_cd = rv.sig_src_cd AND t.target_id = rv.target_id +WHERE t.time_bucket >= rv.hour_bucket + AND t.time_bucket < rv.hour_bucket + INTERVAL '1 hour' + AND t.track_geom IS NOT NULL + AND public.ST_NPoints(t.track_geom) > 0 +GROUP BY t.sig_src_cd, t.target_id; + +-- 4. 
Geometry 생성 테스트 +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +), +merged_coords AS ( + SELECT + t.sig_src_cd, + t.target_id, + string_agg( + substring(public.ST_AsText(t.track_geom) from 'M \\((.+)\\)'), + ',' + ORDER BY t.time_bucket + ) FILTER (WHERE t.track_geom IS NOT NULL) as all_coords + FROM signal.t_vessel_tracks_5min t + INNER JOIN recent_vessel rv ON t.sig_src_cd = rv.sig_src_cd AND t.target_id = rv.target_id + WHERE t.time_bucket >= rv.hour_bucket + AND t.time_bucket < rv.hour_bucket + INTERVAL '1 hour' + AND t.track_geom IS NOT NULL + AND public.ST_NPoints(t.track_geom) > 0 + GROUP BY t.sig_src_cd, t.target_id +) +SELECT + '=== GEOMETRY CREATION TEST ===' as section, + sig_src_cd, + target_id, + all_coords IS NOT NULL as has_coords, + LENGTH(all_coords) as coords_length, + public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326) as merged_geom, + public.ST_NPoints(public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326)) as merged_points, + public.ST_IsValid(public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326)) as is_valid +FROM merged_coords; + +-- 5. 
전체 집계 쿼리 실행 (실제 HourlyTrackProcessor와 동일) +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +), +ordered_tracks AS ( + SELECT t.* + FROM signal.t_vessel_tracks_5min t + INNER JOIN recent_vessel rv ON t.sig_src_cd = rv.sig_src_cd AND t.target_id = rv.target_id + WHERE t.time_bucket >= rv.hour_bucket + AND t.time_bucket < rv.hour_bucket + INTERVAL '1 hour' + AND t.track_geom IS NOT NULL + AND public.ST_NPoints(t.track_geom) > 0 + ORDER BY t.time_bucket +), +merged_coords AS ( + SELECT + sig_src_cd, + target_id, + string_agg( + substring(public.ST_AsText(track_geom) from 'M \\((.+)\\)'), + ',' + ORDER BY time_bucket + ) FILTER (WHERE track_geom IS NOT NULL) as all_coords + FROM ordered_tracks + GROUP BY sig_src_cd, target_id +), +merged_tracks AS ( + SELECT + mc.sig_src_cd, + mc.target_id, + rv.hour_bucket as time_bucket, + public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom, + (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed, + (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points, + (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time, + (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time, + (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos, + (SELECT end_position FROM ordered_tracks WHERE 
sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos + FROM merged_coords mc + CROSS JOIN recent_vessel rv +), +calculated_tracks AS ( + SELECT + *, + public.ST_Length(merged_geom::geography) / 1852.0 as total_distance, + CASE + WHEN public.ST_NPoints(merged_geom) > 0 THEN + public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) - + public.ST_M(public.ST_PointN(merged_geom, 1)) + ELSE + EXTRACT(EPOCH FROM + CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp) + ) + END as time_diff_seconds + FROM merged_tracks +) +SELECT + '=== FULL AGGREGATION RESULT ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(merged_geom) as merged_points, + public.ST_IsValid(merged_geom) as is_valid, + total_distance, + CASE + WHEN time_diff_seconds > 0 THEN + CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2)) + ELSE 0 + END as avg_speed, + max_speed, + total_points, + start_time, + end_time, + time_diff_seconds +FROM calculated_tracks; + +-- 6. 
에러 발생 가능성 체크 +WITH recent_vessel AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', MIN(time_bucket)) as hour_bucket + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY DATE_TRUNC('hour', MIN(time_bucket)) DESC + LIMIT 1 +) +SELECT + '=== ERROR CHECK ===' as section, + COUNT(*) as total_tracks, + COUNT(CASE WHEN track_geom IS NULL THEN 1 END) as null_geom_count, + COUNT(CASE WHEN NOT public.ST_IsValid(track_geom) THEN 1 END) as invalid_geom_count, + COUNT(CASE WHEN public.ST_NPoints(track_geom) = 0 THEN 1 END) as zero_points_count, + COUNT(CASE WHEN public.ST_NPoints(track_geom) = 1 THEN 1 END) as single_point_count, + COUNT(CASE WHEN + substring(public.ST_AsText(track_geom) from 'M \\((.+)\\)') IS NULL + THEN 1 END) as regex_fail_count +FROM signal.t_vessel_tracks_5min t +INNER JOIN recent_vessel rv ON t.sig_src_cd = rv.sig_src_cd AND t.target_id = rv.target_id +WHERE t.time_bucket >= rv.hour_bucket + AND t.time_bucket < rv.hour_bucket + INTERVAL '1 hour'; + +-- ======================================== +-- 사용 방법: +-- 1. 그냥 전체 스크립트 실행 +-- 2. 자동으로 최근 선박 선택됨 +-- 3. 각 섹션별 결과 확인 +-- +-- 에러 발생시 확인 사항: +-- - "ERROR CHECK" 섹션에서 이상값 확인 +-- - "STRING_AGG RESULT"에서 all_coords 확인 +-- - "GEOMETRY CREATION TEST"에서 is_valid 확인 +-- ======================================== diff --git a/scripts/run-load-test.sh b/scripts/run-load-test.sh new file mode 100644 index 0000000..b9e0246 --- /dev/null +++ b/scripts/run-load-test.sh @@ -0,0 +1,288 @@ +#!/bin/bash + +# 선박 궤적 집계 시스템 부하 테스트 실행 스크립트 +# 실행 전 JMeter가 설치되어 있어야 합니다. + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../.." 
&& pwd)" +JMETER_HOME="${JMETER_HOME:-/opt/jmeter}" +RESULTS_DIR="$PROJECT_ROOT/load-test-results" +TIMESTAMP=$(date +%Y%m%d_%H%M%S) + +# 색상 정의 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# 함수: 메시지 출력 +log_info() { + echo -e "${GREEN}[INFO]${NC} $1" +} + +log_warn() { + echo -e "${YELLOW}[WARN]${NC} $1" +} + +log_error() { + echo -e "${RED}[ERROR]${NC} $1" +} + +# JMeter 설치 확인 +check_jmeter() { + if [ ! -d "$JMETER_HOME" ]; then + log_error "JMeter가 설치되어 있지 않습니다. JMETER_HOME을 설정하세요." + exit 1 + fi + + if [ ! -f "$JMETER_HOME/bin/jmeter" ]; then + log_error "JMeter 실행 파일을 찾을 수 없습니다: $JMETER_HOME/bin/jmeter" + exit 1 + fi + + log_info "JMeter 경로: $JMETER_HOME" +} + +# 결과 디렉토리 생성 +create_results_dir() { + mkdir -p "$RESULTS_DIR/$TIMESTAMP" + log_info "결과 디렉토리 생성: $RESULTS_DIR/$TIMESTAMP" +} + +# 시스템 상태 모니터링 시작 +start_monitoring() { + log_info "시스템 모니터링 시작..." + + # CPU, 메모리, 네트워크 사용률 모니터링 + nohup vmstat 5 > "$RESULTS_DIR/$TIMESTAMP/vmstat.log" 2>&1 & + VMSTAT_PID=$! + + nohup iostat -x 5 > "$RESULTS_DIR/$TIMESTAMP/iostat.log" 2>&1 & + IOSTAT_PID=$! + + # 데이터베이스 연결 모니터링 + nohup watch -n 5 "psql -h 10.26.252.48 -U mdauser -d mdadb -c 'SELECT count(*) FROM pg_stat_activity;'" > "$RESULTS_DIR/$TIMESTAMP/db_connections.log" 2>&1 & + DB_MON_PID=$! + + echo "$VMSTAT_PID $IOSTAT_PID $DB_MON_PID" > "$RESULTS_DIR/$TIMESTAMP/monitoring.pids" +} + +# 시스템 모니터링 중지 +stop_monitoring() { + log_info "시스템 모니터링 중지..." 
+ + if [ -f "$RESULTS_DIR/$TIMESTAMP/monitoring.pids" ]; then + while read pid; do + kill $pid 2>/dev/null + done < "$RESULTS_DIR/$TIMESTAMP/monitoring.pids" + rm "$RESULTS_DIR/$TIMESTAMP/monitoring.pids" + fi +} + +# JMeter 테스트 실행 +run_jmeter_test() { + local test_file=$1 + local test_name=$(basename "$test_file" .jmx) + + log_info "JMeter 테스트 실행: $test_name" + + # JMeter 실행 + "$JMETER_HOME/bin/jmeter" \ + -n \ + -t "$test_file" \ + -l "$RESULTS_DIR/$TIMESTAMP/${test_name}-results.jtl" \ + -e \ + -o "$RESULTS_DIR/$TIMESTAMP/${test_name}-report" \ + -Jjmeter.save.saveservice.output_format=csv \ + -Jjmeter.save.saveservice.assertion_results_failure_message=true \ + -Jjmeter.save.saveservice.data_type=true \ + -Jjmeter.save.saveservice.label=true \ + -Jjmeter.save.saveservice.response_code=true \ + -Jjmeter.save.saveservice.response_data.on_error=true \ + -Jjmeter.save.saveservice.response_message=true \ + -Jjmeter.save.saveservice.successful=true \ + -Jjmeter.save.saveservice.thread_name=true \ + -Jjmeter.save.saveservice.time=true \ + -Jjmeter.save.saveservice.connect_time=true \ + -Jjmeter.save.saveservice.latency=true \ + -Jjmeter.save.saveservice.bytes=true \ + -Jjmeter.save.saveservice.sent_bytes=true \ + -Jjmeter.save.saveservice.url=true + + if [ $? -eq 0 ]; then + log_info "테스트 완료: $test_name" + log_info "결과 파일: $RESULTS_DIR/$TIMESTAMP/${test_name}-results.jtl" + log_info "HTML 리포트: $RESULTS_DIR/$TIMESTAMP/${test_name}-report/index.html" + else + log_error "테스트 실패: $test_name" + return 1 + fi +} + +# WebSocket 부하 테스트 +run_websocket_test() { + log_info "WebSocket 부하 테스트 준비..." 
+ + # Python 스크립트로 WebSocket 테스트 실행 + cat > "$RESULTS_DIR/$TIMESTAMP/websocket_load_test.py" << 'EOF' +import asyncio +import websockets +import json +import time +from datetime import datetime, timedelta +import statistics + +class WebSocketLoadTester: + def __init__(self, base_url, num_clients, queries_per_client): + self.base_url = base_url + self.num_clients = num_clients + self.queries_per_client = queries_per_client + self.metrics = { + 'total_queries': 0, + 'successful_queries': 0, + 'failed_queries': 0, + 'latencies': [], + 'throughput': [] + } + + async def client_session(self, client_id): + async with websockets.connect(f"{self.base_url}/ws-tracks") as websocket: + for query_id in range(self.queries_per_client): + try: + # 쿼리 요청 생성 + query = { + "startTime": (datetime.now() - timedelta(days=7)).isoformat(), + "endTime": datetime.now().isoformat(), + "viewport": { + "minLon": 124.0, + "maxLon": 132.0, + "minLat": 33.0, + "maxLat": 38.0 + }, + "chunkSize": 1000 + } + + start_time = time.time() + await websocket.send(json.dumps(query)) + + # 응답 수신 + chunks_received = 0 + while True: + response = await websocket.recv() + data = json.loads(response) + chunks_received += 1 + + if data.get('isLastChunk', False): + break + + end_time = time.time() + latency = (end_time - start_time) * 1000 # ms + + self.metrics['latencies'].append(latency) + self.metrics['successful_queries'] += 1 + + print(f"Client {client_id} - Query {query_id}: {latency:.2f}ms, {chunks_received} chunks") + + except Exception as e: + print(f"Client {client_id} - Query {query_id} failed: {str(e)}") + self.metrics['failed_queries'] += 1 + + self.metrics['total_queries'] += 1 + await asyncio.sleep(1) # 쿼리 간 딜레이 + + async def run_test(self): + print(f"Starting WebSocket load test with {self.num_clients} clients...") + start_time = time.time() + + # 모든 클라이언트 동시 실행 + tasks = [] + for i in range(self.num_clients): + task = asyncio.create_task(self.client_session(i)) + tasks.append(task) + + await 
asyncio.gather(*tasks) + + end_time = time.time() + total_duration = end_time - start_time + + # 결과 분석 + print("\n=== 부하 테스트 결과 ===") + print(f"총 실행 시간: {total_duration:.2f}초") + print(f"총 쿼리 수: {self.metrics['total_queries']}") + print(f"성공: {self.metrics['successful_queries']}") + print(f"실패: {self.metrics['failed_queries']}") + + if self.metrics['latencies']: + print(f"평균 레이턴시: {statistics.mean(self.metrics['latencies']):.2f}ms") + print(f"최소 레이턴시: {min(self.metrics['latencies']):.2f}ms") + print(f"최대 레이턴시: {max(self.metrics['latencies']):.2f}ms") + print(f"중앙값 레이턴시: {statistics.median(self.metrics['latencies']):.2f}ms") + + print(f"처리량: {self.metrics['total_queries'] / total_duration:.2f} queries/sec") + +if __name__ == "__main__": + tester = WebSocketLoadTester( + base_url="ws://10.26.252.48:8090", + num_clients=10, + queries_per_client=5 + ) + asyncio.run(tester.run_test()) +EOF + + # Python WebSocket 테스트 실행 + if command -v python3 &> /dev/null; then + python3 "$RESULTS_DIR/$TIMESTAMP/websocket_load_test.py" > "$RESULTS_DIR/$TIMESTAMP/websocket_test_results.log" 2>&1 + else + log_warn "Python3가 설치되어 있지 않아 WebSocket 테스트를 건너뜁니다." + fi +} + +# 메인 실행 함수 +main() { + log_info "선박 궤적 집계 시스템 부하 테스트 시작" + log_info "타임스탬프: $TIMESTAMP" + + # JMeter 확인 + check_jmeter + + # 결과 디렉토리 생성 + create_results_dir + + # 시스템 모니터링 시작 + start_monitoring + + # 애플리케이션 상태 확인 + log_info "애플리케이션 상태 확인..." + curl -s "http://10.26.252.48:8090/actuator/health" > "$RESULTS_DIR/$TIMESTAMP/app_health_before.json" + + # JMeter 테스트 실행 + if [ -f "$PROJECT_ROOT/src/main/resources/jmeter/comprehensive-load-test.jmx" ]; then + run_jmeter_test "$PROJECT_ROOT/src/main/resources/jmeter/comprehensive-load-test.jmx" + fi + + # WebSocket 테스트 실행 + run_websocket_test + + # 10분간 부하 테스트 실행 + log_info "부하 테스트 진행 중... 
(10분)" + sleep 600 + + # 시스템 모니터링 중지 + stop_monitoring + + # 최종 애플리케이션 상태 확인 + curl -s "http://10.26.252.48:8090/actuator/health" > "$RESULTS_DIR/$TIMESTAMP/app_health_after.json" + + # 결과 요약 + log_info "부하 테스트 완료!" + log_info "결과 디렉토리: $RESULTS_DIR/$TIMESTAMP" + + # 간단한 결과 분석 + if [ -f "$RESULTS_DIR/$TIMESTAMP/comprehensive-load-test-results.jtl" ]; then + log_info "JMeter 결과 요약:" + awk -F',' 'NR>1 {sum+=$2; count++} END {print "평균 응답 시간: " sum/count " ms"}' "$RESULTS_DIR/$TIMESTAMP/comprehensive-load-test-results.jtl" + fi +} + +# 스크립트 실행 +main "$@" diff --git a/scripts/run-on-query-server-dev.sh b/scripts/run-on-query-server-dev.sh new file mode 100644 index 0000000..1221441 --- /dev/null +++ b/scripts/run-on-query-server-dev.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +# Query DB 서버에서 최적화된 실행 스크립트 +# Rocky Linux 환경에 맞춰 조정됨 +# Java 17 경로 명시적 지정 + +# 애플리케이션 경로 +APP_HOME="/devdata/apps/bridge-db-monitoring" +JAR_FILE="$APP_HOME/vessel-batch-aggregation.jar" + +# Java 17 경로 +JAVA_HOME="/devdata/apps/jdk-17.0.8" +JAVA_BIN="$JAVA_HOME/bin/java" + +# 로그 디렉토리 +LOG_DIR="$APP_HOME/logs" +mkdir -p $LOG_DIR + +echo "================================================" +echo "Vessel Batch Aggregation - Query Server Edition" +echo "Start Time: $(date)" +echo "================================================" + +# 경로 확인 +echo "Environment Check:" +echo "- App Home: $APP_HOME" +echo "- JAR File: $JAR_FILE" +echo "- Java Path: $JAVA_BIN" +echo "- Java Version: $($JAVA_BIN -version 2>&1 | head -1)" + +# JAR 파일 존재 확인 +if [ ! -f "$JAR_FILE" ]; then + echo "ERROR: JAR file not found at $JAR_FILE" + exit 1 +fi + +# Java 실행 파일 확인 +if [ ! 
-x "$JAVA_BIN" ]; then + echo "ERROR: Java not found or not executable at $JAVA_BIN" + exit 1 +fi + +# 서버 정보 확인 +echo "" +echo "Server Info:" +echo "- Hostname: $(hostname)" +echo "- CPU Cores: $(nproc)" +echo "- Total Memory: $(free -h | grep Mem | awk '{print $2}')" +echo "- PostgreSQL Version: $(psql --version 2>/dev/null | head -1 || echo 'PostgreSQL client not in PATH')" + +# 환경 변수 설정 (localhost 최적화) +export SPRING_PROFILES_ACTIVE=prod + +# Query DB와 Batch Meta DB를 localhost로 오버라이드 +export SPRING_DATASOURCE_QUERY_JDBC_URL="jdbc:postgresql://10.29.17.90:5432/mpcdb2?options=-csearch_path=signal,public&assumeMinServerVersion=12&reWriteBatchedInserts=true" +export SPRING_DATASOURCE_BATCH_JDBC_URL="jdbc:postgresql://localhost:5432/mdadb?currentSchema=public&assumeMinServerVersion=12&reWriteBatchedInserts=true" + +# 서버 CPU 코어 수에 따른 병렬 처리 조정 +CPU_CORES=$(nproc) +export VESSEL_BATCH_PARTITION_SIZE=$((CPU_CORES * 2)) +export VESSEL_BATCH_BULK_INSERT_PARALLEL_THREADS=$((CPU_CORES / 2)) + +echo "" +echo "Optimized Settings:" +echo "- Partition Size: $VESSEL_BATCH_PARTITION_SIZE" +echo "- Parallel Threads: $VESSEL_BATCH_BULK_INSERT_PARALLEL_THREADS" +echo "- Query DB: localhost (optimized)" +echo "- Batch Meta DB: localhost (optimized)" + +# JVM 옵션 (서버 메모리에 맞게 조정) +TOTAL_MEM=$(free -g | grep Mem | awk '{print $2}') +JVM_HEAP=$((TOTAL_MEM / 4)) # 전체 메모리의 25% 사용 + +# 최소 16GB, 최대 64GB로 제한 +if [ $JVM_HEAP -lt 16 ]; then + JVM_HEAP=16 +elif [ $JVM_HEAP -gt 64 ]; then + JVM_HEAP=64 +fi + +JAVA_OPTS="-Xms${JVM_HEAP}g -Xmx${JVM_HEAP}g \ + -XX:+UseG1GC \ + -XX:G1HeapRegionSize=32m \ + -XX:MaxGCPauseMillis=200 \ + -XX:InitiatingHeapOccupancyPercent=35 \ + -XX:G1ReservePercent=15 \ + -XX:+UseStringDeduplication \ + -XX:+ParallelRefProcEnabled \ + -XX:+ExplicitGCInvokesConcurrent \ + -XX:ParallelGCThreads=$((CPU_CORES / 2)) \ + -XX:ConcGCThreads=$((CPU_CORES / 4)) \ + -XX:MaxMetaspaceSize=512m \ + -XX:+HeapDumpOnOutOfMemoryError \ + -XX:HeapDumpPath=$LOG_DIR/heapdump.hprof \ + 
-Xlog:gc*:file=$LOG_DIR/gc.log:time,uptime,level,tags:filecount=5,filesize=100M \ + -Dfile.encoding=UTF-8 \ + -Duser.timezone=Asia/Seoul \ + -Djava.security.egd=file:/dev/./urandom \ + -Dspring.profiles.active=prod" + +echo "- JVM Heap Size: ${JVM_HEAP}GB" + +# 기존 프로세스 확인 및 종료 +echo "" +echo "Checking for existing process..." +PID=$(pgrep -f "$JAR_FILE") +if [ ! -z "$PID" ]; then + echo "Stopping existing process (PID: $PID)..." + kill -15 $PID + + # 프로세스 종료 대기 (최대 30초) + for i in {1..30}; do + if ! kill -0 $PID 2>/dev/null; then + echo "Process stopped successfully." + break + fi + if [ $i -eq 30 ]; then + echo "Force killing process..." + kill -9 $PID + fi + sleep 1 + done +fi + +# 작업 디렉토리로 이동 +cd $APP_HOME + +# 애플리케이션 실행 (nice로 우선순위 조정) +echo "" +echo "Starting application with reduced priority..." +echo "Command: nice -n 10 $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE" +echo "" + +# nohup으로 백그라운드 실행 +nohup nice -n 10 $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE \ + > $LOG_DIR/app.log 2>&1 & + +NEW_PID=$! +echo "Application started with PID: $NEW_PID" + +# PID 파일 생성 +echo $NEW_PID > $APP_HOME/vessel-batch.pid + +# 시작 확인 (30초 대기) +echo "Waiting for application startup..." +STARTUP_SUCCESS=false +for i in {1..30}; do + if grep -q "Started SignalBatchApplication" $LOG_DIR/app.log 2>/dev/null; then + echo "✅ Application started successfully!" + STARTUP_SUCCESS=true + break + fi + echo -n "." + sleep 1 +done + +if [ "$STARTUP_SUCCESS" = false ]; then + echo "" + echo "⚠️ Application startup timeout. Check logs for errors." + echo "Log file: $LOG_DIR/app.log" + tail -20 $LOG_DIR/app.log +fi + +echo "" +echo "================================================" +echo "Deployment Complete!" 
+echo "- PID: $NEW_PID" +echo "- PID File: $APP_HOME/vessel-batch.pid" +echo "- Log: $LOG_DIR/app.log" +echo "- Monitor: tail -f $LOG_DIR/app.log" +echo "================================================" + +# 초기 상태 확인 +sleep 5 +echo "" +echo "Initial Status Check:" +curl -s http://localhost:8090/actuator/health 2>/dev/null | python -m json.tool || echo "Health endpoint not yet available" + +# 리소스 사용량 표시 +echo "" +echo "Resource Usage:" +ps aux | grep $NEW_PID | grep -v grep + +# 빠른 명령어 안내 +echo "" +echo "Useful Commands:" +echo "- Stop: kill -15 \$(cat $APP_HOME/vessel-batch.pid)" +echo "- Logs: tail -f $LOG_DIR/app.log" +echo "- Status: curl http://localhost:8090/actuator/health" +echo "- Monitor: $APP_HOME/monitor-query-server.sh" diff --git a/scripts/run-query-only-server.sh b/scripts/run-query-only-server.sh new file mode 100644 index 0000000..0fd0c6d --- /dev/null +++ b/scripts/run-query-only-server.sh @@ -0,0 +1,184 @@ +#!/bin/bash + +# Query 전용 서버 실행 스크립트 (10.29.17.90) +# 배치 Job 없이 조회 API만 제공 +# Java 17 경로 명시적 지정 + +# 애플리케이션 경로 +APP_HOME="/devdata/apps/bridge-db-monitoring" +JAR_FILE="$APP_HOME/vessel-batch-aggregation.jar" + +# Java 17 경로 +JAVA_HOME="/devdata/apps/jdk-17.0.8" +JAVA_BIN="$JAVA_HOME/bin/java" + +# 로그 디렉토리 +LOG_DIR="$APP_HOME/logs" +mkdir -p $LOG_DIR + +echo "================================================" +echo "Vessel Query API Server - Query Only Mode" +echo "Start Time: $(date)" +echo "================================================" + +# 경로 확인 +echo "Environment Check:" +echo "- App Home: $APP_HOME" +echo "- JAR File: $JAR_FILE" +echo "- Java Path: $JAVA_BIN" +echo "- Java Version: $($JAVA_BIN -version 2>&1 | head -1)" + +# JAR 파일 존재 확인 +if [ ! -f "$JAR_FILE" ]; then + echo "ERROR: JAR file not found at $JAR_FILE" + exit 1 +fi + +# Java 실행 파일 확인 +if [ ! 
-x "$JAVA_BIN" ]; then + echo "ERROR: Java not found or not executable at $JAVA_BIN" + exit 1 +fi + +# 서버 정보 확인 +echo "" +echo "Server Info:" +echo "- Hostname: $(hostname)" +echo "- CPU Cores: $(nproc)" +echo "- Total Memory: $(free -h | grep Mem | awk '{print $2}')" +echo "- PostgreSQL Version: $(psql --version 2>/dev/null | head -1 || echo 'PostgreSQL client not in PATH')" + +# 환경 변수 설정 (query 프로파일 - 배치 비활성화!) +export SPRING_PROFILES_ACTIVE=query + +echo "" +echo "Profile Settings:" +echo "- Active Profile: QUERY (Batch Jobs Disabled)" +echo "- Query DB: 10.29.17.90:5432/mpcdb2 (Local DB)" +echo "- Batch Jobs: DISABLED" +echo "- Scheduler: DISABLED" + +# JVM 옵션 (서버 메모리에 맞게 조정) +TOTAL_MEM=$(free -g | grep Mem | awk '{print $2}') +JVM_HEAP=$((TOTAL_MEM / 8)) # 전체 메모리의 12.5% 사용 (배치 없으므로 적게) + +# 최소 4GB, 최대 16GB로 제한 +if [ $JVM_HEAP -lt 4 ]; then + JVM_HEAP=4 +elif [ $JVM_HEAP -gt 16 ]; then + JVM_HEAP=16 +fi + +CPU_CORES=$(nproc) + +JAVA_OPTS="-Xms${JVM_HEAP}g -Xmx${JVM_HEAP}g \ + -XX:+UseG1GC \ + -XX:G1HeapRegionSize=32m \ + -XX:MaxGCPauseMillis=200 \ + -XX:InitiatingHeapOccupancyPercent=35 \ + -XX:G1ReservePercent=15 \ + -XX:+UseStringDeduplication \ + -XX:+ParallelRefProcEnabled \ + -XX:+ExplicitGCInvokesConcurrent \ + -XX:ParallelGCThreads=$((CPU_CORES / 2)) \ + -XX:ConcGCThreads=$((CPU_CORES / 4)) \ + -XX:MaxMetaspaceSize=512m \ + -XX:+HeapDumpOnOutOfMemoryError \ + -XX:HeapDumpPath=$LOG_DIR/heapdump.hprof \ + -Xlog:gc*:file=$LOG_DIR/gc.log:time,uptime,level,tags:filecount=5,filesize=100M \ + -Dfile.encoding=UTF-8 \ + -Duser.timezone=Asia/Seoul \ + -Djava.security.egd=file:/dev/./urandom \ + -Dspring.profiles.active=query" + +echo "- JVM Heap Size: ${JVM_HEAP}GB" + +# 기존 프로세스 확인 및 종료 +echo "" +echo "Checking for existing process..." +PID=$(pgrep -f "$JAR_FILE") +if [ ! -z "$PID" ]; then + echo "Stopping existing process (PID: $PID)..." + kill -15 $PID + + # 프로세스 종료 대기 (최대 30초) + for i in {1..30}; do + if ! 
kill -0 $PID 2>/dev/null; then + echo "Process stopped successfully." + break + fi + if [ $i -eq 30 ]; then + echo "Force killing process..." + kill -9 $PID + fi + sleep 1 + done +fi + +# 작업 디렉토리로 이동 +cd $APP_HOME + +# 애플리케이션 실행 +echo "" +echo "Starting application in QUERY-ONLY mode..." +echo "Command: $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE" +echo "" + +# nohup으로 백그라운드 실행 +nohup $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE \ + > $LOG_DIR/app.log 2>&1 & + +NEW_PID=$! +echo "Application started with PID: $NEW_PID" + +# PID 파일 생성 +echo $NEW_PID > $APP_HOME/vessel-query.pid + +# 시작 확인 (30초 대기) +echo "Waiting for application startup..." +STARTUP_SUCCESS=false +for i in {1..30}; do + if grep -q "Started SignalBatchApplication" $LOG_DIR/app.log 2>/dev/null; then + echo "✅ Application started successfully!" + STARTUP_SUCCESS=true + break + fi + echo -n "." + sleep 1 +done + +if [ "$STARTUP_SUCCESS" = false ]; then + echo "" + echo "⚠️ Application startup timeout. Check logs for errors." + echo "Log file: $LOG_DIR/app.log" + tail -20 $LOG_DIR/app.log +fi + +echo "" +echo "================================================" +echo "Deployment Complete!" 
+echo "- Mode: QUERY ONLY (No Batch Jobs)" +echo "- PID: $NEW_PID" +echo "- PID File: $APP_HOME/vessel-query.pid" +echo "- Log: $LOG_DIR/app.log" +echo "- Monitor: tail -f $LOG_DIR/app.log" +echo "================================================" + +# 초기 상태 확인 +sleep 5 +echo "" +echo "Initial Status Check:" +curl -s http://localhost:8090/actuator/health 2>/dev/null | python -m json.tool || echo "Health endpoint not yet available" + +# 리소스 사용량 표시 +echo "" +echo "Resource Usage:" +ps aux | grep $NEW_PID | grep -v grep + +# 빠른 명령어 안내 +echo "" +echo "Useful Commands:" +echo "- Stop: kill -15 \$(cat $APP_HOME/vessel-query.pid)" +echo "- Logs: tail -f $LOG_DIR/app.log" +echo "- Status: curl http://localhost:8090/actuator/health" +echo "- API Test: curl http://localhost:8090/api/gis/areas" diff --git a/scripts/server-logs.bat b/scripts/server-logs.bat new file mode 100644 index 0000000..9dd28fd --- /dev/null +++ b/scripts/server-logs.bat @@ -0,0 +1,40 @@ +@echo off +chcp 65001 >nul +REM =============================================== +REM Signal Batch Server Log Viewer +REM =============================================== + +setlocal + +set SERVER_IP=10.26.252.48 +set SERVER_USER=root +set SERVER_PATH=/devdata/apps/bridge-db-monitoring + +echo =============================================== +echo Signal Batch Server Log Viewer +echo =============================================== +echo Server: %SERVER_IP% +echo Time: %date% %time% +echo. + +if "%1"=="tail" ( + echo Starting real-time log monitoring... (Ctrl+C to exit) + ssh %SERVER_USER%@%SERVER_IP% "cd %SERVER_PATH% && ./vessel-batch-control.sh logs" +) else if "%1"=="errors" ( + echo Retrieving recent error logs... + ssh %SERVER_USER%@%SERVER_IP% "cd %SERVER_PATH% && ./vessel-batch-control.sh errors" +) else if "%1"=="stats" ( + echo Retrieving performance statistics... 
+ ssh %SERVER_USER%@%SERVER_IP% "cd %SERVER_PATH% && ./vessel-batch-control.sh stats" +) else ( + echo Usage: + echo server-logs.bat - Show recent 50 lines + echo server-logs.bat tail - Real-time log monitoring + echo server-logs.bat errors - Show error logs only + echo server-logs.bat stats - Show performance statistics + echo. + echo Recent 50 lines of log: + ssh %SERVER_USER%@%SERVER_IP% "tail -50 %SERVER_PATH%/logs/app.log 2>/dev/null || echo 'Log file not available'" +) + +endlocal \ No newline at end of file diff --git a/scripts/server-status.bat b/scripts/server-status.bat new file mode 100644 index 0000000..d72031c --- /dev/null +++ b/scripts/server-status.bat @@ -0,0 +1,64 @@ +@echo off +chcp 65001 >nul +REM =============================================== +REM Signal Batch Server Status Checker +REM =============================================== + +setlocal enabledelayedexpansion + +REM Configuration +set "SERVER_IP=10.26.252.48" +set "SERVER_USER=root" +set "SERVER_PATH=/devdata/apps/bridge-db-monitoring" + +echo =============================================== +echo Signal Batch Server Status +echo =============================================== +echo [INFO] Query Time: !date! !time! +echo [INFO] Target Server: !SERVER_IP! + +REM 1. Server Connection Test +echo. +echo =============== Server Connection Test =============== +ssh !SERVER_USER!@!SERVER_IP! "echo 'Server connection OK'" 2>nul +set CONNECTION_RESULT=!ERRORLEVEL! +if !CONNECTION_RESULT! neq 0 ( + echo [ERROR] Server connection failed + exit /b 1 +) +echo [INFO] Server connection successful + +REM 2. Application Status +echo. +echo =============== Application Status =============== +ssh !SERVER_USER!@!SERVER_IP! "cd !SERVER_PATH! && ./vessel-batch-control.sh status" + +REM 3. Additional Status Information +echo. +echo =============== Additional Status Information =============== + +REM Health Check +echo [INFO] Health Check: +ssh !SERVER_USER!@!SERVER_IP! 
"curl -s http://localhost:8090/actuator/health --max-time 5 2>/dev/null | python -m json.tool 2>/dev/null || echo 'Health endpoint not available'" + +echo. +REM Metrics Information +echo [INFO] Metrics Information: +ssh !SERVER_USER!@!SERVER_IP! "curl -s http://localhost:8090/actuator/metrics --max-time 5 2>/dev/null | head -20 || echo 'Metrics endpoint not available'" + +echo. +REM Disk Usage +echo [INFO] Disk Usage: +ssh !SERVER_USER!@!SERVER_IP! "df -h !SERVER_PATH!" + +echo. +REM Memory Usage +echo [INFO] Memory Usage: +ssh !SERVER_USER!@!SERVER_IP! "free -h" + +echo. +REM Recent Log Check +echo [INFO] Recent Logs (last 10 lines): +ssh !SERVER_USER!@!SERVER_IP! "tail -10 !SERVER_PATH!/logs/app.log 2>/dev/null || echo 'Log file not available'" + +endlocal \ No newline at end of file diff --git a/scripts/setup-ssh-key.bat b/scripts/setup-ssh-key.bat new file mode 100644 index 0000000..18c9c8c --- /dev/null +++ b/scripts/setup-ssh-key.bat @@ -0,0 +1,59 @@ +@echo off +chcp 65001 >nul +echo =============================================== +echo SSH Key Setup for Server Deployment +echo =============================================== + +set "SERVER_IP=10.26.252.51" +set "SERVER_USER=root" + +echo [INFO] Setting up SSH key authentication for %SERVER_USER%@%SERVER_IP% +echo. + +REM Check if SSH key exists +if not exist "%USERPROFILE%\.ssh\id_rsa.pub" ( + echo [INFO] SSH key not found. Generating new SSH key... + ssh-keygen -t rsa -b 4096 -f "%USERPROFILE%\.ssh\id_rsa" -N "" + if !ERRORLEVEL! neq 0 ( + echo [ERROR] Failed to generate SSH key + pause + exit /b 1 + ) + echo [SUCCESS] SSH key generated +) + +echo. +echo [INFO] Copying SSH key to server... +echo [INFO] You will be prompted for the server password +echo. + +type "%USERPROFILE%\.ssh\id_rsa.pub" | ssh %SERVER_USER%@%SERVER_IP% "mkdir -p ~/.ssh && chmod 700 ~/.ssh && cat >> ~/.ssh/authorized_keys && chmod 600 ~/.ssh/authorized_keys && echo '[SUCCESS] SSH key installed'" + +if !ERRORLEVEL! 
neq 0 ( + echo [ERROR] Failed to copy SSH key + echo. + echo Please ensure: + echo - Server is accessible at %SERVER_IP% + echo - You have the correct password for %SERVER_USER% + echo - SSH service is running on the server + pause + exit /b 1 +) + +echo. +echo =============================================== +echo [SUCCESS] SSH Key Setup Complete! +echo =============================================== +echo. +echo Testing connection... +ssh -o BatchMode=yes -o ConnectTimeout=10 %SERVER_USER%@%SERVER_IP% "echo '[SUCCESS] SSH key authentication working!'" + +if !ERRORLEVEL! equ 0 ( + echo. + echo You can now run deploy-only.bat without password +) else ( + echo [WARN] Key authentication test failed + echo Please try running this script again +) + +pause \ No newline at end of file diff --git a/scripts/stop-running-jobs.sql b/scripts/stop-running-jobs.sql new file mode 100644 index 0000000..ea9097c --- /dev/null +++ b/scripts/stop-running-jobs.sql @@ -0,0 +1,67 @@ +-- 실행 중인(STARTED) 배치 Job과 Step을 강제 종료 +-- 주의: 실제로 실행 중인 프로세스를 종료하지는 않습니다. +-- DB 상태만 변경하므로, 애플리케이션을 먼저 중지한 후 사용하세요. + +-- 1. 현재 실행 중인 Job 확인 +SELECT + '=== RUNNING JOBS ===' as status, + JOB_EXECUTION_ID, + JOB_INSTANCE_ID, + START_TIME, + STATUS, + (SELECT JOB_NAME FROM BATCH_JOB_INSTANCE WHERE JOB_INSTANCE_ID = bje.JOB_INSTANCE_ID) as JOB_NAME +FROM BATCH_JOB_EXECUTION bje +WHERE STATUS IN ('STARTED', 'STARTING', 'STOPPING') +ORDER BY START_TIME DESC; + +-- 2. 실행 중인 Step 확인 +SELECT + '=== RUNNING STEPS ===' as status, + bse.STEP_EXECUTION_ID, + bse.JOB_EXECUTION_ID, + bse.STEP_NAME, + bse.STATUS, + bse.START_TIME +FROM BATCH_STEP_EXECUTION bse +WHERE STATUS IN ('STARTED', 'STARTING', 'STOPPING') +ORDER BY START_TIME DESC; + +-- 3. 
실행 중인 Step을 STOPPED로 변경 +UPDATE BATCH_STEP_EXECUTION +SET + STATUS = 'STOPPED', + EXIT_CODE = 'STOPPED', + EXIT_MESSAGE = 'Manually stopped - Original status: ' || STATUS, + END_TIME = CURRENT_TIMESTAMP, + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE STATUS IN ('STARTED', 'STARTING', 'STOPPING'); + +-- 4. 실행 중인 Job을 STOPPED로 변경 +UPDATE BATCH_JOB_EXECUTION +SET + STATUS = 'STOPPED', + EXIT_CODE = 'STOPPED', + EXIT_MESSAGE = 'Manually stopped - Original status: ' || STATUS, + END_TIME = CURRENT_TIMESTAMP, + LAST_UPDATED = CURRENT_TIMESTAMP +WHERE STATUS IN ('STARTED', 'STARTING', 'STOPPING'); + +-- 5. 결과 확인 +SELECT + '=== AFTER STOP ===' as status, + COUNT(*) as running_jobs +FROM BATCH_JOB_EXECUTION +WHERE STATUS IN ('STARTED', 'STARTING', 'STOPPING'); + +SELECT + '=== STOPPED JOBS ===' as status, + JOB_EXECUTION_ID, + JOB_INSTANCE_ID, + START_TIME, + END_TIME, + STATUS, + EXIT_CODE +FROM BATCH_JOB_EXECUTION +WHERE STATUS = 'STOPPED' +ORDER BY JOB_EXECUTION_ID DESC +LIMIT 10; diff --git a/scripts/sync-nexus.sh b/scripts/sync-nexus.sh new file mode 100644 index 0000000..1be44a5 --- /dev/null +++ b/scripts/sync-nexus.sh @@ -0,0 +1,170 @@ +#!/bin/bash +# ============================================================================= +# sync-nexus.sh - 로컬 Maven 의존성을 Nexus에 동기화 +# +# 사용법: +# ./scripts/sync-nexus.sh # 실제 업로드 +# ./scripts/sync-nexus.sh --dry-run # 업로드 대상만 확인 +# ============================================================================= + +set -eo pipefail + +# --- SDKMAN 초기화 (set -u 전에 실행) --- +if [ -f "$HOME/.sdkman/bin/sdkman-init.sh" ]; then + source "$HOME/.sdkman/bin/sdkman-init.sh" 2>/dev/null || true +fi + +# --- 설정 --- +NEXUS_URL="http://10.26.252.39:8081" +REPO_ID="mda-backend-repository" +NEXUS_USER="admin" +NEXUS_PASS="8932" +LOCAL_REPO="$HOME/.m2/repository" + +# --- 옵션 파싱 --- +DRY_RUN=false +if [[ "${1:-}" == "--dry-run" ]]; then + DRY_RUN=true + echo "=== DRY RUN 모드 (업로드하지 않음) ===" +fi + +# --- 카운터 --- +TOTAL=0 +SKIPPED=0 +UPLOADED=0 +FAILED=0 + 
+# Nexus에 아티팩트 존재 여부 확인 (HTTP HEAD로 .pom 파일 체크) +check_exists() { + local group_path=$1 + local artifact_id=$2 + local version=$3 + local pom_url="${NEXUS_URL}/repository/${REPO_ID}/${group_path}/${artifact_id}/${version}/${artifact_id}-${version}.pom" + local http_code + http_code=$(curl -s -o /dev/null -w "%{http_code}" -u "${NEXUS_USER}:${NEXUS_PASS}" --connect-timeout 5 "$pom_url" < /dev/null) + [[ "$http_code" == "200" ]] +} + +# 파일 업로드 (HTTP PUT) +upload_file() { + local file_path=$1 + local remote_path=$2 + local url="${NEXUS_URL}/repository/${REPO_ID}/${remote_path}" + + if [ ! -f "$file_path" ]; then + return 1 + fi + + local http_code + http_code=$(curl -s -o /dev/null -w "%{http_code}" -u "${NEXUS_USER}:${NEXUS_PASS}" --upload-file "$file_path" --connect-timeout 10 --max-time 120 "$url" < /dev/null) + [[ "$http_code" == "201" || "$http_code" == "200" ]] +} + +# 아티팩트 업로드 (pom + jar + 기타) +upload_artifact() { + local group_id=$1 + local artifact_id=$2 + local version=$3 + local packaging=$4 + + local group_path + group_path=$(echo "$group_id" | tr '.' '/') + local base_dir="${LOCAL_REPO}/${group_path}/${artifact_id}/${version}" + local base_name="${artifact_id}-${version}" + local remote_base="${group_path}/${artifact_id}/${version}" + + local success=true + + # POM 업로드 (필수) + local pom_file="${base_dir}/${base_name}.pom" + if [ -f "$pom_file" ]; then + if upload_file "$pom_file" "${remote_base}/${base_name}.pom"; then + : + else + echo " [FAIL] POM 업로드 실패" + success=false + fi + fi + + # JAR 업로드 (pom 패키징이 아닌 경우) + if [[ "$packaging" != "pom" ]]; then + local jar_file="${base_dir}/${base_name}.${packaging}" + if [ -f "$jar_file" ]; then + if upload_file "$jar_file" "${remote_base}/${base_name}.${packaging}"; then + : + else + echo " [FAIL] ${packaging} 업로드 실패" + success=false + fi + fi + fi + + $success +} + +echo "" +echo "=== Nexus 동기화 시작 ===" +echo " Nexus: ${NEXUS_URL}/repository/${REPO_ID}" +echo " 로컬: ${LOCAL_REPO}" +echo "" + +# Nexus 연결 확인 +if ! 
curl -s -o /dev/null -w "" -u "${NEXUS_USER}:${NEXUS_PASS}" --connect-timeout 5 "${NEXUS_URL}/service/rest/v1/repositories" 2>/dev/null; then + echo "[ERROR] Nexus(${NEXUS_URL})에 연결할 수 없습니다." + exit 1 +fi +echo "[OK] Nexus 연결 확인" +echo "" + +# Maven dependency:list로 GAV 목록 추출 +echo "의존성 목록 추출 중..." +DEP_LIST=$(mvn dependency:list -DoutputAbsoluteArtifactFilename=true 2>/dev/null | grep "^\[INFO\] " | sed 's/\[INFO\] //' | sed 's/ -- .*//') + +echo "" +echo "--- 동기화 진행 ---" + +while IFS= read -r line; do + # 형식: groupId:artifactId:packaging:version:scope:/path/to/file + IFS=':' read -r group_id artifact_id packaging version scope rest <<< "$line" + + if [[ -z "$group_id" || -z "$artifact_id" || -z "$version" ]]; then + continue + fi + + TOTAL=$((TOTAL + 1)) + local_group_path=$(echo "$group_id" | tr '.' '/') + + # Nexus 존재 여부 확인 + if check_exists "$local_group_path" "$artifact_id" "$version"; then + SKIPPED=$((SKIPPED + 1)) + continue + fi + + # 신규 아티팩트 발견 + echo "[NEW] ${group_id}:${artifact_id}:${version} (${packaging})" + + if $DRY_RUN; then + UPLOADED=$((UPLOADED + 1)) + else + if upload_artifact "$group_id" "$artifact_id" "$version" "$packaging"; then + echo " -> 업로드 완료" + UPLOADED=$((UPLOADED + 1)) + else + echo " -> 업로드 실패" + FAILED=$((FAILED + 1)) + fi + fi + +done <<< "$DEP_LIST" + +echo "" +echo "=== 동기화 완료 ===" +echo " 전체: ${TOTAL}" +echo " 스킵 (이미 존재): ${SKIPPED}" +if $DRY_RUN; then + echo " 업로드 대상: ${UPLOADED}" +else + echo " 업로드 성공: ${UPLOADED}" + echo " 업로드 실패: ${FAILED}" +fi +echo "" diff --git a/scripts/test-abnormal-tracks-insert.sql b/scripts/test-abnormal-tracks-insert.sql new file mode 100644 index 0000000..5bf7282 --- /dev/null +++ b/scripts/test-abnormal-tracks-insert.sql @@ -0,0 +1,135 @@ +-- t_abnormal_tracks 테스트용 INSERT 쿼리 +-- PostGIS ST_GeomFromText 함수 테스트 + +-- 1. 
기본 테스트 (track_geom 컬럼 사용) +INSERT INTO signal.t_abnormal_tracks ( + sig_src_cd, + target_id, + time_bucket, + track_geom, + abnormal_type, + abnormal_reason, + distance_nm, + avg_speed, + max_speed, + point_count, + source_table +) VALUES ( + 'AIS', -- sig_src_cd + 'TEST_VESSEL_001', -- target_id + '2025-10-10 12:00:00'::timestamp, -- time_bucket + ST_GeomFromText('LINESTRING M(126.0 37.0 1728547200, 126.1 37.1 1728547260)', 4326), -- track_geom (LineString M 타입) + 'EXCESSIVE_SPEED', -- abnormal_type + '{"reason": "Speed exceeds 200 knots", "detected_speed": 250.5}'::jsonb, -- abnormal_reason + 15.5, -- distance_nm + 180.3, -- avg_speed + 250.5, -- max_speed + 10, -- point_count + 'hourly' -- source_table +) +ON CONFLICT (sig_src_cd, target_id, time_bucket, source_table) +DO UPDATE SET + track_geom = EXCLUDED.track_geom, + abnormal_type = EXCLUDED.abnormal_type, + abnormal_reason = EXCLUDED.abnormal_reason, + distance_nm = EXCLUDED.distance_nm, + avg_speed = EXCLUDED.avg_speed, + max_speed = EXCLUDED.max_speed, + point_count = EXCLUDED.point_count, + detected_at = NOW(); + +-- 2. 
track_geom_v2 컬럼을 사용하는 경우 +INSERT INTO signal.t_abnormal_tracks ( + sig_src_cd, + target_id, + time_bucket, + track_geom_v2, + abnormal_type, + abnormal_reason, + distance_nm, + avg_speed, + max_speed, + point_count, + source_table +) VALUES ( + 'LRIT', -- sig_src_cd + 'TEST_VESSEL_002', -- target_id + '2025-10-10 13:00:00'::timestamp, -- time_bucket + ST_GeomFromText('LINESTRING M(127.0 38.0 1728550800, 127.2 38.2 1728550860, 127.4 38.4 1728550920)', 4326), -- track_geom_v2 + 'UNREALISTIC_DISTANCE', -- abnormal_type + '{"reason": "Distance too large for time interval", "distance_nm": 120.0, "time_interval_minutes": 5}'::jsonb, -- abnormal_reason + 120.0, -- distance_nm + 1440.0, -- avg_speed (120nm / 5min = 1440 knots) + 1500.0, -- max_speed + 3, -- point_count + '5min' -- source_table +) +ON CONFLICT (sig_src_cd, target_id, time_bucket, source_table) +DO UPDATE SET + track_geom_v2 = EXCLUDED.track_geom_v2, + abnormal_type = EXCLUDED.abnormal_type, + abnormal_reason = EXCLUDED.abnormal_reason, + distance_nm = EXCLUDED.distance_nm, + avg_speed = EXCLUDED.avg_speed, + max_speed = EXCLUDED.max_speed, + point_count = EXCLUDED.point_count, + detected_at = NOW(); + +-- 3. 
variant that schema-qualifies the PostGIS function with public explicitly
+INSERT INTO signal.t_abnormal_tracks (
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    track_geom,
+    abnormal_type,
+    abnormal_reason,
+    distance_nm,
+    avg_speed,
+    max_speed,
+    point_count,
+    source_table
+) VALUES (
+    'VPASS',                            -- sig_src_cd
+    'TEST_VESSEL_003',                  -- target_id
+    '2025-10-10 14:00:00'::timestamp,   -- time_bucket
+    public.ST_GeomFromText('LINESTRING M(128.0 36.0 1728554400, 128.1 36.1 1728554460)', 4326), -- explicit public schema
+    'SUDDEN_DIRECTION_CHANGE',          -- abnormal_type
+    '{"reason": "Unrealistic turn angle", "angle_degrees": 175}'::jsonb, -- abnormal_reason
+    8.5,                                -- distance_nm
+    102.0,                              -- avg_speed
+    120.0,                              -- max_speed
+    2,                                  -- point_count
+    'hourly'                            -- source_table
+)
+ON CONFLICT (sig_src_cd, target_id, time_bucket, source_table)
+DO UPDATE SET
+    track_geom = EXCLUDED.track_geom,
+    abnormal_type = EXCLUDED.abnormal_type,
+    abnormal_reason = EXCLUDED.abnormal_reason,
+    distance_nm = EXCLUDED.distance_nm,
+    avg_speed = EXCLUDED.avg_speed,
+    max_speed = EXCLUDED.max_speed,
+    point_count = EXCLUDED.point_count,
+    detected_at = NOW();
+
+-- 4. Verification query
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    abnormal_type,
+    abnormal_reason,
+    distance_nm,
+    avg_speed,
+    max_speed,
+    point_count,
+    source_table,
+    ST_AsText(track_geom) as track_geom_wkt,
+    ST_AsText(track_geom_v2) as track_geom_v2_wkt,
+    detected_at
+FROM signal.t_abnormal_tracks
+WHERE target_id LIKE 'TEST_VESSEL_%'
+ORDER BY time_bucket DESC;
+
+-- 5. Cleanup (delete test data)
+-- DELETE FROM signal.t_abnormal_tracks WHERE target_id LIKE 'TEST_VESSEL_%';
diff --git a/scripts/test-daily-aggregation-fixed.sql b/scripts/test-daily-aggregation-fixed.sql
new file mode 100644
index 0000000..dcbb9b8
--- /dev/null
+++ b/scripts/test-daily-aggregation-fixed.sql
@@ -0,0 +1,496 @@
+-- ========================================
+-- Daily aggregation query validation script
+-- CAST and type-compatibility tests
+-- ========================================
+
+-- 1.
임시 테스트 테이블 생성 +DROP TABLE IF EXISTS test_vessel_tracks_hourly_for_daily CASCADE; +DROP TABLE IF EXISTS test_vessel_tracks_daily CASCADE; + +CREATE TABLE test_vessel_tracks_hourly_for_daily ( + sig_src_cd VARCHAR(10), + target_id VARCHAR(20), + time_bucket TIMESTAMP, + track_geom geometry(LineStringM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + PRIMARY KEY (sig_src_cd, target_id, time_bucket) +); + +CREATE TABLE test_vessel_tracks_daily ( + sig_src_cd VARCHAR(10), + target_id VARCHAR(20), + time_bucket TIMESTAMP, + track_geom geometry(LineStringM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + PRIMARY KEY (sig_src_cd, target_id, time_bucket) +); + +-- 2. 샘플 데이터 삽입 (하루치 시간별 데이터) +-- 시나리오 1: 정상 이동 선박 (24시간 중 일부) +INSERT INTO test_vessel_tracks_hourly_for_daily VALUES +( + '000001', + 'TEST001', + '2025-01-07 00:00:00', + public.ST_GeomFromText('LINESTRING M(126.5 37.5 1736179200, 126.52 37.52 1736182800)', 4326), + 5.5, + 10.5, + 12.0, + 12, + '{"lat": 37.5, "lon": 126.5, "time": "2025-01-07 00:00:00", "sog": 10.5}'::jsonb, + '{"lat": 37.52, "lon": 126.52, "time": "2025-01-07 01:00:00", "sog": 11.0}'::jsonb +), +( + '000001', + 'TEST001', + '2025-01-07 01:00:00', + public.ST_GeomFromText('LINESTRING M(126.52 37.52 1736182800, 126.54 37.54 1736186400)', 4326), + 6.0, + 11.0, + 13.0, + 12, + '{"lat": 37.52, "lon": 126.52, "time": "2025-01-07 01:00:00", "sog": 11.0}'::jsonb, + '{"lat": 37.54, "lon": 126.54, "time": "2025-01-07 02:00:00", "sog": 12.0}'::jsonb +), +( + '000001', + 'TEST001', + '2025-01-07 02:00:00', + public.ST_GeomFromText('LINESTRING M(126.54 37.54 1736186400, 126.56 37.56 1736190000)', 4326), + 5.8, + 10.8, + 12.5, + 12, + '{"lat": 37.54, "lon": 126.54, "time": "2025-01-07 02:00:00", "sog": 10.8}'::jsonb, + '{"lat": 37.56, "lon": 
126.56, "time": "2025-01-07 03:00:00", "sog": 11.5}'::jsonb +), +( + '000001', + 'TEST001', + '2025-01-07 03:00:00', + public.ST_GeomFromText('LINESTRING M(126.56 37.56 1736190000, 126.58 37.58 1736193600)', 4326), + 6.2, + 11.2, + 13.5, + 12, + '{"lat": 37.56, "lon": 126.56, "time": "2025-01-07 03:00:00", "sog": 11.2}'::jsonb, + '{"lat": 37.58, "lon": 126.58, "time": "2025-01-07 04:00:00", "sog": 12.5}'::jsonb +); + +-- 시나리오 2: 정박 선박 +INSERT INTO test_vessel_tracks_hourly_for_daily VALUES +( + '000002', + 'TEST002', + '2025-01-07 00:00:00', + public.ST_GeomFromText('LINESTRING M(129.0 35.0 1736179200, 129.0 35.0 1736182800)', 4326), + 0.0, + 0.0, + 0.5, + 24, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 00:00:00", "sog": 0.0}'::jsonb, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 01:00:00", "sog": 0.0}'::jsonb +), +( + '000002', + 'TEST002', + '2025-01-07 01:00:00', + public.ST_GeomFromText('LINESTRING M(129.0 35.0 1736182800, 129.0 35.0 1736186400)', 4326), + 0.0, + 0.0, + 0.3, + 24, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 01:00:00", "sog": 0.0}'::jsonb, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 02:00:00", "sog": 0.0}'::jsonb +); + +-- 시나리오 3: 단일 시간 데이터 +INSERT INTO test_vessel_tracks_hourly_for_daily VALUES +( + '000003', + 'TEST003', + '2025-01-07 00:00:00', + public.ST_GeomFromText('LINESTRING M(130.0 36.0 1736179200, 130.0 36.0 1736179200)', 4326), + 0.0, + 0.0, + 0.0, + 2, + '{"lat": 36.0, "lon": 130.0, "time": "2025-01-07 00:00:00", "sog": 0.0}'::jsonb, + '{"lat": 36.0, "lon": 130.0, "time": "2025-01-07 00:00:00", "sog": 0.0}'::jsonb +); + +-- 3. 입력 데이터 검증 +SELECT + '=== INPUT DATA VALIDATION ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as points, + public.ST_IsValid(track_geom) as is_valid, + public.ST_AsText(track_geom) as wkt +FROM test_vessel_tracks_hourly_for_daily +ORDER BY sig_src_cd, target_id, time_bucket; + +-- 4. 
Run the actual DailyTrackProcessor SQL (uses CAST for type compatibility)
+-- Vessel: 000001_TEST001, Day: 2025-01-07
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_hourly_for_daily
+    WHERE sig_src_cd = '000001'
+      AND target_id = 'TEST001'
+      AND time_bucket >= CAST('2025-01-07 00:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-08 00:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex ('\\s' matched a literal backslash, always NULL)
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 00:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance, -- meters -> nautical miles
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    '=== DAILY AGGREGATION RESULT (VESSEL 000001_TEST001) ===' as section,
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    public.ST_NPoints(merged_geom) as merged_points,
+    public.ST_IsValid(merged_geom) as is_valid,
+    total_distance,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points,
+    start_time,
+    end_time,
+    start_pos,
+    end_pos,
+    public.ST_AsText(merged_geom) as geom_text
+FROM calculated_tracks;
+
+-- 5. INSERT test (verifies CAST compatibility)
+INSERT INTO test_vessel_tracks_daily
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_hourly_for_daily
+    WHERE sig_src_cd = '000001'
+      AND target_id = 'TEST001'
+      AND time_bucket >= CAST('2025-01-07 00:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-08 00:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 00:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 6.
Anchored-vessel INSERT test
+INSERT INTO test_vessel_tracks_daily
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_hourly_for_daily
+    WHERE sig_src_cd = '000002'
+      AND target_id = 'TEST002'
+      AND time_bucket >= CAST('2025-01-07 00:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-08 00:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex ('\\s' matched a literal backslash, always NULL)
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 00:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 7. Single-hour vessel INSERT test
+INSERT INTO test_vessel_tracks_daily
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_hourly_for_daily
+    WHERE sig_src_cd = '000003'
+      AND target_id = 'TEST003'
+      AND time_bucket >= CAST('2025-01-07 00:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-08 00:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 00:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 8. Final result validation
+SELECT
+    '=== FINAL DAILY AGGREGATION RESULTS ===' as section,
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    public.ST_NPoints(track_geom) as points,
+    public.ST_IsValid(track_geom) as is_valid,
+    distance_nm,
+    avg_speed,
+    max_speed,
+    point_count,
+    public.ST_AsText(track_geom) as wkt
+FROM test_vessel_tracks_daily
+ORDER BY sig_src_cd, target_id;
+
+-- 9.
타입 검증 +SELECT + '=== DATA TYPE VALIDATION ===' as section, + pg_typeof(time_bucket) as time_bucket_type, + pg_typeof(track_geom) as track_geom_type, + pg_typeof(distance_nm) as distance_type, + pg_typeof(avg_speed) as avg_speed_type, + pg_typeof(max_speed) as max_speed_type, + pg_typeof(point_count) as point_count_type, + pg_typeof(start_position) as start_position_type +FROM test_vessel_tracks_daily +LIMIT 1; + +-- 10. 시간 순서 검증 (M값이 증가하는지 확인) +SELECT + '=== TIME ORDERING VALIDATION ===' as section, + sig_src_cd, + target_id, + public.ST_M(public.ST_PointN(track_geom, 1)) as first_m_value, + public.ST_M(public.ST_PointN(track_geom, public.ST_NPoints(track_geom))) as last_m_value, + CASE + WHEN public.ST_M(public.ST_PointN(track_geom, public.ST_NPoints(track_geom))) >= + public.ST_M(public.ST_PointN(track_geom, 1)) + THEN 'PASS' + ELSE 'FAIL' + END as time_order_check +FROM test_vessel_tracks_daily; + +-- 11. 정리 +DROP TABLE IF EXISTS test_vessel_tracks_hourly_for_daily CASCADE; +DROP TABLE IF EXISTS test_vessel_tracks_daily CASCADE; + +-- ======================================== +-- 테스트 완료 +-- 모든 INSERT가 성공하고 타입 에러가 없으면 CAST 사용이 정상 +-- ======================================== diff --git a/scripts/test-hourly-aggregation-fixed.sql b/scripts/test-hourly-aggregation-fixed.sql new file mode 100644 index 0000000..9a99d65 --- /dev/null +++ b/scripts/test-hourly-aggregation-fixed.sql @@ -0,0 +1,484 @@ +-- ======================================== +-- 시간별 집계 쿼리 검증 스크립트 +-- CAST 및 타입 호환성 테스트 +-- ======================================== + +-- 1. 
임시 테스트 테이블 생성 +DROP TABLE IF EXISTS test_vessel_tracks_5min CASCADE; +DROP TABLE IF EXISTS test_vessel_tracks_hourly CASCADE; + +CREATE TABLE test_vessel_tracks_5min ( + sig_src_cd VARCHAR(10), + target_id VARCHAR(20), + time_bucket TIMESTAMP, + track_geom geometry(LineStringM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + PRIMARY KEY (sig_src_cd, target_id, time_bucket) +); + +CREATE TABLE test_vessel_tracks_hourly ( + sig_src_cd VARCHAR(10), + target_id VARCHAR(20), + time_bucket TIMESTAMP, + track_geom geometry(LineStringM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + PRIMARY KEY (sig_src_cd, target_id, time_bucket) +); + +-- 2. 샘플 데이터 삽입 (1시간치 5분 간격 데이터) +-- 시나리오 1: 정상 이동 선박 +INSERT INTO test_vessel_tracks_5min VALUES +( + '000001', + 'TEST001', + '2025-01-07 10:00:00', + public.ST_GeomFromText('LINESTRING M(126.5 37.5 1736215200, 126.51 37.51 1736215260, 126.52 37.52 1736215320)', 4326), + 0.5, + 10.5, + 12.0, + 3, + '{"lat": 37.5, "lon": 126.5, "time": "2025-01-07 10:00:00", "sog": 10.5}'::jsonb, + '{"lat": 37.52, "lon": 126.52, "time": "2025-01-07 10:02:00", "sog": 11.0}'::jsonb +), +( + '000001', + 'TEST001', + '2025-01-07 10:05:00', + public.ST_GeomFromText('LINESTRING M(126.52 37.52 1736215500, 126.53 37.53 1736215560, 126.54 37.54 1736215620)', 4326), + 0.6, + 11.0, + 13.0, + 3, + '{"lat": 37.52, "lon": 126.52, "time": "2025-01-07 10:05:00", "sog": 11.0}'::jsonb, + '{"lat": 37.54, "lon": 126.54, "time": "2025-01-07 10:07:00", "sog": 12.0}'::jsonb +), +( + '000001', + 'TEST001', + '2025-01-07 10:10:00', + public.ST_GeomFromText('LINESTRING M(126.54 37.54 1736215800, 126.55 37.55 1736215860)', 4326), + 0.4, + 9.5, + 11.0, + 2, + '{"lat": 37.54, "lon": 126.54, "time": "2025-01-07 10:10:00", "sog": 9.5}'::jsonb, + '{"lat": 37.55, 
"lon": 126.55, "time": "2025-01-07 10:11:00", "sog": 10.0}'::jsonb +); + +-- 시나리오 2: 정박 선박 (같은 좌표 반복) +INSERT INTO test_vessel_tracks_5min VALUES +( + '000002', + 'TEST002', + '2025-01-07 10:00:00', + public.ST_GeomFromText('LINESTRING M(129.0 35.0 1736215200, 129.0 35.0 1736215260)', 4326), + 0.0, + 0.0, + 0.5, + 2, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 10:00:00", "sog": 0.0}'::jsonb, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 10:01:00", "sog": 0.0}'::jsonb +), +( + '000002', + 'TEST002', + '2025-01-07 10:05:00', + public.ST_GeomFromText('LINESTRING M(129.0 35.0 1736215500, 129.0 35.0 1736215560)', 4326), + 0.0, + 0.0, + 0.3, + 2, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 10:05:00", "sog": 0.0}'::jsonb, + '{"lat": 35.0, "lon": 129.0, "time": "2025-01-07 10:06:00", "sog": 0.0}'::jsonb +); + +-- 시나리오 3: 단일 포인트 (중복 포인트로 유효한 LineString) +INSERT INTO test_vessel_tracks_5min VALUES +( + '000003', + 'TEST003', + '2025-01-07 10:00:00', + public.ST_GeomFromText('LINESTRING M(130.0 36.0 1736215200, 130.0 36.0 1736215200)', 4326), + 0.0, + 0.0, + 0.0, + 1, + '{"lat": 36.0, "lon": 130.0, "time": "2025-01-07 10:00:00", "sog": 0.0}'::jsonb, + '{"lat": 36.0, "lon": 130.0, "time": "2025-01-07 10:00:00", "sog": 0.0}'::jsonb +); + +-- 3. 입력 데이터 검증 +SELECT + '=== INPUT DATA VALIDATION ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as points, + public.ST_IsValid(track_geom) as is_valid, + public.ST_AsText(track_geom) as wkt +FROM test_vessel_tracks_5min +ORDER BY sig_src_cd, target_id, time_bucket; + +-- 4. 
Run the actual HourlyTrackProcessor SQL (uses CAST for type compatibility)
+-- Vessel: 000001_TEST001, Hour: 2025-01-07 10:00:00
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_5min
+    WHERE sig_src_cd = '000001'
+      AND target_id = 'TEST001'
+      AND time_bucket >= CAST('2025-01-07 10:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-07 11:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex ('\\s' matched a literal backslash, always NULL)
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 10:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance, -- meters -> nautical miles
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    '=== HOURLY AGGREGATION RESULT (VESSEL 000001_TEST001) ===' as section,
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    public.ST_NPoints(merged_geom) as merged_points,
+    public.ST_IsValid(merged_geom) as is_valid,
+    total_distance,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points,
+    start_time,
+    end_time,
+    start_pos,
+    end_pos,
+    public.ST_AsText(merged_geom) as geom_text
+FROM calculated_tracks;
+
+-- 5. INSERT test (verifies CAST compatibility)
+INSERT INTO test_vessel_tracks_hourly
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_5min
+    WHERE sig_src_cd = '000001'
+      AND target_id = 'TEST001'
+      AND time_bucket >= CAST('2025-01-07 10:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-07 11:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 10:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 6.
Anchored-vessel INSERT test
+INSERT INTO test_vessel_tracks_hourly
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_5min
+    WHERE sig_src_cd = '000002'
+      AND target_id = 'TEST002'
+      AND time_bucket >= CAST('2025-01-07 10:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-07 11:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex ('\\s' matched a literal backslash, always NULL)
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 10:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 7. Single-point vessel INSERT test
+INSERT INTO test_vessel_tracks_hourly
+WITH ordered_tracks AS (
+    SELECT *
+    FROM test_vessel_tracks_5min
+    WHERE sig_src_cd = '000003'
+      AND target_id = 'TEST003'
+      AND time_bucket >= CAST('2025-01-07 10:00:00' AS timestamp)
+      AND time_bucket < CAST('2025-01-07 11:00:00' AS timestamp)
+      AND track_geom IS NOT NULL
+      AND public.ST_NPoints(track_geom) > 0
+    ORDER BY time_bucket
+),
+merged_coords AS (
+    SELECT
+        sig_src_cd,
+        target_id,
+        string_agg(
+            COALESCE(
+                substring(public.ST_AsText(track_geom) from 'LINESTRING\s*M\s*\((.+)\)'), -- fixed: single-backslash regex
+                substring(public.ST_AsText(track_geom) from '\((.+)\)')
+            ),
+            ','
+            ORDER BY time_bucket
+        ) FILTER (WHERE track_geom IS NOT NULL) as all_coords
+    FROM ordered_tracks
+    GROUP BY sig_src_cd, target_id
+),
+merged_tracks AS (
+    SELECT
+        mc.sig_src_cd,
+        mc.target_id,
+        CAST('2025-01-07 10:00:00' AS timestamp) as time_bucket,
+        public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom,
+        (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed,
+        (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points,
+        (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time,
+        (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time,
+        (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos,
+        (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos
+    FROM merged_coords mc
+),
+calculated_tracks AS (
+    SELECT
+        *,
+        public.ST_Length(merged_geom::geography) / 1852.0 as total_distance,
+        CASE
+            WHEN public.ST_NPoints(merged_geom) > 0 THEN
+                public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) -
+                public.ST_M(public.ST_PointN(merged_geom, 1))
+            ELSE
+                EXTRACT(EPOCH FROM
+                    CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp)
+                )
+        END as time_diff_seconds
+    FROM merged_tracks
+)
+SELECT
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    merged_geom as track_geom,
+    total_distance as distance_nm,
+    CASE
+        WHEN time_diff_seconds > 0 THEN
+            CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2))
+        ELSE 0
+    END as avg_speed,
+    max_speed,
+    total_points as point_count,
+    start_pos as start_position,
+    end_pos as end_position
+FROM calculated_tracks;
+
+-- 8. Final result validation
+SELECT
+    '=== FINAL HOURLY AGGREGATION RESULTS ===' as section,
+    sig_src_cd,
+    target_id,
+    time_bucket,
+    public.ST_NPoints(track_geom) as points,
+    public.ST_IsValid(track_geom) as is_valid,
+    distance_nm,
+    avg_speed,
+    max_speed,
+    point_count,
+    public.ST_AsText(track_geom) as wkt
+FROM test_vessel_tracks_hourly
+ORDER BY sig_src_cd, target_id;
+
+-- 9.
타입 검증 +SELECT + '=== DATA TYPE VALIDATION ===' as section, + pg_typeof(time_bucket) as time_bucket_type, + pg_typeof(track_geom) as track_geom_type, + pg_typeof(distance_nm) as distance_type, + pg_typeof(avg_speed) as avg_speed_type, + pg_typeof(max_speed) as max_speed_type, + pg_typeof(point_count) as point_count_type, + pg_typeof(start_position) as start_position_type +FROM test_vessel_tracks_hourly +LIMIT 1; + +-- 10. 시간 순서 검증 (M값이 증가하는지 확인) +SELECT + '=== TIME ORDERING VALIDATION ===' as section, + sig_src_cd, + target_id, + public.ST_M(public.ST_PointN(track_geom, 1)) as first_m_value, + public.ST_M(public.ST_PointN(track_geom, public.ST_NPoints(track_geom))) as last_m_value, + CASE + WHEN public.ST_M(public.ST_PointN(track_geom, public.ST_NPoints(track_geom))) >= + public.ST_M(public.ST_PointN(track_geom, 1)) + THEN 'PASS' + ELSE 'FAIL' + END as time_order_check +FROM test_vessel_tracks_hourly; + +-- 11. 정리 +DROP TABLE IF EXISTS test_vessel_tracks_5min CASCADE; +DROP TABLE IF EXISTS test_vessel_tracks_hourly CASCADE; + +-- ======================================== +-- 테스트 완료 +-- 모든 INSERT가 성공하고 타입 에러가 없으면 CAST 사용이 정상 +-- ======================================== diff --git a/scripts/test-with-real-data.sql b/scripts/test-with-real-data.sql new file mode 100644 index 0000000..6ebe31b --- /dev/null +++ b/scripts/test-with-real-data.sql @@ -0,0 +1,274 @@ +-- ======================================== +-- 실제 테이블 데이터로 CAST 호환성 테스트 +-- ======================================== + +-- 1. 최근 5분 데이터 샘플 확인 (100개) +SELECT + '=== SAMPLE 5MIN DATA ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as points, + public.ST_IsValid(track_geom) as is_valid +FROM signal.t_vessel_tracks_5min +WHERE track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 +ORDER BY time_bucket DESC +LIMIT 100; + +-- 2. 
테스트할 선박 선정 (최근 1시간 내 5분 데이터가 있는 선박) +WITH recent_vessels AS ( + SELECT + sig_src_cd, + target_id, + DATE_TRUNC('hour', time_bucket) as hour_bucket, + COUNT(*) as record_count, + MIN(time_bucket) as min_time, + MAX(time_bucket) as max_time + FROM signal.t_vessel_tracks_5min + WHERE time_bucket >= CURRENT_TIMESTAMP - INTERVAL '24 hours' + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + GROUP BY sig_src_cd, target_id, DATE_TRUNC('hour', time_bucket) + HAVING COUNT(*) >= 2 + ORDER BY hour_bucket DESC + LIMIT 10 +) +SELECT + '=== TEST CANDIDATE VESSELS ===' as section, + sig_src_cd, + target_id, + hour_bucket, + record_count, + min_time, + max_time +FROM recent_vessels; + +-- 3. 특정 선박의 5분 데이터 상세 확인 +-- 아래 값들을 위 결과에서 선택해서 수정하세요 +-- 예시: sig_src_cd = '000019', target_id = '111440547', hour_bucket = '2025-01-07 10:00:00' +\set test_sig_src_cd '000019' +\set test_target_id '111440547' +\set test_hour_start '''2025-01-07 10:00:00''' +\set test_hour_end '''2025-01-07 11:00:00''' + +SELECT + '=== 5MIN DATA FOR TEST VESSEL ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(track_geom) as points, + public.ST_IsValid(track_geom) as is_valid, + public.ST_GeometryType(track_geom) as geom_type, + public.ST_AsText(track_geom) as wkt, + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)') as regex_v1, + COALESCE( + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)'), + substring(public.ST_AsText(track_geom) from '\\((.+)\\)') + ) as regex_v2 +FROM signal.t_vessel_tracks_5min +WHERE sig_src_cd = :'test_sig_src_cd' + AND target_id = :'test_target_id' + AND time_bucket >= CAST(:test_hour_start AS timestamp) + AND time_bucket < CAST(:test_hour_end AS timestamp) + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 +ORDER BY time_bucket; + +-- 4. 
string_agg 결과 확인 +SELECT + '=== STRING_AGG TEST ===' as section, + sig_src_cd, + target_id, + string_agg( + COALESCE( + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)'), + substring(public.ST_AsText(track_geom) from '\\((.+)\\)') + ), + ',' + ORDER BY time_bucket + ) FILTER (WHERE track_geom IS NOT NULL) as all_coords, + COUNT(*) as track_count +FROM signal.t_vessel_tracks_5min +WHERE sig_src_cd = :'test_sig_src_cd' + AND target_id = :'test_target_id' + AND time_bucket >= CAST(:test_hour_start AS timestamp) + AND time_bucket < CAST(:test_hour_end AS timestamp) + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 +GROUP BY sig_src_cd, target_id; + +-- 5. 병합된 WKT로 geometry 생성 테스트 +WITH ordered_tracks AS ( + SELECT * + FROM signal.t_vessel_tracks_5min + WHERE sig_src_cd = :'test_sig_src_cd' + AND target_id = :'test_target_id' + AND time_bucket >= CAST(:test_hour_start AS timestamp) + AND time_bucket < CAST(:test_hour_end AS timestamp) + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + ORDER BY time_bucket +), +merged_coords AS ( + SELECT + sig_src_cd, + target_id, + string_agg( + COALESCE( + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)'), + substring(public.ST_AsText(track_geom) from '\\((.+)\\)') + ), + ',' + ORDER BY time_bucket + ) FILTER (WHERE track_geom IS NOT NULL) as all_coords + FROM ordered_tracks + GROUP BY sig_src_cd, target_id +) +SELECT + '=== WKT GENERATION TEST ===' as section, + sig_src_cd, + target_id, + 'LINESTRING M(' || all_coords || ')' as full_wkt, + LENGTH(all_coords) as coords_length, + public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326) as test_geom, + public.ST_NPoints(public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326)) as merged_points, + public.ST_IsValid(public.ST_GeomFromText('LINESTRING M(' || all_coords || ')', 4326)) as is_valid +FROM merged_coords; + +-- 6. 
전체 시간별 집계 쿼리 실행 (SELECT만, INSERT 안함) +WITH ordered_tracks AS ( + SELECT * + FROM signal.t_vessel_tracks_5min + WHERE sig_src_cd = :'test_sig_src_cd' + AND target_id = :'test_target_id' + AND time_bucket >= CAST(:test_hour_start AS timestamp) + AND time_bucket < CAST(:test_hour_end AS timestamp) + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + ORDER BY time_bucket +), +merged_coords AS ( + SELECT + sig_src_cd, + target_id, + string_agg( + COALESCE( + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)'), + substring(public.ST_AsText(track_geom) from '\\((.+)\\)') + ), + ',' + ORDER BY time_bucket + ) FILTER (WHERE track_geom IS NOT NULL) as all_coords + FROM ordered_tracks + GROUP BY sig_src_cd, target_id +), +merged_tracks AS ( + SELECT + mc.sig_src_cd, + mc.target_id, + CAST(:test_hour_start AS timestamp) as time_bucket, + public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom, + (SELECT MAX(max_speed) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as max_speed, + (SELECT SUM(point_count) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as total_points, + (SELECT MIN(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as start_time, + (SELECT MAX(time_bucket) FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id) as end_time, + (SELECT start_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket LIMIT 1) as start_pos, + (SELECT end_position FROM ordered_tracks WHERE sig_src_cd = mc.sig_src_cd AND target_id = mc.target_id ORDER BY time_bucket DESC LIMIT 1) as end_pos + FROM merged_coords mc +), +calculated_tracks AS ( + SELECT + *, + public.ST_Length(merged_geom::geography) / 1852.0 as total_distance, + CASE + WHEN public.ST_NPoints(merged_geom) > 0 THEN + 
public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) - + public.ST_M(public.ST_PointN(merged_geom, 1)) + ELSE + EXTRACT(EPOCH FROM + CAST(end_pos->>'time' AS timestamp) - CAST(start_pos->>'time' AS timestamp) + ) + END as time_diff_seconds + FROM merged_tracks +) +SELECT + '=== FULL HOURLY AGGREGATION TEST ===' as section, + sig_src_cd, + target_id, + time_bucket, + public.ST_NPoints(merged_geom) as merged_points, + public.ST_IsValid(merged_geom) as is_valid, + total_distance, + CASE + WHEN time_diff_seconds > 0 THEN + CAST(LEAST((total_distance / (time_diff_seconds / 3600.0)), 9999.99) AS numeric(6,2)) + ELSE 0 + END as avg_speed, + max_speed, + total_points, + start_time, + end_time, + start_pos, + end_pos, + public.ST_AsText(merged_geom) as geom_text, + time_diff_seconds +FROM calculated_tracks; + +-- 7. M값 시간 순서 검증 +WITH ordered_tracks AS ( + SELECT * + FROM signal.t_vessel_tracks_5min + WHERE sig_src_cd = :'test_sig_src_cd' + AND target_id = :'test_target_id' + AND time_bucket >= CAST(:test_hour_start AS timestamp) + AND time_bucket < CAST(:test_hour_end AS timestamp) + AND track_geom IS NOT NULL + AND public.ST_NPoints(track_geom) > 0 + ORDER BY time_bucket +), +merged_coords AS ( + SELECT + sig_src_cd, + target_id, + string_agg( + COALESCE( + substring(public.ST_AsText(track_geom) from 'LINESTRING\\s*M\\s*\\((.+)\\)'), + substring(public.ST_AsText(track_geom) from '\\((.+)\\)') + ), + ',' + ORDER BY time_bucket + ) FILTER (WHERE track_geom IS NOT NULL) as all_coords + FROM ordered_tracks + GROUP BY sig_src_cd, target_id +), +merged_tracks AS ( + SELECT + mc.sig_src_cd, + mc.target_id, + public.ST_GeomFromText('LINESTRING M(' || mc.all_coords || ')', 4326) as merged_geom + FROM merged_coords mc +) +SELECT + '=== TIME ORDERING CHECK ===' as section, + sig_src_cd, + target_id, + public.ST_M(public.ST_PointN(merged_geom, 1)) as first_m_value, + to_timestamp(public.ST_M(public.ST_PointN(merged_geom, 1))) as first_time, + 
public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) as last_m_value, + to_timestamp(public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom)))) as last_time, + CASE + WHEN public.ST_M(public.ST_PointN(merged_geom, public.ST_NPoints(merged_geom))) >= + public.ST_M(public.ST_PointN(merged_geom, 1)) + THEN 'PASS' + ELSE 'FAIL' + END as time_order_check +FROM merged_tracks; + +-- ======================================== +-- 사용 방법: +-- 1. 먼저 쿼리 2번 실행해서 테스트할 선박 선택 +-- 2. \set 변수 값 수정 (라인 48-51) +-- 3. 전체 스크립트 실행 +-- 4. 각 섹션별 결과 확인 +-- ======================================== diff --git a/scripts/vessel-batch-control.sh b/scripts/vessel-batch-control.sh new file mode 100644 index 0000000..687d390 --- /dev/null +++ b/scripts/vessel-batch-control.sh @@ -0,0 +1,215 @@ +#!/bin/bash + +# Vessel Batch 관리 스크립트 +# 시작, 중지, 상태 확인 등 기본 관리 기능 + +# 애플리케이션 경로 +APP_HOME="/devdata/apps/bridge-db-monitoring" +JAR_FILE="$APP_HOME/vessel-batch-aggregation.jar" +PID_FILE="$APP_HOME/vessel-batch.pid" +LOG_DIR="$APP_HOME/logs" + +# Java 17 경로 +JAVA_HOME="/devdata/apps/jdk-17.0.8" +JAVA_BIN="$JAVA_HOME/bin/java" + +# 색상 코드 +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +# 함수: PID 확인 +get_pid() { + if [ -f "$PID_FILE" ]; then + PID=$(cat $PID_FILE) + if kill -0 $PID 2>/dev/null; then + echo $PID + else + rm -f $PID_FILE + echo "" + fi + else + PID=$(pgrep -f "$JAR_FILE") + echo $PID + fi +} + +# 함수: 상태 확인 +status() { + PID=$(get_pid) + if [ ! 
-z "$PID" ]; then + echo -e "${GREEN}✓ Vessel Batch is running (PID: $PID)${NC}" + + # 프로세스 정보 + echo "" + ps aux | grep $PID | grep -v grep + + # Health Check + echo "" + echo "Health Check:" + curl -s http://localhost:8090/actuator/health 2>/dev/null | python -m json.tool || echo "Health endpoint not available" + + # 처리 상태 + echo "" + echo "Processing Status:" + if command -v psql >/dev/null 2>&1; then + psql -h localhost -U mda -d mdadb -c " + SELECT + NOW() - MAX(last_update) as processing_delay, + COUNT(*) as vessel_count + FROM signal.t_vessel_latest_position;" 2>/dev/null || echo "Unable to query database" + fi + + return 0 + else + echo -e "${RED}✗ Vessel Batch is not running${NC}" + return 1 + fi +} + +# 함수: 시작 +start() { + PID=$(get_pid) + if [ ! -z "$PID" ]; then + echo -e "${YELLOW}Vessel Batch is already running (PID: $PID)${NC}" + return 1 + fi + + echo "Starting Vessel Batch..." + cd $APP_HOME + $APP_HOME/run-on-query-server-dev.sh +} + +# 함수: 중지 +stop() { + PID=$(get_pid) + if [ -z "$PID" ]; then + echo -e "${YELLOW}Vessel Batch is not running${NC}" + return 1 + fi + + echo "Stopping Vessel Batch (PID: $PID)..." + kill -15 $PID + + # 종료 대기 + for i in {1..30}; do + if ! kill -0 $PID 2>/dev/null; then + echo -e "${GREEN}✓ Vessel Batch stopped successfully${NC}" + rm -f $PID_FILE + return 0 + fi + echo -n "." + sleep 1 + done + + echo "" + echo -e "${RED}Process did not stop gracefully, force killing...${NC}" + kill -9 $PID + rm -f $PID_FILE +} + +# 함수: 재시작 +restart() { + echo "Restarting Vessel Batch..." + stop + sleep 3 + start +} + +# 함수: 로그 보기 +logs() { + if [ ! -d "$LOG_DIR" ]; then + echo "Log directory not found: $LOG_DIR" + return 1 + fi + + echo "Available log files:" + ls -lh $LOG_DIR/*.log 2>/dev/null + + echo "" + echo "Tailing app.log (Ctrl+C to exit)..." + tail -f $LOG_DIR/app.log +} + +# 함수: 최근 에러 확인 +errors() { + if [ ! 
-f "$LOG_DIR/app.log" ]; then + echo "Log file not found: $LOG_DIR/app.log" + return 1 + fi + + echo "Recent errors (last 50 lines with ERROR):" + grep "ERROR" $LOG_DIR/app.log | tail -50 + + echo "" + echo "Error summary:" + echo "Total errors: $(grep -c "ERROR" $LOG_DIR/app.log)" + echo "Errors today: $(grep "ERROR" $LOG_DIR/app.log | grep "$(date +%Y-%m-%d)" | wc -l)" +} + +# 함수: 성능 통계 +stats() { + echo "Performance Statistics" + echo "====================" + + if [ -f "$LOG_DIR/resource-monitor.csv" ]; then + echo "Recent resource usage:" + tail -5 $LOG_DIR/resource-monitor.csv | column -t -s, + fi + + echo "" + echo "Batch job statistics:" + if command -v psql >/dev/null 2>&1; then + psql -h localhost -U mda -d mdadb -c " + SELECT + job_name, + COUNT(*) as executions, + AVG(EXTRACT(EPOCH FROM (end_time - start_time))/60)::numeric(10,2) as avg_duration_min, + MAX(end_time) as last_execution + FROM batch_job_execution je + JOIN batch_job_instance ji ON je.job_instance_id = ji.job_instance_id + WHERE end_time > CURRENT_DATE - INTERVAL '7 days' + GROUP BY job_name;" 2>/dev/null || echo "Unable to query batch statistics" + fi +} + +# 메인 로직 +case "$1" in + start) + start + ;; + stop) + stop + ;; + restart) + restart + ;; + status) + status + ;; + logs) + logs + ;; + errors) + errors + ;; + stats) + stats + ;; + *) + echo "Usage: $0 {start|stop|restart|status|logs|errors|stats}" + echo "" + echo "Commands:" + echo " start - Start the Vessel Batch application" + echo " stop - Stop the Vessel Batch application" + echo " restart - Restart the Vessel Batch application" + echo " status - Check application status and health" + echo " logs - Tail application logs" + echo " errors - Show recent errors from logs" + echo " stats - Show performance statistics" + exit 1 + ;; +esac + +exit $? 
diff --git a/scripts/vessel-batch-start-prod.sh b/scripts/vessel-batch-start-prod.sh new file mode 100644 index 0000000..8b7d5f4 --- /dev/null +++ b/scripts/vessel-batch-start-prod.sh @@ -0,0 +1,191 @@ +#!/bin/bash + +# Query DB 서버에서 최적화된 실행 스크립트 (PROD 프로파일) +# Rocky Linux 환경에 맞춰 조정됨 +# Java 17 경로 명시적 지정 + +# 애플리케이션 경로 +APP_HOME="/devdata/apps/bridge-db-monitoring" +JAR_FILE="$APP_HOME/vessel-batch-aggregation.jar" + +# Java 17 경로 +JAVA_HOME="/devdata/apps/jdk-17.0.8" +JAVA_BIN="$JAVA_HOME/bin/java" + +# 로그 디렉토리 +LOG_DIR="$APP_HOME/logs" +mkdir -p $LOG_DIR + +echo "================================================" +echo "Vessel Batch Aggregation - PROD Profile" +echo "Start Time: $(date)" +echo "================================================" + +# 경로 확인 +echo "Environment Check:" +echo "- App Home: $APP_HOME" +echo "- JAR File: $JAR_FILE" +echo "- Java Path: $JAVA_BIN" +echo "- Java Version: $($JAVA_BIN -version 2>&1 | head -1)" + +# JAR 파일 존재 확인 +if [ ! -f "$JAR_FILE" ]; then + echo "ERROR: JAR file not found at $JAR_FILE" + exit 1 +fi + +# Java 실행 파일 확인 +if [ ! 
-x "$JAVA_BIN" ]; then + echo "ERROR: Java not found or not executable at $JAVA_BIN" + exit 1 +fi + +# 서버 정보 확인 +echo "" +echo "Server Info:" +echo "- Hostname: $(hostname)" +echo "- CPU Cores: $(nproc)" +echo "- Total Memory: $(free -h | grep Mem | awk '{print $2}')" +echo "- PostgreSQL Version: $(psql --version 2>/dev/null | head -1 || echo 'PostgreSQL client not in PATH')" + +# 환경 변수 설정 (PROD 프로파일) +export SPRING_PROFILES_ACTIVE=prod + +# Query DB와 Batch Meta DB를 localhost로 오버라이드 +export SPRING_DATASOURCE_QUERY_JDBC_URL="jdbc:postgresql://localhost:5432/mdadb?currentSchema=signal&options=-csearch_path=signal,public&assumeMinServerVersion=12&reWriteBatchedInserts=true" +export SPRING_DATASOURCE_BATCH_JDBC_URL="jdbc:postgresql://localhost:5432/mdadb?currentSchema=public&assumeMinServerVersion=12&reWriteBatchedInserts=true" + +# 서버 CPU 코어 수에 따른 병렬 처리 조정 +CPU_CORES=$(nproc) +export VESSEL_BATCH_PARTITION_SIZE=$((CPU_CORES * 2)) +export VESSEL_BATCH_BULK_INSERT_PARALLEL_THREADS=$((CPU_CORES / 2)) + +echo "" +echo "Optimized Settings:" +echo "- Active Profile: PROD" +echo "- Partition Size: $VESSEL_BATCH_PARTITION_SIZE" +echo "- Parallel Threads: $VESSEL_BATCH_BULK_INSERT_PARALLEL_THREADS" +echo "- Query DB: localhost (optimized)" +echo "- Batch Meta DB: localhost (optimized)" + +# JVM 옵션 (서버 메모리에 맞게 조정) +TOTAL_MEM=$(free -g | grep Mem | awk '{print $2}') +JVM_HEAP=$((TOTAL_MEM / 8)) # 전체 메모리의 25% 사용 + +# 최소 16GB, 최대 32GB로 제한 +if [ $JVM_HEAP -lt 8 ]; then + JVM_HEAP=8 +elif [ $JVM_HEAP -gt 16 ]; then + JVM_HEAP=16 +fi + +JAVA_OPTS="-Xms${JVM_HEAP}g -Xmx${JVM_HEAP}g \ + -XX:+UseG1GC \ + -XX:MaxGCPauseMillis=200 \ + -XX:+UseStringDeduplication \ + -XX:+ParallelRefProcEnabled \ + -XX:ParallelGCThreads=$((CPU_CORES / 2)) \ + -XX:ConcGCThreads=$((CPU_CORES / 4)) \ + -XX:+HeapDumpOnOutOfMemoryError \ + -XX:HeapDumpPath=$LOG_DIR/heapdump.hprof \ + -Dfile.encoding=UTF-8 \ + -Duser.timezone=Asia/Seoul \ + -Djava.security.egd=file:/dev/./urandom \ + 
-Dspring.profiles.active=prod" + +echo "- JVM Heap Size: ${JVM_HEAP}GB" + +# 기존 프로세스 확인 및 종료 +echo "" +echo "Checking for existing process..." +PID=$(pgrep -f "$JAR_FILE") +if [ ! -z "$PID" ]; then + echo "Stopping existing process (PID: $PID)..." + kill -15 $PID + + # 프로세스 종료 대기 (최대 30초) + for i in {1..30}; do + if ! kill -0 $PID 2>/dev/null; then + echo "Process stopped successfully." + break + fi + if [ $i -eq 30 ]; then + echo "Force killing process..." + kill -9 $PID + fi + sleep 1 + done +fi + +# 작업 디렉토리로 이동 +cd $APP_HOME + +# 애플리케이션 실행 (nice로 우선순위 조정) +echo "" +echo "Starting application with PROD profile..." +echo "Command: nice -n 10 $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE" +echo "" + +# nohup으로 백그라운드 실행 +nohup nice -n 10 $JAVA_BIN $JAVA_OPTS -jar $JAR_FILE \ + > $LOG_DIR/app.log 2>&1 & + +NEW_PID=$! +echo "Application started with PID: $NEW_PID" + +# PID 파일 생성 +echo $NEW_PID > $APP_HOME/vessel-batch.pid + +# 시작 확인 (30초 대기) +echo "Waiting for application startup..." +STARTUP_SUCCESS=false +for i in {1..30}; do + if grep -q "Started SignalBatchApplication" $LOG_DIR/app.log 2>/dev/null; then + echo "✅ Application started successfully!" + STARTUP_SUCCESS=true + break + fi + echo -n "." + sleep 1 +done + +if [ "$STARTUP_SUCCESS" = false ]; then + echo "" + echo "⚠️ Application startup timeout. Check logs for errors." + echo "Log file: $LOG_DIR/app.log" + tail -20 $LOG_DIR/app.log +fi + +echo "" +echo "================================================" +echo "Deployment Complete!" 
+echo "- Profile: PROD" +echo "- PID: $NEW_PID" +echo "- PID File: $APP_HOME/vessel-batch.pid" +echo "- Log: $LOG_DIR/app.log" +echo "- Monitor: tail -f $LOG_DIR/app.log" +echo "================================================" + +# 초기 상태 확인 +sleep 5 +echo "" +echo "Initial Status Check:" +curl -s http://localhost:8090/actuator/health 2>/dev/null | python -m json.tool || echo "Health endpoint not yet available" + +# 활성 프로파일 확인 +echo "" +echo "Active Profile Check:" +curl -s http://localhost:8090/actuator/env | grep -A 5 "activeProfiles" 2>/dev/null || echo "Env endpoint not yet available" + +# 리소스 사용량 표시 +echo "" +echo "Resource Usage:" +ps aux | grep $NEW_PID | grep -v grep + +# 빠른 명령어 안내 +echo "" +echo "Useful Commands:" +echo "- Stop: kill -15 \$(cat $APP_HOME/vessel-batch.pid)" +echo "- Logs: tail -f $LOG_DIR/app.log" +echo "- Status: curl http://localhost:8090/actuator/health" +echo "- Monitor: $APP_HOME/monitor-query-server.sh" diff --git a/scripts/websocket-load-test.py b/scripts/websocket-load-test.py new file mode 100644 index 0000000..f7aecd1 --- /dev/null +++ b/scripts/websocket-load-test.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +""" +WebSocket 부하 테스트 자동화 스크립트 +""" +import asyncio +import json +import time +import statistics +from datetime import datetime, timedelta +import websockets +import stomper +from concurrent.futures import ThreadPoolExecutor + +class WebSocketLoadTest: + def __init__(self, base_url="ws://10.26.252.48:8090/ws-tracks"): + self.base_url = base_url + self.results = [] + self.active_connections = 0 + + async def single_client_test(self, client_id, duration_seconds=60): + """단일 클라이언트 테스트""" + start_time = time.time() + messages_received = 0 + bytes_received = 0 + errors = 0 + + try: + async with websockets.connect(self.base_url) as websocket: + self.active_connections += 1 + print(f"Client {client_id}: Connected") + + # STOMP CONNECT + connect_frame = stomper.connect(host='/', accept_version='1.2') + await 
websocket.send(connect_frame) + + # Subscribe to data channel + sub_frame = stomper.subscribe('/user/queue/tracks/data', client_id) + await websocket.send(sub_frame) + + # Send query request + query_request = { + "startTime": (datetime.now() - timedelta(days=1)).isoformat(), + "endTime": datetime.now().isoformat(), + "viewport": { + "minLon": 124.0, + "maxLon": 132.0, + "minLat": 33.0, + "maxLat": 38.0 + }, + "filters": { + "minDistance": 10, + "minSpeed": 5 + }, + "chunkSize": 2000 + } + + send_frame = stomper.send('/app/tracks/query', json.dumps(query_request)) + await websocket.send(send_frame) + + # Receive messages + while time.time() - start_time < duration_seconds: + try: + message = await asyncio.wait_for(websocket.recv(), timeout=1.0) + messages_received += 1 + bytes_received += len(message) + + # Parse STOMP frame + frame = stomper.Frame() + frame.parse(message) + + if frame.cmd == 'MESSAGE': + data = json.loads(frame.body) + if data.get('type') == 'complete': + print(f"Client {client_id}: Query completed") + break + + except asyncio.TimeoutError: + continue + except Exception as e: + errors += 1 + print(f"Client {client_id}: Error - {e}") + + except Exception as e: + errors += 1 + print(f"Client {client_id}: Connection error - {e}") + finally: + self.active_connections -= 1 + + # Calculate results + elapsed_time = time.time() - start_time + result = { + 'client_id': client_id, + 'duration': elapsed_time, + 'messages': messages_received, + 'bytes': bytes_received, + 'errors': errors, + 'msg_per_sec': messages_received / elapsed_time if elapsed_time > 0 else 0, + 'mbps': (bytes_received / 1024 / 1024) / elapsed_time if elapsed_time > 0 else 0 + } + + self.results.append(result) + return result + + async def run_load_test(self, num_clients=10, duration=60): + """병렬 부하 테스트 실행""" + print(f"Starting load test with {num_clients} clients for {duration} seconds...") + + tasks = [] + for i in range(num_clients): + task = 
asyncio.create_task(self.single_client_test(i, duration)) + tasks.append(task) + await asyncio.sleep(0.1) # Stagger connections + + # Wait for all clients to complete + await asyncio.gather(*tasks) + + # Print summary + self.print_summary() + + def print_summary(self): + """테스트 결과 요약 출력""" + print("\n" + "="*60) + print("LOAD TEST SUMMARY") + print("="*60) + + total_messages = sum(r['messages'] for r in self.results) + total_bytes = sum(r['bytes'] for r in self.results) + total_errors = sum(r['errors'] for r in self.results) + avg_msg_per_sec = statistics.mean(r['msg_per_sec'] for r in self.results) + avg_mbps = statistics.mean(r['mbps'] for r in self.results) + + print(f"Total Clients: {len(self.results)}") + print(f"Total Messages: {total_messages:,}") + print(f"Total Data: {total_bytes/1024/1024:.2f} MB") + print(f"Total Errors: {total_errors}") + print(f"Avg Messages/sec per client: {avg_msg_per_sec:.2f}") + print(f"Avg Throughput per client: {avg_mbps:.2f} MB/s") + print(f"Total Throughput: {avg_mbps * len(self.results):.2f} MB/s") + + # Error rate + error_rate = (total_errors / len(self.results)) * 100 if self.results else 0 + print(f"Error Rate: {error_rate:.2f}%") + + # Success rate + successful_clients = sum(1 for r in self.results if r['errors'] == 0) + success_rate = (successful_clients / len(self.results)) * 100 if self.results else 0 + print(f"Success Rate: {success_rate:.2f}%") + + print("="*60) + +async def main(): + # Test scenarios + scenarios = [ + {"clients": 10, "duration": 60, "name": "Light Load"}, + {"clients": 50, "duration": 120, "name": "Medium Load"}, + {"clients": 100, "duration": 180, "name": "Heavy Load"} + ] + + for scenario in scenarios: + print(f"\n{'='*60}") + print(f"Running scenario: {scenario['name']}") + print(f"{'='*60}") + + tester = WebSocketLoadTest() + await tester.run_load_test( + num_clients=scenario['clients'], + duration=scenario['duration'] + ) + + # Wait between scenarios + print(f"\nWaiting 30 seconds before next 
scenario...") + await asyncio.sleep(30) + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/sql/V2_snp_schema_migration.sql b/sql/V2_snp_schema_migration.sql new file mode 100644 index 0000000..0462c0d --- /dev/null +++ b/sql/V2_snp_schema_migration.sql @@ -0,0 +1,584 @@ +-- ============================================================ +-- gc-signal-batch V2: SNP API 기반 스키마 (신규 생성) +-- 타겟 DB: snpdb (211.208.115.83), 스키마: signal +-- +-- 핵심 변경: +-- sig_src_cd + target_id → mmsi VARCHAR(20) 단일 식별자 +-- t_vessel_latest_position → t_ais_position (새 구조) +-- 신규: t_vessel_static (정적 정보 이력) +-- +-- 실행 전 확인: +-- 1. PostGIS 확장이 설치되어 있는지 확인 +-- 2. signal 스키마가 존재하는지 확인 +-- 3. 파티션 테이블은 PartitionManager가 런타임에 자동 생성 +-- ============================================================ + +-- 스키마 생성 +CREATE SCHEMA IF NOT EXISTS signal; + +-- PostGIS 확장 활성화 +CREATE EXTENSION IF NOT EXISTS postgis; + +-- ============================================================ +-- 1. AIS 위치/정적 정보 (SNP API 전용, 신규) +-- ============================================================ + +-- t_ais_position: AIS 최신 위치 (MMSI별 1건 UPSERT) +-- 용도: 캐시 복원, 타 프로세스 최신 위치 조회, API 불가 환경 대응 +-- 갱신: 5분 집계 Job에서 캐시 스냅샷 UPSERT +CREATE TABLE IF NOT EXISTS signal.t_ais_position ( + mmsi VARCHAR(20) PRIMARY KEY, + imo BIGINT, + name VARCHAR(50), + callsign VARCHAR(20), + vessel_type VARCHAR(50), + extra_info VARCHAR(200), + lat DOUBLE PRECISION NOT NULL, + lon DOUBLE PRECISION NOT NULL, + geom GEOMETRY(POINT, 4326), + heading DOUBLE PRECISION, + sog DOUBLE PRECISION, + cog DOUBLE PRECISION, + rot INTEGER, + length INTEGER, + width INTEGER, + draught DOUBLE PRECISION, + destination VARCHAR(200), + eta TIMESTAMPTZ, + status VARCHAR(50), + message_timestamp TIMESTAMPTZ NOT NULL, + signal_kind_code VARCHAR(10), + class_type VARCHAR(1), + last_update TIMESTAMPTZ DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_ais_position_geom ON signal.t_ais_position USING GIST (geom); +CREATE INDEX IF NOT EXISTS 
idx_ais_position_signal_kind ON signal.t_ais_position (signal_kind_code); +CREATE INDEX IF NOT EXISTS idx_ais_position_timestamp ON signal.t_ais_position (message_timestamp); + +COMMENT ON TABLE signal.t_ais_position IS 'AIS 최신 위치 (MMSI별 1건, 5분 집계 Job에서 UPSERT)'; +COMMENT ON COLUMN signal.t_ais_position.mmsi IS 'MMSI (VARCHAR — 문자 혼합 MMSI 장비 지원)'; +COMMENT ON COLUMN signal.t_ais_position.signal_kind_code IS 'MDA 범례코드 (SignalKindCode.resolve 결과)'; + +-- t_vessel_static: 정적 정보 이력 (위변조/흘수 변경 추적) +-- 전략: COALESCE + CDC 하이브리드 (HourlyJob에서 저장) +-- 보존: 90일 +CREATE TABLE IF NOT EXISTS signal.t_vessel_static ( + mmsi VARCHAR(20) NOT NULL, + time_bucket TIMESTAMPTZ NOT NULL, + imo BIGINT, + name VARCHAR(50), + callsign VARCHAR(20), + vessel_type VARCHAR(50), + extra_info VARCHAR(200), + length INTEGER, + width INTEGER, + draught DOUBLE PRECISION, + destination VARCHAR(200), + eta TIMESTAMPTZ, + status VARCHAR(50), + signal_kind_code VARCHAR(10), + class_type VARCHAR(1), + PRIMARY KEY (mmsi, time_bucket) +); + +CREATE INDEX IF NOT EXISTS idx_vessel_static_mmsi ON signal.t_vessel_static (mmsi); + +COMMENT ON TABLE signal.t_vessel_static IS '선박 정적 정보 이력 (시간별, COALESCE+CDC). 보존 90일'; + +-- ============================================================ +-- 2. 
핵심 항적 테이블 (5분/시간/일별 — 파티션) +-- ============================================================ + +-- t_vessel_tracks_5min: 5분 단위 항적 (일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_vessel_tracks_5min ( + mmsi VARCHAR(20) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + track_geom GEOMETRY(LINESTRINGM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_vessel_tracks_5min_pkey PRIMARY KEY (mmsi, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_tracks_5min_mmsi ON signal.t_vessel_tracks_5min (mmsi); +CREATE INDEX IF NOT EXISTS idx_tracks_5min_bucket ON signal.t_vessel_tracks_5min (time_bucket); + +COMMENT ON TABLE signal.t_vessel_tracks_5min IS '선박 항적 5분 단위 집계'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.mmsi IS 'MMSI (VARCHAR)'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.track_geom IS 'LineStringM 형식 항적 (M값은 첫 포인트 기준 상대시간 초)'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.start_position IS '시작 위치 JSON {lat, lon, time, sog}'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.end_position IS '종료 위치 JSON {lat, lon, time, sog}'; + +-- t_vessel_tracks_hourly: 시간별 항적 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_vessel_tracks_hourly ( + mmsi VARCHAR(20) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + track_geom GEOMETRY(LINESTRINGM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + start_position JSONB, + end_position JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_vessel_tracks_hourly_pkey PRIMARY KEY (mmsi, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_tracks_hourly_mmsi ON signal.t_vessel_tracks_hourly (mmsi); +CREATE INDEX IF NOT EXISTS idx_tracks_hourly_bucket ON signal.t_vessel_tracks_hourly (time_bucket); +CREATE INDEX IF NOT EXISTS 
idx_tracks_hourly_geom ON signal.t_vessel_tracks_hourly USING GIST (track_geom); + +COMMENT ON TABLE signal.t_vessel_tracks_hourly IS '선박 항적 시간별 집계'; + +-- t_vessel_tracks_daily: 일별 항적 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_vessel_tracks_daily ( + mmsi VARCHAR(20) NOT NULL, + time_bucket DATE NOT NULL, + track_geom GEOMETRY(LINESTRINGM, 4326), + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + operating_hours NUMERIC(4,2), + port_visits JSONB, + start_position JSONB, + end_position JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_vessel_tracks_daily_pkey PRIMARY KEY (mmsi, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_tracks_daily_mmsi ON signal.t_vessel_tracks_daily (mmsi); +CREATE INDEX IF NOT EXISTS idx_tracks_daily_bucket ON signal.t_vessel_tracks_daily (time_bucket); +CREATE INDEX IF NOT EXISTS idx_tracks_daily_geom ON signal.t_vessel_tracks_daily USING GIST (track_geom); + +COMMENT ON TABLE signal.t_vessel_tracks_daily IS '선박 항적 일별 집계'; + +-- ============================================================ +-- 3. 
해구(Grid) 관련 테이블 — 파티션 +-- ============================================================ + +-- t_haegu_definitions: 대해구 정의 (일반 테이블) +CREATE TABLE IF NOT EXISTS signal.t_haegu_definitions ( + haegu_no INTEGER NOT NULL, + min_lat DOUBLE PRECISION NOT NULL, + min_lon DOUBLE PRECISION NOT NULL, + max_lat DOUBLE PRECISION NOT NULL, + max_lon DOUBLE PRECISION NOT NULL, + center_lat DOUBLE PRECISION NOT NULL, + center_lon DOUBLE PRECISION NOT NULL, + geom GEOMETRY(MULTIPOLYGON, 4326) NOT NULL, + center_point GEOMETRY(POINT, 4326) NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_haegu_definitions_pkey PRIMARY KEY (haegu_no) +); + +CREATE INDEX IF NOT EXISTS idx_haegu_definitions_geom ON signal.t_haegu_definitions USING GIST (geom); + +COMMENT ON TABLE signal.t_haegu_definitions IS '대해구 정의 정보'; + +-- t_grid_tiles: 그리드 타일 정의 (일반 테이블) +CREATE TABLE IF NOT EXISTS signal.t_grid_tiles ( + tile_id VARCHAR(50) NOT NULL, + tile_level INTEGER NOT NULL, + haegu_no INTEGER NOT NULL, + sohaegu_no INTEGER, + min_lat DOUBLE PRECISION NOT NULL, + min_lon DOUBLE PRECISION NOT NULL, + max_lat DOUBLE PRECISION NOT NULL, + max_lon DOUBLE PRECISION NOT NULL, + tile_geom GEOMETRY(POLYGON, 4326) NOT NULL, + center_point GEOMETRY(POINT, 4326) NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_grid_tiles_pkey PRIMARY KEY (tile_id) +); + +CREATE INDEX IF NOT EXISTS idx_grid_tiles_tile_geom ON signal.t_grid_tiles USING GIST (tile_geom); +CREATE INDEX IF NOT EXISTS idx_grid_tiles_haegu ON signal.t_grid_tiles (haegu_no); +CREATE INDEX IF NOT EXISTS idx_grid_tiles_level ON signal.t_grid_tiles (tile_level); +CREATE INDEX IF NOT EXISTS idx_grid_tiles_haegu_sohaegu ON signal.t_grid_tiles (haegu_no, sohaegu_no); + +COMMENT ON TABLE signal.t_grid_tiles IS '그리드 타일 정의 (대해구/소해구)'; + +-- t_grid_vessel_tracks: 해구별 선박 항적 (5분, 일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_grid_vessel_tracks ( + haegu_no INTEGER NOT NULL, + mmsi VARCHAR(20) NOT NULL, + time_bucket 
TIMESTAMP NOT NULL, + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + point_count INTEGER, + entry_time TIMESTAMP, + exit_time TIMESTAMP, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_grid_vessel_tracks_pkey PRIMARY KEY (haegu_no, mmsi, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_grid_vessel_tracks_mmsi_time ON signal.t_grid_vessel_tracks (mmsi, time_bucket DESC); +CREATE INDEX IF NOT EXISTS idx_grid_vessel_tracks_haegu_time ON signal.t_grid_vessel_tracks (haegu_no, time_bucket DESC); + +COMMENT ON TABLE signal.t_grid_vessel_tracks IS '해구별 선박 항적 (5분 단위)'; + +-- t_grid_tracks_summary: 해구별 항적 요약 (5분, 일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_grid_tracks_summary ( + haegu_no INTEGER NOT NULL, + time_bucket TIMESTAMP NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + traffic_density NUMERIC(10,4), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_grid_tracks_summary_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +COMMENT ON TABLE signal.t_grid_tracks_summary IS '해구별 5분 단위 항적 요약 통계'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.vessel_list IS '선박별 상세 정보 [{mmsi, distance_nm, avg_speed}]'; + +-- t_grid_tracks_summary_hourly: 해구별 시간별 요약 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_grid_tracks_summary_hourly ( + haegu_no INTEGER NOT NULL, + time_bucket TIMESTAMP NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_grid_tracks_summary_hourly_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_grid_tracks_summary_hourly_time ON signal.t_grid_tracks_summary_hourly (time_bucket DESC, haegu_no); + +COMMENT ON TABLE signal.t_grid_tracks_summary_hourly IS '해구별 시간별 항적 요약 통계'; + +-- t_grid_tracks_summary_daily: 
해구별 일별 요약 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_grid_tracks_summary_daily ( + haegu_no INTEGER NOT NULL, + time_bucket DATE NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_grid_tracks_summary_daily_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_grid_tracks_summary_daily_time ON signal.t_grid_tracks_summary_daily (time_bucket DESC, haegu_no); + +COMMENT ON TABLE signal.t_grid_tracks_summary_daily IS '해구별 일일 항적 요약 통계'; + +-- ============================================================ +-- 4. 영역(Area) 관련 테이블 — 파티션 +-- ============================================================ + +-- t_areas: 사용자 정의 영역 (일반 테이블) +CREATE TABLE IF NOT EXISTS signal.t_areas ( + area_id VARCHAR(50) NOT NULL, + area_name VARCHAR(100) NOT NULL, + area_type VARCHAR(20) NOT NULL, + area_geom GEOMETRY(MULTIPOLYGON, 4326) NOT NULL, + properties JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_areas_pkey PRIMARY KEY (area_id) +); + +CREATE INDEX IF NOT EXISTS idx_t_areas_area_geom ON signal.t_areas USING GIST (area_geom); + +COMMENT ON TABLE signal.t_areas IS '사용자 정의 영역 정보'; + +-- t_area_vessel_tracks: 영역별 선박 항적 (5분, 일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_area_vessel_tracks ( + area_id VARCHAR(50) NOT NULL, + mmsi VARCHAR(20) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + point_count INTEGER, + metrics JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_area_vessel_tracks_pkey PRIMARY KEY (area_id, mmsi, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_area_vessel_tracks_mmsi_time ON signal.t_area_vessel_tracks (mmsi, time_bucket DESC); +CREATE INDEX IF NOT EXISTS idx_area_vessel_tracks_area_time ON signal.t_area_vessel_tracks (area_id, time_bucket DESC); + 
+COMMENT ON TABLE signal.t_area_vessel_tracks IS '영역별 선박 항적 (5분 단위)'; + +-- t_area_tracks_summary: 영역별 항적 요약 (5분, 일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_area_tracks_summary ( + area_id VARCHAR(50) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + metrics_summary JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_area_tracks_summary_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +COMMENT ON TABLE signal.t_area_tracks_summary IS '영역별 5분 단위 항적 요약 통계'; +COMMENT ON COLUMN signal.t_area_tracks_summary.vessel_list IS '선박별 상세 정보 [{mmsi, distance_nm, avg_speed}]'; + +-- t_area_tracks_summary_hourly: 영역별 시간별 요약 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_area_tracks_summary_hourly ( + area_id VARCHAR(50) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + metrics_summary JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_area_tracks_summary_hourly_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_area_tracks_summary_hourly_time ON signal.t_area_tracks_summary_hourly (time_bucket DESC, area_id); + +COMMENT ON TABLE signal.t_area_tracks_summary_hourly IS '영역별 시간별 항적 요약 통계'; + +-- t_area_tracks_summary_daily: 영역별 일별 요약 (월별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_area_tracks_summary_daily ( + area_id VARCHAR(50) NOT NULL, + time_bucket DATE NOT NULL, + total_vessels INTEGER, + total_distance_nm NUMERIC(12,2), + avg_speed NUMERIC(6,2), + vessel_list JSONB, + metrics_summary JSONB, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_area_tracks_summary_daily_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_area_tracks_summary_daily_time ON 
signal.t_area_tracks_summary_daily (time_bucket DESC, area_id); + +COMMENT ON TABLE signal.t_area_tracks_summary_daily IS '영역별 일일 항적 요약 통계'; + +-- t_area_statistics: 영역별 선박 통계 (5분, 일별 파티션) +CREATE TABLE IF NOT EXISTS signal.t_area_statistics ( + area_id VARCHAR(50) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + vessel_count INTEGER DEFAULT 0, + in_count INTEGER DEFAULT 0, + out_count INTEGER DEFAULT 0, + transit_vessels JSONB, + stationary_vessels JSONB, + avg_sog NUMERIC(25,1), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + CONSTRAINT t_area_statistics_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +CREATE INDEX IF NOT EXISTS idx_area_stats_lookup ON signal.t_area_statistics (area_id, time_bucket DESC); + +COMMENT ON TABLE signal.t_area_statistics IS '영역별 5분 단위 선박 통계'; + +-- ============================================================ +-- 5. 비정상 항적 테이블 — 파티션 +-- ============================================================ + +-- t_abnormal_tracks: 비정상 항적 (월별 파티션) +-- id는 GENERATED ALWAYS로 자동 생성 +CREATE TABLE IF NOT EXISTS signal.t_abnormal_tracks ( + id BIGINT GENERATED ALWAYS AS IDENTITY, + mmsi VARCHAR(20) NOT NULL, + time_bucket TIMESTAMP NOT NULL, + track_geom GEOMETRY(LINESTRINGM, 4326), + abnormal_type VARCHAR(50) NOT NULL, + abnormal_reason JSONB NOT NULL, + distance_nm NUMERIC(10,2), + avg_speed NUMERIC(6,2), + max_speed NUMERIC(6,2), + point_count INTEGER, + source_table VARCHAR(50) NOT NULL, + detected_at TIMESTAMP DEFAULT NOW(), + CONSTRAINT t_abnormal_tracks_pkey PRIMARY KEY (id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- ON CONFLICT (mmsi, time_bucket, source_table) 지원 +CREATE UNIQUE INDEX IF NOT EXISTS abnormal_tracks_uk ON signal.t_abnormal_tracks (mmsi, time_bucket, source_table); +CREATE INDEX IF NOT EXISTS idx_abnormal_tracks_mmsi ON signal.t_abnormal_tracks (mmsi); +CREATE INDEX IF NOT EXISTS idx_abnormal_tracks_time ON signal.t_abnormal_tracks (time_bucket); +CREATE INDEX IF NOT EXISTS 
idx_abnormal_tracks_type ON signal.t_abnormal_tracks (abnormal_type); +CREATE INDEX IF NOT EXISTS idx_abnormal_tracks_geom ON signal.t_abnormal_tracks USING GIST (track_geom); + +COMMENT ON TABLE signal.t_abnormal_tracks IS '비정상 선박 항적'; +COMMENT ON COLUMN signal.t_abnormal_tracks.mmsi IS 'MMSI (VARCHAR)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.abnormal_type IS '비정상 유형 (excessive_speed, teleport, impossible_distance, excessive_avg_speed, gap_jump)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.source_table IS '검출 원본 테이블 (t_vessel_tracks_5min/hourly/daily)'; + +-- t_abnormal_track_stats: 비정상 항적 일별 통계 (일반 테이블) +CREATE TABLE IF NOT EXISTS signal.t_abnormal_track_stats ( + stat_date DATE NOT NULL, + abnormal_type VARCHAR(50) NOT NULL, + vessel_count INTEGER NOT NULL, + track_count INTEGER NOT NULL, + total_points INTEGER, + avg_deviation NUMERIC(10,2), + max_deviation NUMERIC(10,2), + created_at TIMESTAMP DEFAULT NOW(), + updated_at TIMESTAMP DEFAULT NOW(), + CONSTRAINT t_abnormal_track_stats_pkey PRIMARY KEY (stat_date, abnormal_type) +); + +CREATE INDEX IF NOT EXISTS idx_abnormal_track_stats_date ON signal.t_abnormal_track_stats (stat_date); + +COMMENT ON TABLE signal.t_abnormal_track_stats IS '비정상 항적 일별 통계'; + +-- ============================================================ +-- 6. 
타일 요약 테이블 — 파티션 +-- ============================================================ + +-- t_tile_summary: 타일별 선박 요약 (5분, 일별 파티션) +-- ON CONFLICT (tile_id, time_bucket) 지원을 위해 UNIQUE 추가 +CREATE TABLE IF NOT EXISTS signal.t_tile_summary ( + tile_id VARCHAR(50) NOT NULL, + tile_level INTEGER NOT NULL, + time_bucket TIMESTAMP NOT NULL, + vessel_count INTEGER DEFAULT 0, + unique_vessels JSONB, + total_points BIGINT DEFAULT 0, + avg_sog NUMERIC(25,1), + max_sog NUMERIC(25,1), + vessel_density NUMERIC(10,6), + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + haegu_no INTEGER, + sohaegu_no INTEGER, + CONSTRAINT t_tile_summary_pkey PRIMARY KEY (tile_id, time_bucket, tile_level) +) PARTITION BY RANGE (time_bucket); + +-- ConcurrentUpdateManager에서 ON CONFLICT (tile_id, time_bucket) 사용 +CREATE UNIQUE INDEX IF NOT EXISTS idx_tile_summary_tile_time_uk ON signal.t_tile_summary (tile_id, time_bucket); +CREATE INDEX IF NOT EXISTS idx_tile_summary_time ON signal.t_tile_summary (time_bucket DESC); +CREATE INDEX IF NOT EXISTS idx_tile_summary_vessel_count ON signal.t_tile_summary (vessel_count DESC); +CREATE INDEX IF NOT EXISTS idx_tile_summary_tile_level ON signal.t_tile_summary (tile_level); + +COMMENT ON TABLE signal.t_tile_summary IS '타일별 5분 단위 선박 요약 통계'; +COMMENT ON COLUMN signal.t_tile_summary.unique_vessels IS '고유 선박 목록 [{mmsi}]'; + +-- ============================================================ +-- 7. 
배치 성능 메트릭 (일반 테이블) +-- ============================================================ + +CREATE TABLE IF NOT EXISTS signal.t_batch_performance_metrics ( + id SERIAL PRIMARY KEY, + job_name VARCHAR(100) NOT NULL, + execution_id BIGINT NOT NULL, + start_time TIMESTAMP NOT NULL, + end_time TIMESTAMP, + duration_seconds BIGINT, + total_read BIGINT, + total_write BIGINT, + throughput_per_sec NUMERIC(10,2), + status VARCHAR(20), + error_message TEXT, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +CREATE INDEX IF NOT EXISTS idx_batch_metrics_job ON signal.t_batch_performance_metrics (job_name, start_time DESC); +CREATE INDEX IF NOT EXISTS idx_batch_metrics_status ON signal.t_batch_performance_metrics (status) WHERE status != 'COMPLETED'; + +COMMENT ON TABLE signal.t_batch_performance_metrics IS '배치 작업 성능 메트릭'; + +-- ============================================================ +-- 8. 초기 파티션 생성 (수동 실행용) +-- PartitionManager가 런타임에 자동 생성하지만, +-- 최초 배포 시 수동으로 미리 생성할 수 있음. +-- ============================================================ + +-- 일별 파티션 생성 함수 +CREATE OR REPLACE FUNCTION signal.create_daily_partition( + parent_table TEXT, + target_date DATE +) RETURNS VOID AS $$ +DECLARE + partition_name TEXT; + start_date DATE; + end_date DATE; +BEGIN + partition_name := parent_table || '_' || to_char(target_date, 'YYMMDD'); + start_date := target_date; + end_date := target_date + INTERVAL '1 day'; + + EXECUTE format( + 'CREATE TABLE IF NOT EXISTS signal.%I PARTITION OF signal.%I FOR VALUES FROM (%L) TO (%L)', + partition_name, parent_table, start_date, end_date + ); +END; +$$ LANGUAGE plpgsql; + +-- 월별 파티션 생성 함수 +CREATE OR REPLACE FUNCTION signal.create_monthly_partition( + parent_table TEXT, + target_date DATE +) RETURNS VOID AS $$ +DECLARE + partition_name TEXT; + start_date DATE; + end_date DATE; +BEGIN + partition_name := parent_table || '_' || to_char(target_date, 'YYYY_MM'); + start_date := date_trunc('month', target_date); + end_date := date_trunc('month', 
target_date) + INTERVAL '1 month'; + + EXECUTE format( + 'CREATE TABLE IF NOT EXISTS signal.%I PARTITION OF signal.%I FOR VALUES FROM (%L) TO (%L)', + partition_name, parent_table, start_date, end_date + ); +END; +$$ LANGUAGE plpgsql; + +-- 현재 월 + 다음 달 파티션 일괄 생성 +DO $$ +DECLARE + today DATE := CURRENT_DATE; + day_offset INTEGER; + daily_tables TEXT[] := ARRAY[ + 't_vessel_tracks_5min', + 't_grid_vessel_tracks', + 't_grid_tracks_summary', + 't_area_vessel_tracks', + 't_area_tracks_summary', + 't_tile_summary', + 't_area_statistics' + ]; + monthly_tables TEXT[] := ARRAY[ + 't_vessel_tracks_hourly', + 't_vessel_tracks_daily', + 't_grid_tracks_summary_hourly', + 't_grid_tracks_summary_daily', + 't_area_tracks_summary_hourly', + 't_area_tracks_summary_daily', + 't_abnormal_tracks' + ]; + tbl TEXT; +BEGIN + -- 일별 파티션: 오늘부터 7일간 + FOREACH tbl IN ARRAY daily_tables LOOP + FOR day_offset IN 0..6 LOOP + PERFORM signal.create_daily_partition(tbl, today + day_offset); + END LOOP; + END LOOP; + + -- 월별 파티션: 이번 달 + 다음 달 + FOREACH tbl IN ARRAY monthly_tables LOOP + PERFORM signal.create_monthly_partition(tbl, today); + PERFORM signal.create_monthly_partition(tbl, (today + INTERVAL '1 month')::DATE); + END LOOP; + + RAISE NOTICE 'Initial partitions created successfully'; +END; +$$; + +-- ============================================================ +-- 9. 
ANALYZE (통계 수집) +-- ============================================================ +ANALYZE signal.t_ais_position; +ANALYZE signal.t_haegu_definitions; +ANALYZE signal.t_grid_tiles; +ANALYZE signal.t_areas; +ANALYZE signal.t_abnormal_track_stats; +ANALYZE signal.t_batch_performance_metrics; diff --git a/sql/convert_to_unix_timestamp.sql b/sql/convert_to_unix_timestamp.sql new file mode 100644 index 0000000..897f0ec --- /dev/null +++ b/sql/convert_to_unix_timestamp.sql @@ -0,0 +1,68 @@ +-- Unix timestamp 변환 함수 +CREATE OR REPLACE FUNCTION signal.convert_to_unix_timestamp( + geom geometry, + base_time timestamp without time zone +) RETURNS geometry AS $$ +DECLARE + wkt_text text; + points text[]; + point_text text; + coords text[]; + result_wkt text; + unix_base bigint; + relative_seconds bigint; + unix_time bigint; + i integer; +BEGIN + IF geom IS NULL THEN + RETURN NULL; + END IF; + + -- Unix timestamp 기준값 + unix_base := EXTRACT(EPOCH FROM base_time AT TIME ZONE 'Asia/Seoul')::bigint; + + -- WKT 텍스트 추출 + wkt_text := ST_AsText(geom); + + -- LINESTRING M(...) 
에서 좌표 부분만 추출
+    wkt_text := substring(wkt_text from 'LINESTRING M\((.*)\)');
+
+    -- 각 포인트를 배열로 분리
+    points := string_to_array(wkt_text, ', ');
+
+    -- 결과 WKT 시작
+    result_wkt := 'LINESTRING M(';
+
+    -- 각 포인트 처리
+    FOR i IN 1..array_length(points, 1) LOOP
+        -- 좌표를 공백으로 분리 (lon lat m)
+        coords := string_to_array(points[i], ' ');
+
+        -- M값(상대시간 초) 추출 및 Unix timestamp로 변환
+        relative_seconds := coords[3]::bigint;
+        unix_time := unix_base + relative_seconds;
+
+        -- 결과에 추가
+        IF i > 1 THEN
+            result_wkt := result_wkt || ', ';
+        END IF;
+        result_wkt := result_wkt || coords[1] || ' ' || coords[2] || ' ' || unix_time;
+    END LOOP;
+
+    result_wkt := result_wkt || ')';
+
+    -- geometry 타입으로 변환하여 반환
+    RETURN ST_GeomFromText(result_wkt, 4326);
+END;
+$$ LANGUAGE plpgsql IMMUTABLE PARALLEL SAFE;
+
+-- 함수 테스트
+SELECT
+    mmsi,
+    -- NOTE: legacy sig_src_cd/target_id replaced by the single mmsi identifier in the V2 schema
+    time_bucket,
+    ST_AsText(track_geom) as original,
+    ST_AsText(signal.convert_to_unix_timestamp(track_geom, time_bucket)) as converted
+FROM signal.t_vessel_tracks_5min
+WHERE track_geom IS NOT NULL
+LIMIT 1;
diff --git a/sql/simple_update_v2.sql b/sql/simple_update_v2.sql
new file mode 100644
index 0000000..db9512d
--- /dev/null
+++ b/sql/simple_update_v2.sql
@@ -0,0 +1,42 @@
+-- hourly 테이블 직접 UPDATE (함수 없이)
+UPDATE signal.t_vessel_tracks_hourly AS h
+SET track_geom_v2 = ST_GeomFromText(
+    REPLACE(
+        REPLACE(ST_AsText(track_geom), 'LINESTRING M(',
+            'LINESTRING M(' ||
+            CASE
+                WHEN ST_M(ST_PointN(track_geom, 1)) = 0
+                THEN EXTRACT(EPOCH FROM time_bucket + INTERVAL '9 hours')::text
+                ELSE (EXTRACT(EPOCH FROM time_bucket + INTERVAL '9 hours')::bigint + ST_M(ST_PointN(track_geom, 1)))::text
+            END || ' '
+        ),
+        ')',
+        EXTRACT(EPOCH FROM time_bucket + INTERVAL '9 hours')::text || ')'
+    ),
+    4326
+)
+WHERE time_bucket = '2025-08-07 14:00:00'
+  AND track_geom IS NOT NULL
+  AND track_geom_v2 IS NULL;
+
+-- daily 테이블 직접 UPDATE
+UPDATE signal.t_vessel_tracks_daily AS d
+SET track_geom_v2 = track_geom -- 임시로 복사 (정확한 변환은 나중에)
+WHERE time_bucket = 
DATE_TRUNC('day', NOW()) + AND track_geom IS NOT NULL + AND track_geom_v2 IS NULL; + +-- 결과 확인 +SELECT + 'hourly' as table_type, + COUNT(*) as total, + COUNT(track_geom_v2) as v2_filled +FROM signal.t_vessel_tracks_hourly +WHERE time_bucket = '2025-08-07 14:00:00' +UNION ALL +SELECT + 'daily' as table_type, + COUNT(*) as total, + COUNT(track_geom_v2) as v2_filled +FROM signal.t_vessel_tracks_daily +WHERE time_bucket = DATE_TRUNC('day', NOW()); diff --git a/sql/update_missing_v2.sql b/sql/update_missing_v2.sql new file mode 100644 index 0000000..061c231 --- /dev/null +++ b/sql/update_missing_v2.sql @@ -0,0 +1,40 @@ +-- Unix timestamp 변환을 위한 간단한 UPDATE 쿼리 +-- 5분 집계 테이블 +UPDATE signal.t_vessel_tracks_5min +SET track_geom_v2 = signal.convert_to_unix_timestamp(track_geom, time_bucket) +WHERE time_bucket >= NOW() - INTERVAL '2 hours' + AND track_geom IS NOT NULL + AND track_geom_v2 IS NULL; + +-- 1시간 집계 테이블 (오후 2시 데이터) +UPDATE signal.t_vessel_tracks_hourly +SET track_geom_v2 = signal.convert_to_unix_timestamp(track_geom, time_bucket) +WHERE time_bucket = '2025-08-07 14:00:00' + AND track_geom IS NOT NULL + AND track_geom_v2 IS NULL; + +-- 일별 집계 테이블 (오늘 데이터) +UPDATE signal.t_vessel_tracks_daily +SET track_geom_v2 = signal.convert_to_unix_timestamp(track_geom, time_bucket) +WHERE time_bucket = DATE_TRUNC('day', NOW()) + AND track_geom IS NOT NULL + AND track_geom_v2 IS NULL; + +-- 결과 확인 +SELECT + 'hourly' as table_type, + COUNT(*) as total_records, + COUNT(track_geom) as v1_count, + COUNT(track_geom_v2) as v2_count +FROM signal.t_vessel_tracks_hourly +WHERE time_bucket = '2025-08-07 14:00:00' + +UNION ALL + +SELECT + 'daily' as table_type, + COUNT(*) as total_records, + COUNT(track_geom) as v1_count, + COUNT(track_geom_v2) as v2_count +FROM signal.t_vessel_tracks_daily +WHERE time_bucket = DATE_TRUNC('day', NOW()); diff --git a/src/main/java/gc/mda/signal_batch/BatchCommandLineRunner.java b/src/main/java/gc/mda/signal_batch/BatchCommandLineRunner.java index 
ad5383d..d244d04 100644 --- a/src/main/java/gc/mda/signal_batch/BatchCommandLineRunner.java +++ b/src/main/java/gc/mda/signal_batch/BatchCommandLineRunner.java @@ -28,8 +28,8 @@ public class BatchCommandLineRunner implements CommandLineRunner { private JobLauncher jobLauncher; @Autowired - @Qualifier("vesselAggregationJob") - private Job vesselAggregationJob; + @Qualifier("vesselTrackAggregationJob") + private Job vesselTrackAggregationJob; private final BatchUtils batchUtils; @@ -48,7 +48,7 @@ public class BatchCommandLineRunner implements CommandLineRunner { log.info("Running batch job from {} to {}", startTime, endTime); JobParameters params = batchUtils.createJobParameters(startTime, endTime); - JobExecution execution = jobLauncher.run(vesselAggregationJob, params); + JobExecution execution = jobLauncher.run(vesselTrackAggregationJob, params); log.info("Batch job completed: {}", execution.getStatus()); } else { diff --git a/src/main/java/gc/mda/signal_batch/batch/job/AisPositionSyncStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/AisPositionSyncStepConfig.java new file mode 100644 index 0000000..c029f38 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/job/AisPositionSyncStepConfig.java @@ -0,0 +1,144 @@ +package gc.mda.signal_batch.batch.job; + +import gc.mda.signal_batch.batch.reader.AisTargetCacheManager; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.Step; +import org.springframework.batch.core.repository.JobRepository; +import org.springframework.batch.core.step.builder.StepBuilder; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Profile; +import org.springframework.jdbc.core.JdbcTemplate; 
+import org.springframework.transaction.PlatformTransactionManager;
+
+import javax.sql.DataSource;
+import java.sql.Timestamp;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+
+/**
+ * 5분 집계 Job 편승: 캐시 스냅샷 → t_ais_position UPSERT
+ *
+ * 용도:
+ * - 서비스 재시작 시 캐시 복원 (ChnPrmShipCacheWarmer 등)
+ * - 캐시 접근 불가 타 프로세스의 최신 위치 조회
+ * - API 연결 불가 환경 대응
+ */
+@Slf4j
+@Configuration
+@Profile("!query")
+@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true)
+public class AisPositionSyncStepConfig {
+
+    private final JobRepository jobRepository;
+    private final DataSource queryDataSource;
+    private final PlatformTransactionManager transactionManager;
+    private final AisTargetCacheManager cacheManager;
+
+    public AisPositionSyncStepConfig(
+            JobRepository jobRepository,
+            @Qualifier("queryDataSource") DataSource queryDataSource,
+            @Qualifier("queryTransactionManager") PlatformTransactionManager transactionManager,
+            AisTargetCacheManager cacheManager) {
+        this.jobRepository = jobRepository;
+        this.queryDataSource = queryDataSource;
+        this.transactionManager = transactionManager;
+        this.cacheManager = cacheManager;
+    }
+
+    @Bean
+    public Step aisPositionSyncStep() {
+        return new StepBuilder("aisPositionSyncStep", jobRepository)
+                .tasklet((contribution, chunkContext) -> {
+                    Collection<AisTargetEntity> entities = cacheManager.getAllValues();
+
+                    if (entities.isEmpty()) {
+                        log.debug("캐시에 데이터 없음 — t_ais_position 동기화 스킵");
+                        return org.springframework.batch.repeat.RepeatStatus.FINISHED;
+                    }
+
+                    JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource);
+
+                    String sql = """
+                        INSERT INTO signal.t_ais_position (
+                            mmsi, imo, name, callsign, vessel_type, extra_info,
+                            lat, lon, geom,
+                            heading, sog, cog, rot,
+                            length, width, draught,
+                            destination, eta, status,
+                            message_timestamp, signal_kind_code, class_type,
+                            last_update
+                        ) VALUES (
+                            ?, ?, ?, ?, ?, ?,
+                            ?, ?, public.ST_SetSRID(public.ST_MakePoint(?, ?), 
4326),
+                            ?, ?, ?, ?,
+                            ?, ?, ?,
+                            ?, ?, ?,
+                            ?, ?, ?,
+                            NOW()
+                        )
+                        ON CONFLICT (mmsi) DO UPDATE SET
+                            imo = EXCLUDED.imo,
+                            name = EXCLUDED.name,
+                            callsign = EXCLUDED.callsign,
+                            vessel_type = EXCLUDED.vessel_type,
+                            extra_info = EXCLUDED.extra_info,
+                            lat = EXCLUDED.lat,
+                            lon = EXCLUDED.lon,
+                            geom = EXCLUDED.geom,
+                            heading = EXCLUDED.heading,
+                            sog = EXCLUDED.sog,
+                            cog = EXCLUDED.cog,
+                            rot = EXCLUDED.rot,
+                            length = EXCLUDED.length,
+                            width = EXCLUDED.width,
+                            draught = EXCLUDED.draught,
+                            destination = EXCLUDED.destination,
+                            eta = EXCLUDED.eta,
+                            status = EXCLUDED.status,
+                            message_timestamp = EXCLUDED.message_timestamp,
+                            signal_kind_code = EXCLUDED.signal_kind_code,
+                            class_type = EXCLUDED.class_type,
+                            last_update = NOW()
+                        """;
+
+                    List<Object[]> batchArgs = new ArrayList<>();
+
+                    for (AisTargetEntity e : entities) {
+                        if (e.getMmsi() == null || e.getLat() == null || e.getLon() == null) {
+                            continue;
+                        }
+
+                        Timestamp msgTs = e.getMessageTimestamp() != null
+                                ? Timestamp.from(e.getMessageTimestamp().toInstant())
+                                : null;
+                        Timestamp etaTs = e.getEta() != null
+                                ? 
Timestamp.from(e.getEta().toInstant()) + : null; + + batchArgs.add(new Object[] { + e.getMmsi(), e.getImo(), e.getName(), e.getCallsign(), + e.getVesselType(), e.getExtraInfo(), + e.getLat(), e.getLon(), + e.getLon(), e.getLat(), // ST_MakePoint(lon, lat) + e.getHeading(), e.getSog(), e.getCog(), e.getRot(), + e.getLength(), e.getWidth(), e.getDraught(), + e.getDestination(), etaTs, e.getStatus(), + msgTs, e.getSignalKindCode(), e.getClassType() + }); + } + + if (!batchArgs.isEmpty()) { + int[] results = jdbcTemplate.batchUpdate(sql, batchArgs); + log.info("t_ais_position 동기화 완료: {} 건 UPSERT", results.length); + } + + return org.springframework.batch.repeat.RepeatStatus.FINISHED; + }, transactionManager) + .build(); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/job/AisTargetImportJobConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/AisTargetImportJobConfig.java new file mode 100644 index 0000000..32168f5 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/job/AisTargetImportJobConfig.java @@ -0,0 +1,96 @@ +package gc.mda.signal_batch.batch.job; + +import gc.mda.signal_batch.batch.processor.AisTargetDataProcessor; +import gc.mda.signal_batch.batch.reader.AisTargetDataReader; +import gc.mda.signal_batch.batch.writer.AisTargetCacheWriter; +import gc.mda.signal_batch.domain.vessel.dto.AisTargetDto; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.Job; +import org.springframework.batch.core.JobExecution; +import org.springframework.batch.core.JobExecutionListener; +import org.springframework.batch.core.Step; +import org.springframework.batch.core.job.builder.JobBuilder; +import org.springframework.batch.core.repository.JobRepository; +import org.springframework.batch.core.step.builder.StepBuilder; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.beans.factory.annotation.Value; +import 
org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
+import org.springframework.context.annotation.Bean;
+import org.springframework.context.annotation.Configuration;
+import org.springframework.context.annotation.Profile;
+import org.springframework.transaction.PlatformTransactionManager;
+import org.springframework.web.reactive.function.client.WebClient;
+
+/**
+ * AIS Target Import Job 설정
+ *
+ * 매 1분 실행: S&P AIS API → DTO 변환 → 캐시 저장
+ * Chunk Size: 50,000 (API 한 번 호출에 ~33,000건)
+ *
+ * DB 저장 없음 — 캐시만 업데이트.
+ * t_ais_position UPSERT는 Phase 3의 5분 집계 Job에서 편승.
+ */
+@Slf4j
+@Configuration
+@Profile("!query")
+@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true)
+public class AisTargetImportJobConfig {
+
+    private final JobRepository jobRepository;
+    private final PlatformTransactionManager transactionManager;
+    private final AisTargetDataProcessor processor;
+    private final AisTargetCacheWriter writer;
+    private final WebClient aisApiWebClient;
+
+    @Value("${app.ais-api.since-seconds:60}")
+    private int sinceSeconds;
+
+    @Value("${app.ais-api.chunk-size:50000}")
+    private int chunkSize;
+
+    public AisTargetImportJobConfig(
+            JobRepository jobRepository,
+            @Qualifier("batchTransactionManager") PlatformTransactionManager transactionManager,
+            AisTargetDataProcessor processor,
+            AisTargetCacheWriter writer,
+            @Qualifier("aisApiWebClient") WebClient aisApiWebClient) {
+        this.jobRepository = jobRepository;
+        this.transactionManager = transactionManager;
+        this.processor = processor;
+        this.writer = writer;
+        this.aisApiWebClient = aisApiWebClient;
+    }
+
+    @Bean(name = "aisTargetImportStep")
+    public Step aisTargetImportStep() {
+        return new StepBuilder("aisTargetImportStep", jobRepository)
+                .<AisTargetDto, AisTargetEntity>chunk(chunkSize, transactionManager)
+                .reader(new AisTargetDataReader(aisApiWebClient, sinceSeconds))
+                .processor(processor)
+                .writer(writer)
+                .build();
+    }
+
+    @Bean(name = "aisTargetImportJob")
+    
public Job aisTargetImportJob() { + return new JobBuilder("aisTargetImportJob", jobRepository) + .start(aisTargetImportStep()) + .listener(new JobExecutionListener() { + @Override + public void beforeJob(JobExecution jobExecution) { + log.info("[aisTargetImportJob] Job 시작"); + } + + @Override + public void afterJob(JobExecution jobExecution) { + log.info("[aisTargetImportJob] Job 완료 - 상태: {}, 처리: {} 건", + jobExecution.getStatus(), + jobExecution.getStepExecutions().stream() + .mapToLong(se -> se.getWriteCount()) + .sum()); + } + }) + .build(); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/job/AreaStatisticsStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/AreaStatisticsStepConfig.java deleted file mode 100644 index 97173c6..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/job/AreaStatisticsStepConfig.java +++ /dev/null @@ -1,220 +0,0 @@ -package gc.mda.signal_batch.batch.job; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.batch.processor.AccumulatingAreaProcessor; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor.AreaStatistics; -import gc.mda.signal_batch.batch.reader.InMemoryVesselDataReader; -import gc.mda.signal_batch.batch.reader.PartitionedReader; -import gc.mda.signal_batch.batch.reader.VesselDataReader; -import gc.mda.signal_batch.batch.writer.UpsertWriter; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.Step; -import org.springframework.batch.core.ExitStatus; -import org.springframework.batch.core.StepExecution; -import org.springframework.batch.core.StepExecutionListener; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.core.partition.support.TaskExecutorPartitionHandler; -import org.springframework.batch.core.repository.JobRepository; -import 
org.springframework.batch.core.step.builder.StepBuilder; -import org.springframework.batch.item.Chunk; -import org.springframework.batch.item.ItemReader; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.ApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Profile; -import org.springframework.core.task.TaskExecutor; -import org.springframework.transaction.PlatformTransactionManager; - -import java.time.LocalDateTime; -import java.util.List; - - -@Slf4j -@Configuration -@Profile("!query") // query 프로파일에서는 배치 작업 비활성화 -@RequiredArgsConstructor -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class AreaStatisticsStepConfig { - - private final JobRepository jobRepository; - private final PlatformTransactionManager queryTransactionManager; - private final VesselDataReader vesselDataReader; - - private final AccumulatingAreaProcessor accumulatingAreaProcessor; - private final AreaStatisticsProcessor areaStatisticsProcessor; - private final UpsertWriter upsertWriter; - private final PartitionedReader partitionedReader; - private final ApplicationContext applicationContext; - - @Value("${vessel.batch.area-statistics.chunk-size:1000}") - private int areaChunkSize; - - @Value("${vessel.batch.area-statistics.batch-size:500}") - private int areaBatchSize; - - @Qualifier("batchTaskExecutor") - private final TaskExecutor batchTaskExecutor; - - @Qualifier("partitionTaskExecutor") - private final TaskExecutor partitionTaskExecutor; - - @Bean - public Step aggregateAreaStatisticsStep() { - // InMemoryVesselDataReader를 ApplicationContext에서 가져옴 - InMemoryVesselDataReader inMemoryReader = 
applicationContext.getBean(InMemoryVesselDataReader.class); - - return new StepBuilder("aggregateAreaStatisticsStep", jobRepository) - .chunk(areaChunkSize, queryTransactionManager) - .reader(inMemoryReader) // 메모리 기반 Reader 사용 - .processor(accumulatingAreaProcessor) - .writer(items -> {}) // 빈 writer, 실제 저장은 listener에서 - .listener(areaStatisticsStepListener()) - .faultTolerant() - .skipLimit(100) - .skip(Exception.class) - .build(); - } - - @Bean - public Step partitionedAreaStatisticsStep() { - return new StepBuilder("partitionedAreaStatisticsStep", jobRepository) - .partitioner("areaStatisticsPartitioner", partitionedReader.dayPartitioner(null)) - .partitionHandler(areaStatisticsPartitionHandler()) - .build(); - } - - @Bean - public TaskExecutorPartitionHandler areaStatisticsPartitionHandler() { - TaskExecutorPartitionHandler handler = new TaskExecutorPartitionHandler(); - handler.setTaskExecutor(partitionTaskExecutor); - handler.setStep(areaStatisticsSlaveStep()); - handler.setGridSize(24); - return handler; - } - - @Bean - public Step areaStatisticsSlaveStep() { - return new StepBuilder("areaStatisticsSlaveStep", jobRepository) - ., List>chunk(50, queryTransactionManager) - .reader(slaveAreaBatchVesselDataReader(null, null, null)) - .processor(areaStatisticsProcessor.batchProcessor()) - .writer(upsertWriter.areaStatisticsWriter()) - .faultTolerant() - .skipLimit(100) - .skip(Exception.class) - .build(); - } - - @Bean - @StepScope - public ItemReader areaVesselDataReader( - @Value("#{jobParameters['startTime']}") String startTimeStr, - @Value("#{jobParameters['endTime']}") String endTimeStr) { - return new ItemReader() { - private ItemReader delegate; - private boolean initialized = false; - - @Override - public VesselData read() throws Exception { - if (!initialized) { - LocalDateTime startTime = startTimeStr != null ? LocalDateTime.parse(startTimeStr) : null; - LocalDateTime endTime = endTimeStr != null ? 
LocalDateTime.parse(endTimeStr) : null; - - // 기존 reader close - if (delegate != null) { - try { - ((org.springframework.batch.item.ItemStream) delegate).close(); - } catch (Exception e) { - log.debug("Failed to close previous reader: {}", e.getMessage()); - } - } - - // 최신 위치만 사용 - delegate = vesselDataReader.vesselLatestPositionReader(startTime, endTime, null); - ((org.springframework.batch.item.ItemStream) delegate).open( - org.springframework.batch.core.scope.context.StepSynchronizationManager - .getContext().getStepExecution().getExecutionContext()); - initialized = true; - } - - VesselData data = delegate.read(); - - // Reader 종료 시 close - if (data == null && delegate != null) { - try { - ((org.springframework.batch.item.ItemStream) delegate).close(); - delegate = null; - initialized = false; - } catch (Exception e) { - log.debug("Failed to close reader on completion: {}", e.getMessage()); - } - } - - return data; - } - }; - } - - @Bean - @StepScope - public ItemReader> slaveAreaBatchVesselDataReader( - @Value("#{stepExecutionContext['startTime']}") String startTime, - @Value("#{stepExecutionContext['endTime']}") String endTime, - @Value("#{stepExecutionContext['partition']}") String partition) { - - return new ItemReader>() { - private ItemReader delegate = vesselDataReader.vesselDataPagingReader( - startTime != null ? LocalDateTime.parse(startTime) : null, - endTime != null ? LocalDateTime.parse(endTime) : null, - partition - ); - - @Override - public List read() throws Exception { - List batch = new java.util.ArrayList<>(); - - for (int i = 0; i < areaBatchSize; i++) { - VesselData item = delegate.read(); - if (item == null) { - break; - } - batch.add(item); - } - - return batch.isEmpty() ? 
null : batch; - } - }; - } - - @Bean - public StepExecutionListener areaStatisticsStepListener() { - return new StepExecutionListener() { - @Override - public ExitStatus afterStep(StepExecution stepExecution) { - // 누적된 데이터를 DB에 저장 - @SuppressWarnings("unchecked") - List statistics = (List) - stepExecution.getExecutionContext().get("areaStatistics"); - - if (statistics != null && !statistics.isEmpty()) { - try { - upsertWriter.areaStatisticsWriter().write( - new Chunk<>(List.of(statistics)) - ); - - log.info("Successfully wrote {} area statistics", statistics.size()); - } catch (Exception e) { - log.error("Failed to write area statistics", e); - throw new RuntimeException(e); - } - } - return stepExecution.getExitStatus(); - } - }; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/job/DailyAggregationStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/DailyAggregationStepConfig.java index 055cae6..1ab85e3 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/DailyAggregationStepConfig.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/DailyAggregationStepConfig.java @@ -117,12 +117,12 @@ public class DailyAggregationStepConfig { LocalDateTime end = LocalDateTime.parse(endTime); String sql = """ - SELECT DISTINCT sig_src_cd, target_id, date_trunc('day', time_bucket) as day_bucket + SELECT DISTINCT mmsi, date_trunc('day', time_bucket) as day_bucket FROM signal.t_vessel_tracks_hourly WHERE time_bucket >= ? AND time_bucket < ? 
- ORDER BY sig_src_cd, target_id, day_bucket + ORDER BY mmsi, day_bucket """; - + return new JdbcCursorItemReaderBuilder() .name("dailyVesselKeyReader") .dataSource(queryDataSource) @@ -132,8 +132,7 @@ public class DailyAggregationStepConfig { ps.setTimestamp(2, java.sql.Timestamp.valueOf(end)); }) .rowMapper((rs, rowNum) -> new VesselTrack.VesselKey( - rs.getString("sig_src_cd"), - rs.getString("target_id"), + rs.getString("mmsi"), rs.getObject("day_bucket", LocalDateTime.class) )) .build(); @@ -226,7 +225,7 @@ public class DailyAggregationStepConfig { FROM ( SELECT haegu_no, jsonb_array_elements(vessel_list) as vessel_list, total_distance_nm, avg_speed, - (vessel_list->>'sig_src_cd') || '_' || (vessel_list->>'target_id') as vessel_key + (vessel_list->>'mmsi') as vessel_key FROM signal.t_grid_tracks_summary_hourly WHERE haegu_no = ? AND time_bucket >= ? @@ -313,7 +312,7 @@ public class DailyAggregationStepConfig { FROM ( SELECT area_id, jsonb_array_elements(vessel_list) as vessel_list, total_distance_nm, avg_speed, - (vessel_list->>'sig_src_cd') || '_' || (vessel_list->>'target_id') as vessel_key + (vessel_list->>'mmsi') as vessel_key FROM signal.t_area_tracks_summary_hourly WHERE area_id = ? AND time_bucket >= ? 
diff --git a/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationJobConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationJobConfig.java index 93d0912..fdece49 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationJobConfig.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationJobConfig.java @@ -23,6 +23,7 @@ public class HourlyAggregationJobConfig { private final JobRepository jobRepository; private final HourlyAggregationStepConfig hourlyAggregationStepConfig; + private final VesselStaticStepConfig vesselStaticStepConfig; private final JobCompletionListener jobCompletionListener; @Bean @@ -34,6 +35,7 @@ public class HourlyAggregationJobConfig { .start(hourlyAggregationStepConfig.mergeHourlyTracksStep()) .next(hourlyAggregationStepConfig.gridHourlySummaryStep()) .next(hourlyAggregationStepConfig.areaHourlySummaryStep()) + .next(vesselStaticStepConfig.vesselStaticSyncStep()) .build(); } diff --git a/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationStepConfig.java index 942b724..b1b8f80 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationStepConfig.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/HourlyAggregationStepConfig.java @@ -117,12 +117,12 @@ public class HourlyAggregationStepConfig { LocalDateTime end = LocalDateTime.parse(endTime); String sql = """ - SELECT DISTINCT sig_src_cd, target_id, date_trunc('hour', time_bucket) as hour_bucket + SELECT DISTINCT mmsi, date_trunc('hour', time_bucket) as hour_bucket FROM signal.t_vessel_tracks_5min WHERE time_bucket >= ? AND time_bucket < ? 
- ORDER BY sig_src_cd, target_id, hour_bucket + ORDER BY mmsi, hour_bucket """; - + return new JdbcCursorItemReaderBuilder() .name("hourlyVesselKeyReader") .dataSource(queryDataSource) @@ -132,8 +132,7 @@ public class HourlyAggregationStepConfig { ps.setTimestamp(2, java.sql.Timestamp.valueOf(end)); }) .rowMapper((rs, rowNum) -> new VesselTrack.VesselKey( - rs.getString("sig_src_cd"), - rs.getString("target_id"), + rs.getString("mmsi"), rs.getObject("hour_bucket", LocalDateTime.class) )) .build(); @@ -222,17 +221,16 @@ public class HourlyAggregationStepConfig { SELECT haegu_no, ?::timestamp as time_bucket, - COUNT(DISTINCT sig_src_cd || '_' || target_id) as total_vessels, + COUNT(DISTINCT mmsi) as total_vessels, SUM(distance_nm) as total_distance_nm, AVG(avg_speed) as avg_speed, jsonb_agg(DISTINCT jsonb_build_object( - 'sig_src_cd', sig_src_cd, - 'target_id', target_id, + 'mmsi', mmsi, 'distance_nm', distance_nm, 'avg_speed', avg_speed )) as vessel_list, NOW() - FROM signal.t_grid_vessel_tracks + FROM signal.t_grid_vessel_tracks WHERE haegu_no = ? AND time_bucket >= ? AND time_bucket < ? @@ -313,17 +311,16 @@ public class HourlyAggregationStepConfig { SELECT area_id, ?::timestamp as time_bucket, - COUNT(DISTINCT sig_src_cd || '_' || target_id) as total_vessels, + COUNT(DISTINCT mmsi) as total_vessels, SUM(distance_nm) as total_distance_nm, AVG(avg_speed) as avg_speed, jsonb_agg(DISTINCT jsonb_build_object( - 'sig_src_cd', sig_src_cd, - 'target_id', target_id, + 'mmsi', mmsi, 'distance_nm', distance_nm, 'avg_speed', avg_speed )) as vessel_list, NOW() - FROM signal.t_area_vessel_tracks + FROM signal.t_area_vessel_tracks WHERE area_id = ? AND time_bucket >= ? AND time_bucket < ? 
diff --git a/src/main/java/gc/mda/signal_batch/batch/job/LatestPositionStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/LatestPositionStepConfig.java deleted file mode 100644 index a2d1062..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/job/LatestPositionStepConfig.java +++ /dev/null @@ -1,178 +0,0 @@ -package gc.mda.signal_batch.batch.job; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.domain.vessel.model.VesselLatestPosition; -import gc.mda.signal_batch.batch.processor.LatestPositionProcessor; -import gc.mda.signal_batch.batch.reader.InMemoryVesselDataReader; -import gc.mda.signal_batch.batch.reader.PartitionedReader; -import gc.mda.signal_batch.batch.reader.VesselDataReader; -import gc.mda.signal_batch.batch.writer.UpsertWriter; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.Step; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.core.partition.support.TaskExecutorPartitionHandler; -import org.springframework.batch.core.repository.JobRepository; -import org.springframework.batch.core.step.builder.StepBuilder; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.batch.item.ItemReader; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.ApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Profile; -import org.springframework.core.task.TaskExecutor; -import org.springframework.retry.RetryPolicy; -import org.springframework.retry.backoff.BackOffPolicy; -import org.springframework.retry.backoff.ExponentialBackOffPolicy; -import 
org.springframework.retry.policy.SimpleRetryPolicy; -import org.springframework.transaction.PlatformTransactionManager; - -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.util.HashMap; -import java.util.Map; - -@Slf4j -@Configuration -@Profile("!query") // query 프로파일에서는 배치 작업 비활성화 -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class LatestPositionStepConfig { - - private final JobRepository jobRepository; - private final PlatformTransactionManager queryTransactionManager; - private final LatestPositionProcessor latestPositionProcessor; - private final UpsertWriter upsertWriter; - private final PartitionedReader partitionedReader; - private final ApplicationContext applicationContext; - private final TaskExecutor batchTaskExecutor; - private final TaskExecutor partitionTaskExecutor; - - public LatestPositionStepConfig( - JobRepository jobRepository, - @Qualifier("queryTransactionManager") PlatformTransactionManager queryTransactionManager, - LatestPositionProcessor latestPositionProcessor, - UpsertWriter upsertWriter, - PartitionedReader partitionedReader, - ApplicationContext applicationContext, - @Qualifier("batchTaskExecutor") TaskExecutor batchTaskExecutor, - @Qualifier("partitionTaskExecutor") TaskExecutor partitionTaskExecutor) { - this.jobRepository = jobRepository; - this.queryTransactionManager = queryTransactionManager; - this.latestPositionProcessor = latestPositionProcessor; - this.upsertWriter = upsertWriter; - this.partitionedReader = partitionedReader; - this.applicationContext = applicationContext; - this.batchTaskExecutor = batchTaskExecutor; - this.partitionTaskExecutor = partitionTaskExecutor; - } - - @Bean - public Step updateLatestPositionStep() { - // InMemoryVesselDataReader를 ApplicationContext에서 가져옴 - InMemoryVesselDataReader inMemoryReader = applicationContext.getBean(InMemoryVesselDataReader.class); - - return new 
StepBuilder("updateLatestPositionStep", jobRepository) - .chunk(10000, queryTransactionManager) - .reader(inMemoryReader) // 메모리 기반 Reader 사용 - .processor(latestPositionProcessor.processor()) - .writer(upsertWriter.latestPositionWriter()) - .faultTolerant() - .retryLimit(3) - .retry(org.springframework.dao.CannotAcquireLockException.class) - .skipLimit(1000) - .skip(org.springframework.dao.EmptyResultDataAccessException.class) - .skip(Exception.class) - .build(); - } - - // 메모리 기반 Reader 사용으로 제거 - // @Bean - // @StepScope - // public ItemReader defaultVesselDataReader() { ... } - - @Bean - public Step partitionedLatestPositionStep() { - return new StepBuilder("partitionedLatestPositionStep", jobRepository) - .partitioner("latestPositionPartitioner", dayPartitioner(null)) - .partitionHandler(latestPositionPartitionHandler()) - .build(); - } - - @Bean - public TaskExecutorPartitionHandler latestPositionPartitionHandler() { - TaskExecutorPartitionHandler handler = new TaskExecutorPartitionHandler(); - handler.setTaskExecutor(partitionTaskExecutor); - handler.setStep(latestPositionSlaveStep()); - handler.setGridSize(24); - return handler; - } - - @Bean - public Step latestPositionSlaveStep() { - return new StepBuilder("latestPositionSlaveStep", jobRepository) - .chunk(3000, queryTransactionManager) - .reader(slaveVesselDataReader(null, null, null)) - .processor(slaveLatestPositionProcessor()) - .writer(upsertWriter.latestPositionWriter()) - .faultTolerant() - .retryPolicy(retryPolicy()) - .backOffPolicy(exponentialBackOffPolicy()) - .skipLimit(50) - .skip(Exception.class) - .noRollback(org.springframework.dao.DuplicateKeyException.class) - .build(); - } - - @Bean - @StepScope - public ItemReader slaveVesselDataReader( - @Value("#{stepExecutionContext['startTime']}") String startTime, - @Value("#{stepExecutionContext['endTime']}") String endTime, - @Value("#{stepExecutionContext['partition']}") String partition) { - - // ApplicationContext에서 VesselDataReader를 가져와서 사용 - 
VesselDataReader reader = applicationContext.getBean(VesselDataReader.class); - - return reader.vesselLatestPositionReader( - LocalDateTime.parse(startTime), - LocalDateTime.parse(endTime), - partition - ); - } - - @Bean - @StepScope - public ItemProcessor slaveLatestPositionProcessor() { - return latestPositionProcessor.processor(); - } - - @Bean - @StepScope - public org.springframework.batch.core.partition.support.Partitioner dayPartitioner( - @Value("#{jobParameters['processingDate']}") String processingDateStr) { - LocalDate processingDate = processingDateStr != null ? LocalDate.parse(processingDateStr) : null; - return partitionedReader.dayPartitioner(processingDate); - } - - @Bean - public RetryPolicy retryPolicy() { - Map, Boolean> retryableExceptions = new HashMap<>(); - retryableExceptions.put(org.springframework.dao.CannotAcquireLockException.class, true); - retryableExceptions.put(org.springframework.dao.DataAccessException.class, true); - - SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy(3, retryableExceptions); - return retryPolicy; - } - - @Bean - public BackOffPolicy exponentialBackOffPolicy() { - ExponentialBackOffPolicy backOffPolicy = new ExponentialBackOffPolicy(); - backOffPolicy.setInitialInterval(1000); // 1초 - backOffPolicy.setMaxInterval(10000); // 최대 10초 - backOffPolicy.setMultiplier(2.0); // 2배씩 증가 - return backOffPolicy; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/job/TileAggregationStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/TileAggregationStepConfig.java deleted file mode 100644 index 6476b80..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/job/TileAggregationStepConfig.java +++ /dev/null @@ -1,350 +0,0 @@ -package gc.mda.signal_batch.batch.job; - -import gc.mda.signal_batch.batch.processor.AccumulatingTileProcessor; -import gc.mda.signal_batch.domain.gis.model.TileStatistics; -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import 
gc.mda.signal_batch.batch.processor.TileAggregationProcessor; -import gc.mda.signal_batch.batch.reader.InMemoryVesselDataReader; -import gc.mda.signal_batch.batch.reader.PartitionedReader; -import gc.mda.signal_batch.batch.reader.VesselDataReader; -import gc.mda.signal_batch.batch.writer.OptimizedBulkInsertWriter; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.Step; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.core.partition.support.TaskExecutorPartitionHandler; -import org.springframework.batch.core.repository.JobRepository; -import org.springframework.batch.core.step.builder.StepBuilder; -import org.springframework.batch.item.Chunk; -import org.springframework.batch.item.ItemWriter; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.batch.item.ItemReader; -import org.springframework.batch.item.support.CompositeItemProcessor; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.ApplicationContext; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Profile; -import org.springframework.core.task.TaskExecutor; -import org.springframework.transaction.PlatformTransactionManager; - -import java.time.LocalDateTime; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -@Slf4j -@Configuration -@Profile("!query") // query 프로파일에서는 배치 작업 비활성화 -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class TileAggregationStepConfig { - - private final JobRepository jobRepository; - private final PlatformTransactionManager 
queryTransactionManager; - private final VesselDataReader vesselDataReader; - private final TileAggregationProcessor tileAggregationProcessor; - private final AccumulatingTileProcessor accumulatingTileProcessor; - private final OptimizedBulkInsertWriter optimizedBulkInsertWriter; - private final PartitionedReader partitionedReader; - private final ApplicationContext applicationContext; - private final TaskExecutor batchTaskExecutor; - private final TaskExecutor partitionTaskExecutor; - - public TileAggregationStepConfig( - JobRepository jobRepository, - @Qualifier("queryTransactionManager") PlatformTransactionManager queryTransactionManager, - VesselDataReader vesselDataReader, - TileAggregationProcessor tileAggregationProcessor, - AccumulatingTileProcessor accumulatingTileProcessor, - OptimizedBulkInsertWriter optimizedBulkInsertWriter, - PartitionedReader partitionedReader, - ApplicationContext applicationContext, - @Qualifier("batchTaskExecutor") TaskExecutor batchTaskExecutor, - @Qualifier("partitionTaskExecutor") TaskExecutor partitionTaskExecutor) { - this.jobRepository = jobRepository; - this.queryTransactionManager = queryTransactionManager; - this.vesselDataReader = vesselDataReader; - this.tileAggregationProcessor = tileAggregationProcessor; - this.accumulatingTileProcessor = accumulatingTileProcessor; - this.optimizedBulkInsertWriter = optimizedBulkInsertWriter; - this.partitionedReader = partitionedReader; - this.applicationContext = applicationContext; - this.batchTaskExecutor = batchTaskExecutor; - this.partitionTaskExecutor = partitionTaskExecutor; - } - - @Bean - public Step aggregateTileStatisticsStep() { - // InMemoryVesselDataReader를 ApplicationContext에서 가져옴 - InMemoryVesselDataReader inMemoryReader = applicationContext.getBean(InMemoryVesselDataReader.class); - - return new StepBuilder("aggregateTileStatisticsStep", jobRepository) - .chunk(50000, queryTransactionManager) - .reader(inMemoryReader) // 메모리 기반 Reader 사용 - 
.processor(accumulatingTileProcessor) - .writer(new AccumulatedTileWriter()) - .listener(tileAggregationStepListener()) - .faultTolerant() - .skipLimit(1000) - .skip(Exception.class) - .build(); - } - - @Bean - @StepScope - public ItemReader tileDataReader( - @Value("#{jobParameters['startTime']}") String startTimeStr, - @Value("#{jobParameters['endTime']}") String endTimeStr) { - return new ItemReader() { - private ItemReader delegate; - private boolean initialized = false; - - @Override - public VesselData read() throws Exception { - if (!initialized) { - LocalDateTime startTime = startTimeStr != null ? LocalDateTime.parse(startTimeStr) : null; - LocalDateTime endTime = endTimeStr != null ? LocalDateTime.parse(endTimeStr) : null; - log.info("Creating tileDataReader with startTime: {}, endTime: {}", startTime, endTime); - - // 기존 reader close - if (delegate != null) { - try { - ((org.springframework.batch.item.ItemStream) delegate).close(); - } catch (Exception e) { - log.debug("Failed to close previous reader: {}", e.getMessage()); - } - } - - // 최신 위치만 사용 - delegate = vesselDataReader.vesselLatestPositionReader(startTime, endTime, null); - ((org.springframework.batch.item.ItemStream) delegate).open( - org.springframework.batch.core.scope.context.StepSynchronizationManager - .getContext().getStepExecution().getExecutionContext()); - initialized = true; - } - - VesselData data = delegate.read(); - - // Reader 종료 시 close - if (data == null && delegate != null) { - try { - ((org.springframework.batch.item.ItemStream) delegate).close(); - delegate = null; - initialized = false; - } catch (Exception e) { - log.debug("Failed to close reader on completion: {}", e.getMessage()); - } - } - - return data; - } - }; - } - - @Bean - public Step partitionedTileAggregationStep() { - return new StepBuilder("partitionedTileAggregationStep", jobRepository) - .partitioner("tileAggregationPartitioner", partitionedReader.dayPartitioner(null)) - 
.partitionHandler(tileAggregationPartitionHandler()) - .build(); - } - - @Bean - public TaskExecutorPartitionHandler tileAggregationPartitionHandler() { - TaskExecutorPartitionHandler handler = new TaskExecutorPartitionHandler(); - handler.setTaskExecutor(partitionTaskExecutor); - handler.setStep(tileAggregationSlaveStep()); - handler.setGridSize(24); - return handler; - } - - @Bean - public Step tileAggregationSlaveStep() { - return new StepBuilder("tileAggregationSlaveStep", jobRepository) - ., List>chunk(50, queryTransactionManager) - .reader(slaveTileBatchVesselDataReader(null, null, null)) - .processor(slaveTileProcessor(null, null)) - .writer(optimizedBulkInsertWriter.tileStatisticsBulkWriter()) - .faultTolerant() - .skipLimit(100) - .skip(Exception.class) - .build(); - } - - @Bean - @StepScope - public ItemReader> tileBatchVesselDataReader( - @Value("#{jobParameters['startTime']}") String startTimeStr, - @Value("#{jobParameters['endTime']}") String endTimeStr) { - LocalDateTime startTime = startTimeStr != null ? LocalDateTime.parse(startTimeStr) : null; - LocalDateTime endTime = endTimeStr != null ? LocalDateTime.parse(endTimeStr) : null; - return new ItemReader>() { - private ItemReader delegate = vesselDataReader.vesselDataPagingReader(startTime, endTime, null); - - @Override - public List read() throws Exception { - List batch = new java.util.ArrayList<>(); - - for (int i = 0; i < 1000; i++) { - VesselData item = delegate.read(); - if (item == null) { - break; - } - batch.add(item); - } - - return batch.isEmpty() ? null : batch; - } - }; - } - - @Bean - @StepScope - public ItemReader> slaveTileBatchVesselDataReader( - @Value("#{stepExecutionContext['startTime']}") String startTime, - @Value("#{stepExecutionContext['endTime']}") String endTime, - @Value("#{stepExecutionContext['partition']}") String partition) { - - return new ItemReader>() { - private ItemReader delegate = vesselDataReader.vesselDataPagingReader( - startTime != null ? 
LocalDateTime.parse(startTime) : null, - endTime != null ? LocalDateTime.parse(endTime) : null, - partition - ); - - @Override - public List read() throws Exception { - List batch = new java.util.ArrayList<>(); - - for (int i = 0; i < 1000; i++) { - VesselData item = delegate.read(); - if (item == null) { - break; - } - batch.add(item); - } - - return batch.isEmpty() ? null : batch; - } - }; - } - - @Bean - @StepScope - public ItemProcessor, List> slaveTileProcessor( - @Value("#{jobParameters['tileLevel']}") Integer tileLevel, - @Value("#{jobParameters['timeBucketMinutes']}") Integer timeBucketMinutes) { - - final int bucketMinutes = (timeBucketMinutes != null) ? timeBucketMinutes : 5; - - // 여러 레벨 처리를 위한 복합 프로세서 - if (tileLevel == null) { - CompositeItemProcessor, List> compositeProcessor = - new CompositeItemProcessor<>(); - - compositeProcessor.setDelegates(Arrays.asList( - tileAggregationProcessor.batchProcessor(0, bucketMinutes), - tileAggregationProcessor.batchProcessor(1, bucketMinutes), - tileAggregationProcessor.batchProcessor(2, bucketMinutes) - )); - - return compositeProcessor; - } else { - return tileAggregationProcessor.batchProcessor(tileLevel, bucketMinutes); - } - } - - @Bean - @StepScope - public ItemProcessor> batchTileProcessor( - @Value("#{jobParameters['tileLevel']}") Integer tileLevel, - @Value("#{jobParameters['timeBucketMinutes']}") Integer timeBucketMinutes) { - - final int level = (tileLevel != null) ? tileLevel : 1; - final int bucketMinutes = (timeBucketMinutes != null) ? 
timeBucketMinutes : 5; - - return new ItemProcessor>() { - private final List buffer = new ArrayList<>(1000); - - @Override - public List process(VesselData item) throws Exception { - if (item == null || !item.isValidPosition()) { - return null; - } - - buffer.add(item); - - // 버퍼가 차면 처리 - if (buffer.size() >= 1000) { - List result = tileAggregationProcessor - .batchProcessor(level, bucketMinutes) - .process(new ArrayList<>(buffer)); - buffer.clear(); - return result; - } - - return null; - } - }; - } - - /** - * 누적된 결과를 한 번에 처리하는 Writer - */ - private class AccumulatedTileWriter implements ItemWriter { - @Override - public void write(Chunk chunk) throws Exception { - // 대부분의 아이템은 null일 것임 (processor에서 null 반환) - // 실제 데이터는 Step 종료 시 처리됨 - log.debug("AccumulatedTileWriter called with {} items", chunk.size()); - } - } - - /** - * Step 종료 후 누적된 데이터를 처리하는 리스너 - */ - @Bean - @StepScope - public org.springframework.batch.core.StepExecutionListener tileAggregationStepListener() { - return new org.springframework.batch.core.StepExecutionListener() { - @Override - public void beforeStep(org.springframework.batch.core.StepExecution stepExecution) { - // beforeStep에서는 특별한 처리 없음 - } - - @Override - public org.springframework.batch.core.ExitStatus afterStep(org.springframework.batch.core.StepExecution stepExecution) { - log.info("[TileAggregationStepListener] afterStep called"); - - try { - // AccumulatingTileProcessor에서 직접 결과 가져오기 - List accumulatedTiles = accumulatingTileProcessor.getAccumulatedResults(); - log.info("[TileAggregationStepListener] Retrieved {} tiles from processor", - accumulatedTiles != null ? 
accumulatedTiles.size() : 0); - - if (accumulatedTiles != null && !accumulatedTiles.isEmpty()) { - log.info("Writing {} accumulated tiles to database", accumulatedTiles.size()); - - // Bulk Writer를 사용하여 한 번에 저장 - ItemWriter> writer = optimizedBulkInsertWriter.tileStatisticsBulkWriter(); - Chunk> chunk = new Chunk<>(); - chunk.add(accumulatedTiles); - writer.write(chunk); - - log.info("Successfully wrote all accumulated tiles"); - stepExecution.setWriteCount(accumulatedTiles.size()); - } else { - log.warn("[TileAggregationStepListener] No tiles to write!"); - } - - return stepExecution.getExitStatus(); - } catch (Exception e) { - log.error("Failed to write accumulated tiles", e); - return org.springframework.batch.core.ExitStatus.FAILED; - } - } - }; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselAggregationJobConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselAggregationJobConfig.java deleted file mode 100644 index 86d4989..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/job/VesselAggregationJobConfig.java +++ /dev/null @@ -1,78 +0,0 @@ -package gc.mda.signal_batch.batch.job; - -import gc.mda.signal_batch.global.util.SharedDataJobListener; -import gc.mda.signal_batch.global.util.VesselDataHolder; -import gc.mda.signal_batch.batch.listener.JobCompletionListener; -import gc.mda.signal_batch.batch.listener.PerformanceOptimizationListener; -import gc.mda.signal_batch.batch.reader.InMemoryVesselDataReader; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.Job; -import org.springframework.batch.core.JobParametersValidator; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.core.job.DefaultJobParametersValidator; -import org.springframework.batch.core.job.builder.JobBuilder; -import org.springframework.batch.core.launch.support.RunIdIncrementer; -import 
org.springframework.batch.core.repository.JobRepository; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.Profile; - - -@Slf4j -@Configuration -@Profile("!query") // query 프로파일에서는 배치 작업 비활성화 -@RequiredArgsConstructor -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class VesselAggregationJobConfig { - - private final JobRepository jobRepository; - private final LatestPositionStepConfig latestPositionStepConfig; - private final TileAggregationStepConfig tileAggregationStepConfig; - private final AreaStatisticsStepConfig areaStatisticsStepConfig; - private final JobCompletionListener jobCompletionListener; - private final SharedDataJobListener sharedDataJobListener; - private final VesselDataHolder vesselDataHolder; - private final PerformanceOptimizationListener performanceOptimizationListener; - - @Bean - public Job vesselAggregationJob() { - return new JobBuilder("vesselAggregationJob", jobRepository) - .incrementer(new RunIdIncrementer()) - .validator(jobParametersValidator()) - .listener(jobCompletionListener) - .listener(sharedDataJobListener) // 데이터 로드 리스너 추가 - .listener(performanceOptimizationListener) // 성능 최적화 리스너 추가 - .start(latestPositionStepConfig.updateLatestPositionStep()) - .next(tileAggregationStepConfig.aggregateTileStatisticsStep()) - .next(areaStatisticsStepConfig.aggregateAreaStatisticsStep()) - .build(); - } - - @Bean - @StepScope - public InMemoryVesselDataReader inMemoryVesselDataReader() { - return new InMemoryVesselDataReader(vesselDataHolder); - } - - @Bean - public Job vesselDailyPositionJob() { - return new JobBuilder("vesselDailyPositionJob", jobRepository) - .incrementer(new RunIdIncrementer()) - .listener(jobCompletionListener) - 
.start(latestPositionStepConfig.partitionedLatestPositionStep()) - .next(tileAggregationStepConfig.partitionedTileAggregationStep()) - .next(areaStatisticsStepConfig.partitionedAreaStatisticsStep()) - .build(); - } - - @Bean - public JobParametersValidator jobParametersValidator() { - DefaultJobParametersValidator validator = new DefaultJobParametersValidator(); - validator.setRequiredKeys(new String[]{"startTime", "endTime"}); - validator.setOptionalKeys(new String[]{"executionTime", "processingDate", - "tileLevel", "partitionCount"}); - return validator; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselBatchScheduler.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselBatchScheduler.java index 98e6f8b..ab8fb31 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/VesselBatchScheduler.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/VesselBatchScheduler.java @@ -29,10 +29,6 @@ public class VesselBatchScheduler { @Qualifier("asyncJobLauncher") private JobLauncher jobLauncher; - @Autowired - @Qualifier("vesselAggregationJob") - private Job vesselAggregationJob; - @Autowired @Qualifier("vesselTrackAggregationJob") private Job vesselTrackAggregationJob; @@ -45,55 +41,41 @@ public class VesselBatchScheduler { @Qualifier("dailyAggregationJob") private Job dailyAggregationJob; + @Autowired(required = false) + @Qualifier("aisTargetImportJob") + private Job aisTargetImportJob; + @Value("${vessel.batch.scheduler.enabled:true}") private boolean schedulerEnabled; - @Value("${vessel.batch.scheduler.incremental.delay-minutes:2}") - private int incrementalDelayMinutes; - @Value("${vessel.batch.abnormal-detection.enabled:true}") private boolean abnormalDetectionEnabled; + /** - * 5분 단위 증분 처리 (3분 지연으로 데이터 수집 대기) - * 매 5분마다 실행 (0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55분) + * S&P AIS API 수집 (매 1분 15초) + * 캐시에 최신 위치 저장 → 5분 집계 Job에서 활용 */ - @Scheduled(cron = "0 3,8,13,18,23,28,33,38,43,48,53,58 * * * *") - public void 
runIncrementalAggregation() { - if (!schedulerEnabled) { - log.debug("Scheduler is disabled"); + @Scheduled(cron = "15 * * * * *") + public void runAisTargetImport() { + if (!schedulerEnabled || aisTargetImportJob == null) { return; } try { - // 3분 전 데이터를 처리 (데이터 수집 지연 고려) LocalDateTime now = LocalDateTime.now(); - LocalDateTime endTime = now.minusMinutes(incrementalDelayMinutes); - LocalDateTime startTime = endTime.minusMinutes(5); - - log.info("Starting incremental aggregation for period: {} to {}", startTime, endTime); - JobParameters params = new JobParametersBuilder() - .addString("startTime", startTime.withNano(0).toString()) - .addString("endTime", endTime.withNano(0).toString()) - .addString("jobType", "INCREMENTAL") - .addString("timeBucketMinutes", "5") // 5분 단위 집계 - // executionTime 제거 - startTime/endTime만으로 고유성 보장 + .addString("executionTime", now.toString()) .toJobParameters(); - JobExecution execution = jobLauncher.run(vesselAggregationJob, params); - - log.info("Incremental aggregation started with execution ID: {}", execution.getId()); - + JobExecution execution = jobLauncher.run(aisTargetImportJob, params); + log.debug("[AIS Import] 실행 ID: {}", execution.getId()); } catch (JobExecutionAlreadyRunningException e) { - log.warn("Previous incremental job is still running, skipping this execution"); + log.warn("[AIS Import] 이전 Job 실행 중, 스킵"); } catch (Exception e) { - log.error("Failed to start incremental aggregation", e); - // 중복 키 오류인 경우 경고로만 처리 - if (e.getMessage().contains("중복된 키") || e.getMessage().contains("duplicate key")) { - log.warn("Duplicate key detected, job may have already processed this time bucket"); - } + log.error("[AIS Import] Job 실행 실패", e); } } + // /** * 5분 단위 궤적 집계 처리 (4분 지연으로 위치 집계 이후 실행) @@ -118,7 +100,7 @@ public class VesselBatchScheduler { try { // 4분 전 데이터를 처리 (위치 집계 완료 후) LocalDateTime now = LocalDateTime.now(); - LocalDateTime endTime = now.minusMinutes(incrementalDelayMinutes + 1); // 3+1=4분 지연 + LocalDateTime endTime = 
now.minusMinutes(4); // 4분 지연 (캐시 기반이므로 고정) LocalDateTime startTime = endTime.minusMinutes(5); // 5분 버킷 계산 diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselPositionCacheRefreshScheduler.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselPositionCacheRefreshScheduler.java deleted file mode 100644 index 19f08f6..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/job/VesselPositionCacheRefreshScheduler.java +++ /dev/null @@ -1,194 +0,0 @@ -package gc.mda.signal_batch.batch.job; - -import gc.mda.signal_batch.domain.vessel.dto.RecentVesselPositionDto; -import gc.mda.signal_batch.domain.vessel.service.VesselLatestPositionCache; -import gc.mda.signal_batch.global.util.ShipKindCodeConverter; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Profile; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.scheduling.annotation.Scheduled; -import org.springframework.stereotype.Component; - -import java.math.BigDecimal; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.LocalDateTime; -import java.util.List; - -/** - * 선박 최신 위치 캐시 갱신 스케줄러 - * - * 실행 주기: 1분마다 (매분 0초) - * 데이터 소스: Collect DB (sig_test 테이블) - * 처리 방식: 읽기 전용 (DB에 쓰기 없음, 캐시만 업데이트) - * - * 동작 흐름: - * 1. 매분 0초에 실행 - * 2. 최근 2분치 데이터를 DB에서 조회 (수집 지연 고려) - * 3. DISTINCT ON으로 선박별 최신 위치만 추출 - * 4. 
캐시에 업데이트 - * - * 기존 배치와의 관계: - * - 기존 5분 배치는 그대로 유지 (DB 저장) - * - 이 스케줄러는 캐시만 관리 (읽기 전용) - * - 충돌 없음 - */ -@Slf4j -@Component -@Profile("!query") // query 프로파일에서는 캐시 갱신 스케줄러 비활성화 -@RequiredArgsConstructor -@ConditionalOnProperty(name = "vessel.batch.cache.latest-position.enabled", havingValue = "true", matchIfMissing = false) -public class VesselPositionCacheRefreshScheduler { - - @Qualifier("collectJdbcTemplate") - private final JdbcTemplate collectJdbcTemplate; - - private final VesselLatestPositionCache cache; - - @Value("${vessel.batch.cache.latest-position.refresh-interval-minutes:2}") - private int refreshIntervalMinutes; - - private volatile boolean isRunning = false; - - /** - * 1분마다 캐시 갱신 - * 매분 0초에 실행 (예: 10:00:00, 10:01:00, 10:02:00...) - */ - @Scheduled(cron = "0 * * * * *") - public void refreshCache() { - // 동시 실행 방지 - if (isRunning) { - log.warn("Previous cache refresh is still running, skipping this execution"); - return; - } - - isRunning = true; - long startTime = System.currentTimeMillis(); - - try { - // 최근 N분치 데이터 조회 (수집 지연 고려) - List positions = fetchLatestPositions(); - - if (positions.isEmpty()) { - log.warn("No vessel positions found in last {} minutes", refreshIntervalMinutes); - return; - } - - // 캐시 업데이트 - cache.putAll(positions); - - long duration = System.currentTimeMillis() - startTime; - log.info("Cache refresh completed in {}ms (fetched {} positions from DB)", - duration, positions.size()); - - // 캐시 통계 로깅 (5분마다만) - if (LocalDateTime.now().getMinute() % 5 == 0) { - logCacheStats(); - } - - } catch (Exception e) { - log.error("Failed to refresh cache", e); - } finally { - isRunning = false; - } - } - - /** - * DB에서 최신 위치 데이터 조회 - */ - private List fetchLatestPositions() { - LocalDateTime endTime = LocalDateTime.now(); - LocalDateTime startTime = endTime.minusMinutes(refreshIntervalMinutes); - - String sql = """ - SELECT DISTINCT ON (sig_src_cd, target_id) - sig_src_cd, - target_id, - lon, - lat, - sog, - cog, - ship_nm, - ship_ty, - 
message_time as last_update - FROM signal.sig_test - WHERE message_time >= ? AND message_time < ? - AND sig_src_cd != '000005' - AND length(target_id) > 5 - AND lat BETWEEN -90 AND 90 - AND lon BETWEEN -180 AND 180 - ORDER BY sig_src_cd, target_id, message_time DESC - """; - - try { - return collectJdbcTemplate.query(sql, - new Object[]{Timestamp.valueOf(startTime), Timestamp.valueOf(endTime)}, - new VesselPositionRowMapper()); - - } catch (Exception e) { - log.error("Failed to fetch positions from DB", e); - return List.of(); - } - } - - /** - * 캐시 통계 로깅 - */ - private void logCacheStats() { - try { - VesselLatestPositionCache.CacheStats stats = cache.getStats(); - log.info("Cache Stats - Size: {}, HitRate: {}%, MissRate: {}%, Hits: {}, Misses: {}", - stats.currentSize(), - String.format("%.2f", stats.hitRate()), - String.format("%.2f", stats.missRate()), - stats.hitCount(), - stats.missCount()); - } catch (Exception e) { - log.warn("Failed to get cache stats", e); - } - } - - /** - * RowMapper 구현 - */ - private static class VesselPositionRowMapper implements RowMapper { - @Override - public RecentVesselPositionDto mapRow(ResultSet rs, int rowNum) throws SQLException { - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); - String shipTy = rs.getString("ship_ty"); - - // shipKindCode 계산 - String shipKindCode = ShipKindCodeConverter.getShipKindCode(sigSrcCd, shipTy); - - // nationalCode 계산 - String nationalCode; - if ("000001".equals(sigSrcCd) && targetId != null && targetId.length() >= 3) { - nationalCode = targetId.substring(0, 3); - } else { - nationalCode = "440"; // 기본값 - } - - return RecentVesselPositionDto.builder() - .sigSrcCd(sigSrcCd) - .targetId(targetId) - .lon(rs.getDouble("lon")) - .lat(rs.getDouble("lat")) - .sog(rs.getBigDecimal("sog")) - .cog(rs.getBigDecimal("cog")) - .shipNm(rs.getString("ship_nm")) - .shipTy(shipTy) - .shipKindCode(shipKindCode) - .nationalCode(nationalCode) - 
.lastUpdate(rs.getTimestamp("last_update") != null ? - rs.getTimestamp("last_update").toLocalDateTime() : null) - .build(); - } - } -} diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselStaticStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselStaticStepConfig.java new file mode 100644 index 0000000..e282052 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/job/VesselStaticStepConfig.java @@ -0,0 +1,239 @@ +package gc.mda.signal_batch.batch.job; + +import gc.mda.signal_batch.batch.reader.AisTargetCacheManager; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.Step; +import org.springframework.batch.core.repository.JobRepository; +import org.springframework.batch.core.step.builder.StepBuilder; +import org.springframework.beans.factory.annotation.Qualifier; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Profile; +import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.transaction.PlatformTransactionManager; + +import javax.sql.DataSource; +import java.sql.Timestamp; +import java.time.LocalDateTime; +import java.util.*; + +/** + * HourlyJob 편승: 정적 정보 COALESCE + CDC → t_vessel_static INSERT + * + * 전략: + * 1. COALESCE: 캐시에서 직전 1시간 데이터 → 필드별 lastNonEmpty 조합 + * 2. CDC: 이전 저장 레코드와 비교 → 변경 시에만 INSERT + * + * 조회: WHERE mmsi=? AND time_bucket <= ? 
ORDER BY time_bucket DESC LIMIT 1 + */ +@Slf4j +@Configuration +@Profile("!query") +@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) +public class VesselStaticStepConfig { + + private final JobRepository jobRepository; + private final DataSource queryDataSource; + private final PlatformTransactionManager transactionManager; + private final AisTargetCacheManager cacheManager; + + public VesselStaticStepConfig( + JobRepository jobRepository, + @Qualifier("queryDataSource") DataSource queryDataSource, + @Qualifier("queryTransactionManager") PlatformTransactionManager transactionManager, + AisTargetCacheManager cacheManager) { + this.jobRepository = jobRepository; + this.queryDataSource = queryDataSource; + this.transactionManager = transactionManager; + this.cacheManager = cacheManager; + } + + @Bean + public Step vesselStaticSyncStep() { + return new StepBuilder("vesselStaticSyncStep", jobRepository) + .tasklet((contribution, chunkContext) -> { + // 1. 캐시에서 전체 데이터 → MMSI별 그룹 + Collection allEntities = cacheManager.getAllValues(); + + if (allEntities.isEmpty()) { + log.debug("캐시에 데이터 없음 — t_vessel_static 동기화 스킵"); + return org.springframework.batch.repeat.RepeatStatus.FINISHED; + } + + // 시간 버킷: 현재 시각의 정각 + LocalDateTime hourBucket = LocalDateTime.now() + .withMinute(0).withSecond(0).withNano(0); + + // MMSI별 최신 데이터 (필드별 COALESCE) + Map coalesced = coalesceByMmsi(allEntities); + + JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); + + // 2. CDC: 이전 레코드와 비교 → 변경 시에만 INSERT + String selectPrevSql = """ + SELECT imo, name, callsign, vessel_type, extra_info, + length, width, draught, destination, status, + signal_kind_code, class_type + FROM signal.t_vessel_static + WHERE mmsi = ? AND time_bucket <= ? 
+ ORDER BY time_bucket DESC + LIMIT 1 + """; + + String insertSql = """ + INSERT INTO signal.t_vessel_static ( + mmsi, time_bucket, imo, name, callsign, + vessel_type, extra_info, length, width, draught, + destination, eta, status, signal_kind_code, class_type + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + ON CONFLICT (mmsi, time_bucket) DO UPDATE SET + imo = EXCLUDED.imo, + name = EXCLUDED.name, + callsign = EXCLUDED.callsign, + vessel_type = EXCLUDED.vessel_type, + extra_info = EXCLUDED.extra_info, + length = EXCLUDED.length, + width = EXCLUDED.width, + draught = EXCLUDED.draught, + destination = EXCLUDED.destination, + eta = EXCLUDED.eta, + status = EXCLUDED.status, + signal_kind_code = EXCLUDED.signal_kind_code, + class_type = EXCLUDED.class_type + """; + + Timestamp hourBucketTs = Timestamp.valueOf(hourBucket); + int inserted = 0; + int skipped = 0; + + List batchArgs = new ArrayList<>(); + + for (Map.Entry entry : coalesced.entrySet()) { + String mmsi = entry.getKey(); + AisTargetEntity current = entry.getValue(); + + // 이전 레코드 조회 + boolean changed; + try { + Map prev = jdbcTemplate.queryForMap( + selectPrevSql, mmsi, hourBucketTs); + changed = hasStaticInfoChanged(current, prev); + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + // 이전 레코드 없음 → 첫 INSERT + changed = true; + } + + if (changed) { + Timestamp etaTs = current.getEta() != null + ? 
Timestamp.from(current.getEta().toInstant()) + : null; + + batchArgs.add(new Object[] { + mmsi, hourBucketTs, + current.getImo(), current.getName(), current.getCallsign(), + current.getVesselType(), current.getExtraInfo(), + current.getLength(), current.getWidth(), current.getDraught(), + current.getDestination(), etaTs, current.getStatus(), + current.getSignalKindCode(), current.getClassType() + }); + inserted++; + } else { + skipped++; + } + } + + if (!batchArgs.isEmpty()) { + jdbcTemplate.batchUpdate(insertSql, batchArgs); + } + + log.info("t_vessel_static 동기화 완료: 총 {} 선박, INSERT {} 건, CDC 스킵 {} 건", + coalesced.size(), inserted, skipped); + + return org.springframework.batch.repeat.RepeatStatus.FINISHED; + }, transactionManager) + .build(); + } + + /** + * MMSI별 필드 COALESCE: 각 필드별 마지막 non-empty 값 조합 + */ + private Map coalesceByMmsi(Collection entities) { + Map result = new LinkedHashMap<>(); + + for (AisTargetEntity entity : entities) { + if (entity.getMmsi() == null) continue; + + result.merge(entity.getMmsi(), entity, (existing, incoming) -> { + // 더 최신 타임스탬프 기준, 각 필드별 non-empty 우선 + return AisTargetEntity.builder() + .mmsi(existing.getMmsi()) + .imo(coalesce(incoming.getImo(), existing.getImo())) + .name(coalesceStr(incoming.getName(), existing.getName())) + .callsign(coalesceStr(incoming.getCallsign(), existing.getCallsign())) + .vesselType(coalesceStr(incoming.getVesselType(), existing.getVesselType())) + .extraInfo(coalesceStr(incoming.getExtraInfo(), existing.getExtraInfo())) + .length(coalesce(incoming.getLength(), existing.getLength())) + .width(coalesce(incoming.getWidth(), existing.getWidth())) + .draught(coalesce(incoming.getDraught(), existing.getDraught())) + .destination(coalesceStr(incoming.getDestination(), existing.getDestination())) + .eta(coalesce(incoming.getEta(), existing.getEta())) + .status(coalesceStr(incoming.getStatus(), existing.getStatus())) + .signalKindCode(coalesceStr(incoming.getSignalKindCode(), existing.getSignalKindCode())) 
+ .classType(coalesceStr(incoming.getClassType(), existing.getClassType())) + .messageTimestamp(coalesce(incoming.getMessageTimestamp(), existing.getMessageTimestamp())) + .build(); + }); + } + + return result; + } + + /** + * CDC: 정적 정보 변경 여부 비교 + */ + private boolean hasStaticInfoChanged(AisTargetEntity current, Map prev) { + return !Objects.equals(current.getImo(), toLong(prev.get("imo"))) + || !Objects.equals(current.getName(), prev.get("name")) + || !Objects.equals(current.getCallsign(), prev.get("callsign")) + || !Objects.equals(current.getVesselType(), prev.get("vessel_type")) + || !Objects.equals(current.getExtraInfo(), prev.get("extra_info")) + || !Objects.equals(current.getLength(), toInt(prev.get("length"))) + || !Objects.equals(current.getWidth(), toInt(prev.get("width"))) + || !Objects.equals(current.getDraught(), toDouble(prev.get("draught"))) + || !Objects.equals(current.getDestination(), prev.get("destination")) + || !Objects.equals(current.getStatus(), prev.get("status")) + || !Objects.equals(current.getSignalKindCode(), prev.get("signal_kind_code")) + || !Objects.equals(current.getClassType(), prev.get("class_type")); + } + + private T coalesce(T a, T b) { + return a != null ? a : b; + } + + private String coalesceStr(String a, String b) { + return (a != null && !a.isBlank()) ? 
a : b; + } + + private Long toLong(Object val) { + if (val == null) return null; + if (val instanceof Long l) return l; + if (val instanceof Number n) return n.longValue(); + return null; + } + + private Integer toInt(Object val) { + if (val == null) return null; + if (val instanceof Integer i) return i; + if (val instanceof Number n) return n.intValue(); + return null; + } + + private Double toDouble(Object val) { + if (val == null) return null; + if (val instanceof Double d) return d; + if (val instanceof Number n) return n.doubleValue(); + return null; + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackAggregationJobConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackAggregationJobConfig.java index b5cbbf6..b9ad0de 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackAggregationJobConfig.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackAggregationJobConfig.java @@ -1,6 +1,6 @@ package gc.mda.signal_batch.batch.job; -import gc.mda.signal_batch.global.util.VesselTrackDataJobListener; +import gc.mda.signal_batch.batch.listener.CacheBasedTrackJobListener; import gc.mda.signal_batch.batch.listener.JobCompletionListener; import gc.mda.signal_batch.batch.listener.PerformanceOptimizationListener; import lombok.RequiredArgsConstructor; @@ -25,8 +25,9 @@ public class VesselTrackAggregationJobConfig { private final JobRepository jobRepository; private final VesselTrackStepConfig vesselTrackStepConfig; + private final AisPositionSyncStepConfig aisPositionSyncStepConfig; private final JobCompletionListener jobCompletionListener; - private final VesselTrackDataJobListener vesselTrackDataJobListener; + private final CacheBasedTrackJobListener cacheBasedTrackJobListener; private final PerformanceOptimizationListener performanceOptimizationListener; @Bean @@ -35,11 +36,12 @@ public class VesselTrackAggregationJobConfig { .incrementer(new RunIdIncrementer()) .validator(trackJobParametersValidator()) 
.listener(jobCompletionListener) - .listener(vesselTrackDataJobListener) + .listener(cacheBasedTrackJobListener) .listener(performanceOptimizationListener) // 성능 최적화 리스너 추가 .start(vesselTrackStepConfig.vesselTrackStep()) .next(vesselTrackStepConfig.gridTrackSummaryStep()) .next(vesselTrackStepConfig.areaTrackSummaryStep()) + .next(aisPositionSyncStepConfig.aisPositionSyncStep()) .build(); } diff --git a/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackStepConfig.java b/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackStepConfig.java index a315588..2211d32 100644 --- a/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackStepConfig.java +++ b/src/main/java/gc/mda/signal_batch/batch/job/VesselTrackStepConfig.java @@ -7,8 +7,8 @@ import gc.mda.signal_batch.domain.vessel.service.VesselPreviousBucketCache; import gc.mda.signal_batch.batch.processor.VesselTrackProcessor; import gc.mda.signal_batch.batch.processor.AbnormalTrackDetector; import gc.mda.signal_batch.batch.processor.AbnormalTrackDetector.AbnormalDetectionResult; -import gc.mda.signal_batch.batch.reader.InMemoryVesselTrackDataReader; -import gc.mda.signal_batch.global.util.VesselTrackDataHolder; +import gc.mda.signal_batch.batch.reader.AisTargetCacheManager; +import gc.mda.signal_batch.batch.reader.CacheBasedVesselTrackDataReader; import gc.mda.signal_batch.global.util.TrackClippingUtils; import gc.mda.signal_batch.batch.writer.VesselTrackBulkWriter; import gc.mda.signal_batch.batch.writer.AbnormalTrackWriter; @@ -36,9 +36,9 @@ import javax.sql.DataSource; import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; -import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; import jakarta.annotation.PostConstruct; @@ -53,7 +53,7 @@ public class VesselTrackStepConfig { private final PlatformTransactionManager transactionManager; private final DataSource queryDataSource; private final 
VesselTrackProcessor vesselTrackProcessor; - private final VesselTrackDataHolder vesselTrackDataHolder; + private final AisTargetCacheManager aisTargetCacheManager; private final VesselTrackBulkWriter vesselTrackBulkWriter; private final TrackClippingUtils trackClippingUtils; private final AbnormalTrackDetector abnormalTrackDetector; @@ -61,14 +61,14 @@ public class VesselTrackStepConfig { private final VesselPreviousBucketCache previousBucketCache; // 현재 처리 중인 버킷의 종료 위치 저장 (캐시 업데이트용) - private final Map currentBucketEndPositions = new HashMap<>(); + private final Map currentBucketEndPositions = new ConcurrentHashMap<>(); public VesselTrackStepConfig( JobRepository jobRepository, PlatformTransactionManager transactionManager, @Qualifier("queryDataSource") DataSource queryDataSource, VesselTrackProcessor vesselTrackProcessor, - VesselTrackDataHolder vesselTrackDataHolder, + AisTargetCacheManager aisTargetCacheManager, VesselTrackBulkWriter vesselTrackBulkWriter, TrackClippingUtils trackClippingUtils, AbnormalTrackDetector abnormalTrackDetector, @@ -78,7 +78,7 @@ public class VesselTrackStepConfig { this.transactionManager = transactionManager; this.queryDataSource = queryDataSource; this.vesselTrackProcessor = vesselTrackProcessor; - this.vesselTrackDataHolder = vesselTrackDataHolder; + this.aisTargetCacheManager = aisTargetCacheManager; this.vesselTrackBulkWriter = vesselTrackBulkWriter; this.trackClippingUtils = trackClippingUtils; this.abnormalTrackDetector = abnormalTrackDetector; @@ -88,6 +88,9 @@ public class VesselTrackStepConfig { @Value("${vessel.batch.chunk-size:1000}") private int chunkSize; + + @Value("${partition.retention.tables.t_vessel_tracks_5min.retention-days:7}") + private int trackRetentionDays; @PostConstruct public void init() { @@ -108,8 +111,8 @@ public class VesselTrackStepConfig { @Bean @StepScope - public InMemoryVesselTrackDataReader trackDataReader() { - return new InMemoryVesselTrackDataReader(vesselTrackDataHolder, chunkSize); + 
public CacheBasedVesselTrackDataReader trackDataReader() { + return new CacheBasedVesselTrackDataReader(aisTargetCacheManager, trackRetentionDays); } @Bean @@ -124,7 +127,7 @@ public class VesselTrackStepConfig { // 2. 이전 버킷 위치 조회 (캐시 + DB Fallback) List vesselKeys = tracks.stream() - .map(track -> track.getSigSrcCd() + ":" + track.getTargetId()) + .map(VesselTrack::getMmsi) .distinct() .collect(Collectors.toList()); @@ -138,10 +141,9 @@ public class VesselTrackStepConfig { boolean isAbnormal = false; String abnormalReason = ""; - // 선박/항공기 구분 - boolean isAircraft = "000019".equals(track.getSigSrcCd()); - double speedLimit = isAircraft ? 300.0 : 100.0; // 항공기 300, 선박 100 - double distanceLimit = isAircraft ? 30.0 : 10.0; // 항공기 30nm, 선박 10nm + // S&P AIS API는 선박 전용 — 항공기 구분 불필요 + double speedLimit = 100.0; + double distanceLimit = 10.0; // 버킷 내 평균속도 체크 if (track.getAvgSpeed() != null && track.getAvgSpeed().doubleValue() >= speedLimit) { @@ -155,9 +157,9 @@ public class VesselTrackStepConfig { abnormalReason = "within_bucket_distance"; } - // 버킷 간 점프 검출 (NEW!) + // 버킷 간 점프 검출 if (!isAbnormal && track.getStartPosition() != null) { - String vesselKey = track.getSigSrcCd() + ":" + track.getTargetId(); + String vesselKey = track.getMmsi(); VesselBucketPositionDto prevPosition = previousPositions.get(vesselKey); if (prevPosition != null) { @@ -166,10 +168,9 @@ public class VesselTrackStepConfig { track.getStartPosition().getLat(), track.getStartPosition().getLon() ); - // 위성 AIS는 2시간, 일반 신호는 15분 범위 체크 - boolean isSatellite = "000016".equals(track.getSigSrcCd()); - double maxGapMinutes = isSatellite ? 120.0 : 15.0; - double expectedMaxDistance = isAircraft ? 
(maxGapMinutes / 60.0 * 300.0) : (maxGapMinutes / 60.0 * 50.0); + // S&P AIS API: 위성/지상 구분 불가 → 보수적 30분 gap 허용 + double maxGapMinutes = 30.0; + double expectedMaxDistance = maxGapMinutes / 60.0 * 50.0; if (jumpDistance > expectedMaxDistance) { isAbnormal = true; @@ -196,10 +197,8 @@ public class VesselTrackStepConfig { // 정상 궤적의 종료 위치 저장 (캐시 업데이트용) if (track.getEndPosition() != null) { - String vesselKey = track.getSigSrcCd() + ":" + track.getTargetId(); - currentBucketEndPositions.put(vesselKey, VesselBucketPositionDto.builder() - .sigSrcCd(track.getSigSrcCd()) - .targetId(track.getTargetId()) + currentBucketEndPositions.put(track.getMmsi(), VesselBucketPositionDto.builder() + .mmsi(track.getMmsi()) .endLon(track.getEndPosition().getLon()) .endLat(track.getEndPosition().getLat()) .endTime(track.getEndPosition().getTime()) @@ -232,15 +231,14 @@ public class VesselTrackStepConfig { abnormalTrackWriter.setJobName("vesselTrackAggregationJob"); List segments = new ArrayList<>(); - Map details = new HashMap<>(); + Map details = new ConcurrentHashMap<>(); details.put("avgSpeed", track.getAvgSpeed()); details.put("distanceNm", track.getDistanceNm()); details.put("timeBucket", track.getTimeBucket()); - // 선박/항공기 구분 - boolean isAircraft = "000019".equals(track.getSigSrcCd()); - double speedLimit = isAircraft ? 300.0 : 100.0; - double distanceLimit = isAircraft ? 30.0 : 10.0; + // S&P AIS API는 선박 전용 + double speedLimit = 100.0; + double distanceLimit = 10.0; // 비정상 유형 결정 String abnormalType = "abnormal_5min"; @@ -339,17 +337,16 @@ public class VesselTrackStepConfig { String sql = """ INSERT INTO signal.t_grid_vessel_tracks ( - haegu_no, sig_src_cd, target_id, time_bucket, + haegu_no, mmsi, time_bucket, distance_nm, avg_speed, point_count, entry_time, exit_time - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?) - ON CONFLICT (haegu_no, sig_src_cd, target_id, time_bucket) DO NOTHING + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?) 
+ ON CONFLICT (haegu_no, mmsi, time_bucket) DO NOTHING """; List args = allClippedTracks.stream() .map(track -> new Object[] { track.getHaeguNo(), - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), Timestamp.valueOf(track.getTimeBucket()), track.getDistanceNm(), track.getAvgSpeed(), @@ -385,17 +382,16 @@ public class VesselTrackStepConfig { String sql = """ INSERT INTO signal.t_area_vessel_tracks ( - area_id, sig_src_cd, target_id, time_bucket, + area_id, mmsi, time_bucket, distance_nm, avg_speed, point_count, metrics - ) VALUES (?, ?, ?, ?, ?, ?, ?, ?::jsonb) - ON CONFLICT (area_id, sig_src_cd, target_id, time_bucket) DO NOTHING + ) VALUES (?, ?, ?, ?, ?, ?, ?::jsonb) + ON CONFLICT (area_id, mmsi, time_bucket) DO NOTHING """; List args = allClippedTracks.stream() .map(track -> new Object[] { track.getAreaId(), - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), Timestamp.valueOf(track.getTimeBucket()), track.getDistanceNm(), track.getAvgSpeed(), @@ -422,12 +418,11 @@ public class VesselTrackStepConfig { SELECT haegu_no, time_bucket, - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as total_vessels, + COUNT(DISTINCT mmsi) as total_vessels, SUM(distance_nm) as total_distance_nm, AVG(avg_speed) as avg_speed, jsonb_agg(jsonb_build_object( - 'sig_src_cd', sig_src_cd, - 'target_id', target_id, + 'mmsi', mmsi, 'distance_nm', distance_nm, 'avg_speed', avg_speed )) as vessel_list @@ -466,12 +461,11 @@ public class VesselTrackStepConfig { SELECT area_id, time_bucket, - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as total_vessels, + COUNT(DISTINCT mmsi) as total_vessels, SUM(distance_nm) as total_distance_nm, AVG(avg_speed) as avg_speed, jsonb_agg(jsonb_build_object( - 'sig_src_cd', sig_src_cd, - 'target_id', target_id, + 'mmsi', mmsi, 'distance_nm', distance_nm, 'avg_speed', avg_speed )) as vessel_list diff --git a/src/main/java/gc/mda/signal_batch/batch/listener/CacheBasedTrackJobListener.java 
b/src/main/java/gc/mda/signal_batch/batch/listener/CacheBasedTrackJobListener.java new file mode 100644 index 0000000..0fd03c8 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/listener/CacheBasedTrackJobListener.java @@ -0,0 +1,52 @@ +package gc.mda.signal_batch.batch.listener; + +import gc.mda.signal_batch.domain.gis.cache.AreaBoundaryCache; +import gc.mda.signal_batch.domain.vessel.service.VesselPreviousBucketCache; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.core.JobExecution; +import org.springframework.batch.core.JobExecutionListener; +import org.springframework.batch.core.annotation.AfterJob; +import org.springframework.batch.core.annotation.BeforeJob; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; +import org.springframework.stereotype.Component; + +/** + * 캐시 기반 Track Job 리스너 + * + * 기존 VesselTrackDataJobListener 대체: + * - collectDB 데이터 로드 제거 (AisTargetCacheManager로 대체) + * - Area/Haegu 경계 캐시 갱신 유지 + * - 이전 버킷 캐시 Fallback 플래그 리셋 유지 + */ +@Slf4j +@Component +@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) +@RequiredArgsConstructor +public class CacheBasedTrackJobListener implements JobExecutionListener { + + private final AreaBoundaryCache areaBoundaryCache; + private final VesselPreviousBucketCache previousBucketCache; + + @BeforeJob + public void beforeJob(JobExecution jobExecution) { + // Area/Haegu 경계 캐시 갱신 + areaBoundaryCache.refresh(); + log.info("Refreshed area boundary cache"); + + // 이전 버킷 캐시 Fallback 플래그 리셋 + previousBucketCache.resetFallbackFlag(); + log.info("Reset previous bucket cache fallback flag"); + + log.info("Cache-based track job started: startTime={}, endTime={}", + jobExecution.getJobParameters().getString("startTime"), + jobExecution.getJobParameters().getString("endTime")); + } + + @AfterJob + public void afterJob(JobExecution jobExecution) { + // DB 조회 통계 출력 + 
previousBucketCache.logJobStatistics(); + log.debug("Cache-based track job completed"); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/AbnormalTrackDetector.java b/src/main/java/gc/mda/signal_batch/batch/processor/AbnormalTrackDetector.java index 061174f..74aefab 100644 --- a/src/main/java/gc/mda/signal_batch/batch/processor/AbnormalTrackDetector.java +++ b/src/main/java/gc/mda/signal_batch/batch/processor/AbnormalTrackDetector.java @@ -29,12 +29,11 @@ public class AbnormalTrackDetector { // 물리적 한계값 (매우 관대하게 설정) @SuppressWarnings("unused") private static final double VESSEL_PHYSICAL_LIMIT_KNOTS = 100.0; // 선박 물리적 한계 - @SuppressWarnings("unused") - private static final double AIRCRAFT_PHYSICAL_LIMIT_KNOTS = 600.0; // 항공기 물리적 한계 + // 항공기 물리적 한계 — S&P AIS API 전환으로 미사용 (선박 전용) // 명백한 비정상만 검출하기 위한 임계값 private static final double VESSEL_ABNORMAL_SPEED_KNOTS = 500.0; // 선박 비정상 속도 (매우 관대) - private static final double AIRCRAFT_ABNORMAL_SPEED_KNOTS = 800.0; // 항공기 비정상 속도 + // 항공기 비정상 속도 — S&P AIS API 전환으로 미사용 (선박 전용) // 시간별 거리 임계값 (제곱근 스케일링 적용) private static final double BASE_DISTANCE_5MIN_NM = 20.0; // 5분간 기준 거리 (2배로 증가) @@ -46,7 +45,7 @@ public class AbnormalTrackDetector { private static final long MIN_GAP_FOR_RELAXED_CHECK = 30; // 30분 이상 gap은 완화된 검사 private static final double EARTH_RADIUS_NM = 3440.065; - private static final String AIRCRAFT_SIG_SRC_CD = "000019"; + // S&P AIS API는 선박 전용 — 항공기 구분 불필요 @Data @Builder @@ -130,9 +129,8 @@ public class AbnormalTrackDetector { return buildNormalResult(track); } - // Hourly/Daily에서는 선박/항공기 구분하여 제외 - boolean isAircraft = AIRCRAFT_SIG_SRC_CD.equals(track.getSigSrcCd()); - double speedLimit = isAircraft ? 
300.0 : 100.0; // 항공기 300, 선박 100 + // S&P AIS API는 선박 전용 — 선박 기준 속도 제한 + double speedLimit = 100.0; boolean shouldExclude = abnormalSegments.stream() .anyMatch(seg -> seg.getActualValue() > speedLimit); @@ -185,9 +183,8 @@ public class AbnormalTrackDetector { private List checkAggregatedMetricsLenient(VesselTrack track) { List abnormalSegments = new ArrayList<>(); - boolean isAircraft = AIRCRAFT_SIG_SRC_CD.equals(track.getSigSrcCd()); - double speedLimit = isAircraft ? AIRCRAFT_ABNORMAL_SPEED_KNOTS : VESSEL_ABNORMAL_SPEED_KNOTS; - + double speedLimit = VESSEL_ABNORMAL_SPEED_KNOTS; + // 평균속도가 명백히 비정상인 경우만 검출 if (track.getAvgSpeed() != null && track.getAvgSpeed().doubleValue() > speedLimit) { abnormalSegments.add(AbnormalSegment.builder() @@ -259,9 +256,8 @@ public class AbnormalTrackDetector { double timeScale = Math.sqrt(durationMinutes / 5.0); double distanceThreshold = BASE_DISTANCE_5MIN_NM * timeScale * 3.0; // 3배 여유 - boolean isAircraft = AIRCRAFT_SIG_SRC_CD.equals(currentTrack.getSigSrcCd()); - double speedLimit = isAircraft ? AIRCRAFT_ABNORMAL_SPEED_KNOTS : VESSEL_ABNORMAL_SPEED_KNOTS; - + double speedLimit = VESSEL_ABNORMAL_SPEED_KNOTS; + // 매우 명백한 비정상만 검출 if (impliedSpeed > speedLimit && distance > distanceThreshold) { Map details = new HashMap<>(); @@ -345,9 +341,8 @@ public class AbnormalTrackDetector { double impliedSpeed = (distance * 60.0) / durationMinutes; - // Hourly/Daily는 선박/항공기 구분하여 처리 - boolean isAircraft = AIRCRAFT_SIG_SRC_CD.equals(currentTrack.getSigSrcCd()); - double speedLimit = isAircraft ? 
300.0 : 100.0; + // S&P AIS API는 선박 전용 — 항공기 구분 불필요 + double speedLimit = 100.0; if (impliedSpeed > speedLimit) { Map details = new HashMap<>(); diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingAreaProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingAreaProcessor.java deleted file mode 100644 index cd9019c..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingAreaProcessor.java +++ /dev/null @@ -1,190 +0,0 @@ -package gc.mda.signal_batch.batch.processor; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor.AreaStatistics; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor.VesselMovement; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.StepExecution; -import org.springframework.batch.core.annotation.AfterStep; -import org.springframework.batch.core.annotation.BeforeStep; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.stereotype.Component; - -import java.math.BigDecimal; -import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; - -/** - * Area Statistics를 위한 누적 프로세서 - * 전체 데이터를 메모리에 누적한 후 Step 종료 시 한 번에 집계 - */ -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@StepScope -@RequiredArgsConstructor -public class AccumulatingAreaProcessor implements ItemProcessor { - - private final AreaStatisticsProcessor areaStatisticsProcessor; - - @Value("#{jobParameters['timeBucketMinutes']}") - private Integer timeBucketMinutes; - - // area_id 
+ time_bucket별 선박 데이터 누적 - private final Map> dataAccumulator = new ConcurrentHashMap<>(); - - // 처리 통계 - private long processedCount = 0; - private long skippedCount = 0; - - @BeforeStep - public void beforeStep(StepExecution stepExecution) { - int bucketMinutes = (timeBucketMinutes != null) ? timeBucketMinutes : 5; - log.info("AccumulatingAreaProcessor initialized with timeBucket: {} minutes", bucketMinutes); - dataAccumulator.clear(); - processedCount = 0; - skippedCount = 0; - } - - @Override - public AreaStatistics process(VesselData item) throws Exception { - if (!item.isValidPosition()) { - skippedCount++; - return null; - } - - // 메모리에서 속한 구역 찾기 - List areaIds = areaStatisticsProcessor.findAreasForPointInMemory( - item.getLat(), item.getLon() - ); - - if (areaIds.isEmpty()) { - return null; - } - - // time bucket 계산 - int bucketSize = timeBucketMinutes != null ? timeBucketMinutes : 5; - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / bucketSize) * bucketSize); - - // 각 area에 대해 데이터 누적 - for (String areaId : areaIds) { - String key = areaId + "||" + bucket.toString(); // 구분자 변경 - dataAccumulator.computeIfAbsent(key, k -> new ArrayList<>()).add(item); - } - - processedCount++; - - // null 반환으로 개별 출력 방지 - return null; - } - - @AfterStep - public void afterStep(StepExecution stepExecution) { - log.info("Processing accumulated data for {} area-timebucket combinations", - dataAccumulator.size()); - log.info("Processed: {}, Skipped: {}", processedCount, skippedCount); - - if (dataAccumulator.isEmpty()) { - return; - } - - // 누적된 데이터를 기반으로 통계 계산 - List allStatistics = new ArrayList<>(); - - dataAccumulator.forEach((key, vessels) -> { - String[] parts = key.split("\\|\\|", 2); // || 구분자 사용 - if (parts.length != 2) { - log.error("Invalid key format: {}", key); - return; - } - String areaId = parts[0]; - LocalDateTime timeBucket = LocalDateTime.parse(parts[1]); - - AreaStatistics stats 
= new AreaStatistics(areaId, timeBucket); - Map vesselMovements = new HashMap<>(); - - // 각 선박별로 movement 정보 계산 - Map> vesselGroups = new HashMap<>(); - for (VesselData vessel : vessels) { - vesselGroups.computeIfAbsent(vessel.getVesselKey(), k -> new ArrayList<>()) - .add(vessel); - } - - vesselGroups.forEach((vesselKey, vesselDataList) -> { - // 시간순 정렬 - vesselDataList.sort(Comparator.comparing(VesselData::getMessageTime)); - - VesselMovement movement = new VesselMovement(); - movement.setVesselKey(vesselKey); - movement.setEnterTime(vesselDataList.get(0).getMessageTime()); - movement.setExitTime(vesselDataList.get(vesselDataList.size() - 1).getMessageTime()); - movement.setPointCount(vesselDataList.size()); - - // 평균 속도 계산 - double totalSpeed = 0; - int speedCount = 0; - for (VesselData vd : vesselDataList) { - if (vd.getSog() != null) { - totalSpeed += vd.getSog().doubleValue(); - speedCount++; - } - } - - if (speedCount > 0) { - movement.setAvgSpeed(BigDecimal.valueOf(totalSpeed / speedCount) - .setScale(2, BigDecimal.ROUND_HALF_UP)); - } else { - movement.setAvgSpeed(BigDecimal.ZERO); - } - - // 정류/통과 구분 (10분 이상 체류 시 정류) - long stayMinutes = ChronoUnit.MINUTES.between( - movement.getEnterTime(), movement.getExitTime() - ); - - if (stayMinutes > 10) { - stats.getStationaryVessels().put(vesselKey, movement); - } else { - stats.getTransitVessels().put(vesselKey, movement); - } - - vesselMovements.put(vesselKey, movement); - }); - - // 통계 최종 계산 - stats.setVesselCount(vesselMovements.size()); - stats.setInCount(vesselMovements.size()); // 진입 선박 수 - stats.setOutCount(0); // 추후 로직 개선 필요 - - // 전체 평균 속도 - List allSpeeds = new ArrayList<>(); - vesselMovements.values().stream() - .map(VesselMovement::getAvgSpeed) - .filter(Objects::nonNull) - .forEach(allSpeeds::add); - - if (!allSpeeds.isEmpty()) { - BigDecimal totalSpeed = allSpeeds.stream() - .reduce(BigDecimal.ZERO, BigDecimal::add); - stats.setAvgSog(totalSpeed.divide( - BigDecimal.valueOf(allSpeeds.size()), 2, 
BigDecimal.ROUND_HALF_UP)); - } else { - stats.setAvgSog(BigDecimal.ZERO); - } - - allStatistics.add(stats); - }); - - // StepExecution context에 결과 저장 - stepExecution.getExecutionContext().put("areaStatistics", allStatistics); - log.info("Calculated statistics for {} areas", allStatistics.size()); - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingTileProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingTileProcessor.java deleted file mode 100644 index 8e63aa0..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/processor/AccumulatingTileProcessor.java +++ /dev/null @@ -1,206 +0,0 @@ -package gc.mda.signal_batch.batch.processor; - -import gc.mda.signal_batch.domain.gis.model.TileStatistics; -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.global.util.HaeguGeoUtils; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.StepExecution; -import org.springframework.batch.core.annotation.AfterStep; -import org.springframework.batch.core.annotation.BeforeStep; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.stereotype.Component; - -import java.math.BigDecimal; -import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; - - -/** - * 전체 데이터를 누적하여 집계하는 프로세서 - * Step 실행 중 모든 데이터를 메모리에 누적하고, Step 완료 시 한 번에 출력 - */ -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@StepScope -@RequiredArgsConstructor -public class AccumulatingTileProcessor implements ItemProcessor { - - private final 
HaeguGeoUtils geoUtils; - - @Value("#{jobParameters['tileLevel']}") - private Integer tileLevel; - - @Value("#{jobParameters['timeBucketMinutes']}") - private Integer timeBucketMinutes; - - // 전체 집계를 위한 누적 맵 - private final Map accumulator = new ConcurrentHashMap<>(); - - // 처리된 레코드 수 추적 - private long processedCount = 0; - private long skippedCount = 0; - - @BeforeStep - public void beforeStep(StepExecution stepExecution) { - int level = (tileLevel != null) ? tileLevel : 1; - int bucketMinutes = (timeBucketMinutes != null) ? timeBucketMinutes : 5; - - log.info("Starting AccumulatingTileProcessor - tileLevel: {}, timeBucket: {} minutes", - level, bucketMinutes); - - // 초기화 - accumulator.clear(); - processedCount = 0; - skippedCount = 0; - } - - @Override - public TileStatistics process(VesselData item) throws Exception { - if (item == null || !item.isValidPosition()) { - skippedCount++; - return null; - } - - processedCount++; - - int level = (tileLevel != null) ? tileLevel : 1; - int bucketMinutes = (timeBucketMinutes != null) ? 
timeBucketMinutes : 5; - - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / bucketMinutes) * bucketMinutes); - - // Level 0 (대해구) 처리 - if (level >= 0) { - processLevel0(item, bucket); - } - - // Level 1 (소해구) 처리 - if (level >= 1) { - processLevel1(item, bucket); - } - - // 10000건마다 진행 상황 로그 - if (processedCount % 10000 == 0) { - log.debug("Processed {} records, accumulated {} tiles", - processedCount, accumulator.size()); - } - - // null 반환 - 실제 출력은 AfterStep에서 수행 - return null; - } - - private void processLevel0(VesselData item, LocalDateTime bucket) { - HaeguGeoUtils.HaeguTileInfo level0Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 0 - ); - - if (level0Info != null) { - String key = generateKey(level0Info.tileId, 0, bucket); - - accumulator.compute(key, (k, existing) -> { - if (existing == null) { - existing = TileStatistics.builder() - .tileId(level0Info.tileId) - .tileLevel(0) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build(); - } - existing.addVesselData(item); - return existing; - }); - } - } - - private void processLevel1(VesselData item, LocalDateTime bucket) { - HaeguGeoUtils.HaeguTileInfo level1Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 1 - ); - - if (level1Info != null && level1Info.sohaeguNo != null) { - String key = generateKey(level1Info.tileId, 1, bucket); - - accumulator.compute(key, (k, existing) -> { - if (existing == null) { - existing = TileStatistics.builder() - .tileId(level1Info.tileId) - .tileLevel(1) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build(); - } - existing.addVesselData(item); - return existing; - }); - } - } - - private String generateKey(String tileId, int tileLevel, LocalDateTime timeBucket) { - return String.format("%s|%d|%s", 
tileId, tileLevel, timeBucket); - } - - @AfterStep - public void afterStep(StepExecution stepExecution) { - log.info("AccumulatingTileProcessor completed - processed: {}, skipped: {}, tiles: {}", - processedCount, skippedCount, accumulator.size()); - - // 밀도 계산 - accumulator.values().forEach(this::calculateDensity); - - // 메트릭 저장 - stepExecution.getExecutionContext().putLong("totalProcessed", processedCount); - stepExecution.getExecutionContext().putLong("totalSkipped", skippedCount); - stepExecution.getExecutionContext().putInt("totalTiles", accumulator.size()); - - // 이 위치에서 바로 DB에 저장하면 안됨 - StepListener에서 처리해야 함 - log.info("Accumulated {} tiles ready for writing", accumulator.size()); - } - - private void calculateDensity(TileStatistics stats) { - if (stats.getVesselCount() == null || stats.getVesselCount() == 0) { - stats.setVesselDensity(BigDecimal.ZERO); - return; - } - - double tileArea = geoUtils.getTileArea(stats.getTileId()); - - if (tileArea > 0) { - BigDecimal density = BigDecimal.valueOf(stats.getVesselCount()) - .divide(BigDecimal.valueOf(tileArea), 6, BigDecimal.ROUND_HALF_UP); - stats.setVesselDensity(density); - } else { - stats.setVesselDensity(BigDecimal.ZERO); - } - } - - /** - * 누적된 결과 반환 (테스트용) - */ - public List getAccumulatedResults() { - log.info("[AccumulatingTileProcessor] getAccumulatedResults called - size: {}", accumulator.size()); - return new ArrayList<>(accumulator.values()); - } - - /** - * 누적 데이터 초기화 - */ - public void clear() { - accumulator.clear(); - processedCount = 0; - skippedCount = 0; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessor.java new file mode 100644 index 0000000..40f62d9 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessor.java @@ -0,0 +1,85 @@ +package gc.mda.signal_batch.batch.processor; + +import 
gc.mda.signal_batch.domain.vessel.dto.AisTargetDto; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.item.ItemProcessor; +import org.springframework.stereotype.Component; + +import java.time.OffsetDateTime; +import java.time.format.DateTimeFormatter; +import java.time.format.DateTimeParseException; + +/** + * AIS Target DTO → Entity 변환 Processor + * + * - 타임스탬프 파싱 (ISO 8601) + * - 유효성 필터링 (MMSI, Lat, Lon 필수) + * - gc-signal-batch에서는 mmsi를 String으로 처리 + */ +@Slf4j +@Component +public class AisTargetDataProcessor implements ItemProcessor { + + private static final DateTimeFormatter ISO_FORMATTER = DateTimeFormatter.ISO_DATE_TIME; + + @Override + public AisTargetEntity process(AisTargetDto dto) { + // 유효성 검사: MMSI와 위치 정보 필수 + if (dto.getMmsi() == null || dto.getMmsi().isBlank() + || dto.getLat() == null || dto.getLon() == null) { + log.debug("유효하지 않은 데이터 스킵 - MMSI: {}, Lat: {}, Lon: {}", + dto.getMmsi(), dto.getLat(), dto.getLon()); + return null; + } + + // MessageTimestamp 파싱 + OffsetDateTime messageTimestamp = parseTimestamp(dto.getMessageTimestamp()); + if (messageTimestamp == null) { + log.debug("MessageTimestamp 파싱 실패 - MMSI: {}, Timestamp: {}", + dto.getMmsi(), dto.getMessageTimestamp()); + return null; + } + + return AisTargetEntity.builder() + .mmsi(dto.getMmsi()) + .imo(dto.getImo()) + .name(dto.getName()) + .callsign(dto.getCallsign()) + .vesselType(dto.getVesselType()) + .extraInfo(dto.getExtraInfo()) + .lat(dto.getLat()) + .lon(dto.getLon()) + .heading(dto.getHeading()) + .sog(dto.getSog()) + .cog(dto.getCog()) + .rot(dto.getRot()) + .length(dto.getLength()) + .width(dto.getWidth()) + .draught(dto.getDraught()) + .destination(dto.getDestination()) + .eta(parseEta(dto.getEta())) + .status(dto.getStatus()) + .messageTimestamp(messageTimestamp) + .build(); + } + + private OffsetDateTime parseTimestamp(String timestamp) { + if (timestamp == null || timestamp.isEmpty()) { + 
return null; + } + try { + return OffsetDateTime.parse(timestamp, ISO_FORMATTER); + } catch (DateTimeParseException e) { + log.trace("타임스탬프 파싱 실패: {}", timestamp); + return null; + } + } + + private OffsetDateTime parseEta(String eta) { + if (eta == null || eta.isEmpty() || "9999-12-31T23:59:59Z".equals(eta)) { + return null; + } + return parseTimestamp(eta); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/AreaStatisticsProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/AreaStatisticsProcessor.java deleted file mode 100644 index 891235b..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/processor/AreaStatisticsProcessor.java +++ /dev/null @@ -1,333 +0,0 @@ -package gc.mda.signal_batch.batch.processor; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.global.util.DataSourceLogger; -import lombok.Data; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.locationtech.jts.geom.*; -import org.locationtech.jts.io.WKTReader; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; - -import javax.sql.DataSource; -import jakarta.annotation.PostConstruct; -import java.math.BigDecimal; -import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.stream.Collectors; - - -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class AreaStatisticsProcessor { - - 
@Qualifier("queryJdbcTemplate") - private final JdbcTemplate queryJdbcTemplate; - - @Qualifier("queryDataSource") - private final DataSource queryDataSource; - - // 메모리에 구역 정보 캐싱 - private final Map areaCache = new ConcurrentHashMap<>(); - private final List areaList = new ArrayList<>(); - - // JTS 객체들 - private final GeometryFactory geometryFactory = new GeometryFactory(new PrecisionModel(), 4326); - private final WKTReader wktReader = new WKTReader(geometryFactory); - - @PostConstruct - public void init() { - log.info("========== AreaStatisticsProcessor Initialization =========="); - DataSourceLogger.logJdbcTemplateInfo("AreaStatisticsProcessor", queryJdbcTemplate); - - // t_areas 테이블 존재 확인 - boolean tableExists = DataSourceLogger.checkTableExists( - "AreaStatisticsProcessor", queryJdbcTemplate, "signal", "t_areas" - ); - - if (!tableExists) { - log.error("CRITICAL: Table signal.t_areas does not exist in query database!"); - log.error("Please run: scripts/sql/create-query-db-schema.sql on the query database"); - } else { - // 초기화 시 구역 정보 로드 - loadAreas(); - } - - log.info("========== End of Initialization =========="); - } - - @Data - public static class AreaInfo { - private String areaId; - private String areaName; - private String areaType; - private String geomWkt; - private Geometry geometry; // JTS Geometry 객체 - private Envelope envelope; // Bounding Box for quick filtering - } - - @Data - public static class AreaStatistics implements java.io.Serializable { - private String areaId; - private LocalDateTime timeBucket; - private Integer vesselCount; - private Integer inCount; - private Integer outCount; - private Map transitVessels; - private Map stationaryVessels; - private BigDecimal avgSog; - private LocalDateTime createdAt; - - public AreaStatistics(String areaId, LocalDateTime timeBucket) { - this.areaId = areaId; - this.timeBucket = timeBucket; - this.vesselCount = 0; - this.inCount = 0; - this.outCount = 0; - this.transitVessels = new HashMap<>(); - 
this.stationaryVessels = new HashMap<>(); - this.avgSog = BigDecimal.ZERO; - } - } - - @Data - public static class VesselMovement implements java.io.Serializable { - private String vesselKey; - private LocalDateTime enterTime; - private LocalDateTime exitTime; - private BigDecimal avgSpeed; - private Integer pointCount; - } - - @StepScope - public ItemProcessor, List> batchProcessor() { - return batchProcessor(null); - } - - @StepScope - public ItemProcessor, List> batchProcessor( - @Value("#{jobParameters['timeBucketMinutes']}") Integer bucketMinutes) { - - return items -> { - // 구역 정보가 없으면 빈 결과 반환 - if (areaList.isEmpty()) { - log.warn("No areas loaded, skipping area statistics processing"); - return new ArrayList<>(); - } - - Map statsMap = new HashMap<>(); - Map> vesselTracker = new HashMap<>(); - - for (VesselData item : items) { - if (!item.isValidPosition()) { - continue; - } - - // 메모리에서 속한 구역 찾기 (DB 쿼리 없음!) - List areaIds = findAreasForPointInMemory(item.getLat(), item.getLon()); - - int bucketSize = bucketMinutes != null ? 
bucketMinutes : 5; // 5분 단위로 변경 - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / bucketSize) * bucketSize); - - for (String areaId : areaIds) { - String statsKey = areaId + "_" + bucket.toString(); - AreaStatistics stats = statsMap.computeIfAbsent(statsKey, - k -> new AreaStatistics(areaId, bucket) - ); - - // 선박 이동 추적 - String vesselKey = item.getVesselKey(); - Map areaVessels = vesselTracker.computeIfAbsent( - areaId, k -> new HashMap<>() - ); - - VesselMovement movement = areaVessels.computeIfAbsent(vesselKey, - k -> { - VesselMovement vm = new VesselMovement(); - vm.setVesselKey(vesselKey); - vm.setEnterTime(item.getMessageTime()); - vm.setPointCount(0); - vm.setAvgSpeed(BigDecimal.ZERO); - stats.setInCount(stats.getInCount() + 1); - return vm; - } - ); - - movement.setExitTime(item.getMessageTime()); - movement.setPointCount(movement.getPointCount() + 1); - - // 평균 속도 계산 - if (item.getSog() != null) { - BigDecimal currentTotal = movement.getAvgSpeed() - .multiply(BigDecimal.valueOf(movement.getPointCount() - 1)); - movement.setAvgSpeed( - currentTotal.add(item.getSog()) - .divide(BigDecimal.valueOf(movement.getPointCount()), 2, BigDecimal.ROUND_HALF_UP) - ); - } - - // 정류/통과 구분 (10분 이상 체류 시 정류) - long stayMinutes = ChronoUnit.MINUTES.between( - movement.getEnterTime(), movement.getExitTime() - ); - - if (stayMinutes > 10) { - stats.getStationaryVessels().put(vesselKey, movement); - } else { - stats.getTransitVessels().put(vesselKey, movement); - } - } - } - - // 통계 최종 계산 - statsMap.values().forEach(stats -> { - stats.setVesselCount( - stats.getTransitVessels().size() + stats.getStationaryVessels().size() - ); - - // 평균 속도 계산 - List allSpeeds = new ArrayList<>(); - stats.getTransitVessels().values().stream() - .map(VesselMovement::getAvgSpeed) - .filter(Objects::nonNull) - .forEach(allSpeeds::add); - stats.getStationaryVessels().values().stream() - 
.map(VesselMovement::getAvgSpeed) - .filter(Objects::nonNull) - .forEach(allSpeeds::add); - - if (!allSpeeds.isEmpty()) { - BigDecimal totalSpeed = allSpeeds.stream() - .reduce(BigDecimal.ZERO, BigDecimal::add); - stats.setAvgSog( - totalSpeed.divide(BigDecimal.valueOf(allSpeeds.size()), 2, BigDecimal.ROUND_HALF_UP) - ); - } - }); - - return new ArrayList<>(statsMap.values()); - }; - } - - private void loadAreas() { - log.info("Loading areas from query database..."); - DataSourceLogger.logJdbcTemplateInfo("AreaStatisticsProcessor.loadAreas", queryJdbcTemplate); - - String sql = "SELECT area_id, area_name, area_type, public.ST_AsText(area_geom) as geom_wkt FROM signal.t_areas"; - - try { - boolean exists = DataSourceLogger.checkTableExists( - "AreaStatisticsProcessor.loadAreas", queryJdbcTemplate, "signal", "t_areas" - ); - - if (exists) { - List areas = queryJdbcTemplate.query(sql, (rs, rowNum) -> { - AreaInfo area = new AreaInfo(); - area.setAreaId(rs.getString("area_id")); - area.setAreaName(rs.getString("area_name")); - area.setAreaType(rs.getString("area_type")); - area.setGeomWkt(rs.getString("geom_wkt")); - - // WKT를 JTS Geometry로 변환 - try { - Geometry geom = wktReader.read(area.getGeomWkt()); - area.setGeometry(geom); - area.setEnvelope(geom.getEnvelopeInternal()); - } catch (Exception e) { - log.error("Failed to parse WKT for area {}: {}", area.getAreaId(), e.getMessage()); - } - - return area; - }); - - areas.forEach(area -> { - areaCache.put(area.getAreaId(), area); - areaList.add(area); - }); - - log.info("Successfully loaded {} areas into memory cache", areas.size()); - log.info("Area types: {}", areas.stream() - .collect(java.util.stream.Collectors.groupingBy( - AreaInfo::getAreaType, - java.util.stream.Collectors.counting() - ))); - } else { - log.error("Cannot load areas - table signal.t_areas does not exist!"); - } - } catch (Exception e) { - log.error("Failed to load areas", e); - } - } - - /** - * 메모리에서 포인트가 속한 구역 찾기 (DB 쿼리 없음!) 
- */ - public List findAreasForPointInMemory(double lat, double lon) { - - // JTS Point 생성 - Point point = geometryFactory.createPoint(new Coordinate(lon, lat)); - - return areaList.parallelStream() - .filter(area -> area.getGeometry() != null) - .filter(area -> area.getEnvelope().contains(lon, lat)) - .filter(area -> { - try { - return area.getGeometry().contains(point); - } catch (Exception e) { - return false; - } - }) - .map(AreaInfo::getAreaId) - .collect(Collectors.toList()); -// List areaIds = new ArrayList<>(); -// // 모든 구역에 대해 contains 검사 -// for (AreaInfo area : areaList) { -// if (area.getGeometry() == null) { -// continue; -// } -// -// // 1. Envelope(Bounding Box)로 빠른 필터링 -// if (!area.getEnvelope().contains(lon, lat)) { -// continue; -// } -// -// // 2. 정확한 contains 검사 -// try { -// if (area.getGeometry().contains(point)) { -// areaIds.add(area.getAreaId()); -// } -// } catch (Exception e) { -// log.debug("Error checking contains for area {}: {}", area.getAreaId(), e.getMessage()); -// } -// } -// -// return areaIds; - - } - - /** - * 캐시 상태 조회 (디버깅/모니터링용) - */ - public Map getCacheStats() { - Map stats = new HashMap<>(); - stats.put("loadedAreas", areaList.size()); - stats.put("areaTypes", areaList.stream() - .collect(java.util.stream.Collectors.groupingBy( - AreaInfo::getAreaType, - java.util.stream.Collectors.counting() - ))); - return stats; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/BaseTrackProcessorWithAbnormalDetection.java b/src/main/java/gc/mda/signal_batch/batch/processor/BaseTrackProcessorWithAbnormalDetection.java index 6c942c0..dc5d836 100644 --- a/src/main/java/gc/mda/signal_batch/batch/processor/BaseTrackProcessorWithAbnormalDetection.java +++ b/src/main/java/gc/mda/signal_batch/batch/processor/BaseTrackProcessorWithAbnormalDetection.java @@ -46,8 +46,8 @@ public abstract class BaseTrackProcessorWithAbnormalDetection implements ItemPro AbnormalDetectionResult result = 
abnormalTrackDetector.detectBucketTransitionOnly(track, previousTrack); if (result.hasAbnormalities()) { - log.debug("Abnormal track detected for vessel {}/{} at {}: {}", - track.getSigSrcCd(), track.getTargetId(), track.getTimeBucket(), + log.debug("Abnormal track detected for vessel {} at {}: {}", + track.getMmsi(), track.getTimeBucket(), result.getAbnormalSegments().size()); } @@ -60,12 +60,11 @@ public abstract class BaseTrackProcessorWithAbnormalDetection implements ItemPro protected VesselTrack getPreviousBucketLastTrack(VesselTrack.VesselKey vesselKey) { try { String sql = """ - SELECT sig_src_cd, target_id, time_bucket, + SELECT mmsi, time_bucket, end_position, public.ST_AsText(public.ST_LineSubstring(track_geom, 0.9, 1.0)) as last_segment FROM %s - WHERE sig_src_cd = ? - AND target_id = ? + WHERE mmsi = ? AND time_bucket >= ? AND time_bucket < ? ORDER BY time_bucket DESC @@ -83,14 +82,13 @@ public abstract class BaseTrackProcessorWithAbnormalDetection implements ItemPro return jdbcTemplate.queryForObject(sql, (rs, rowNum) -> { return VesselTrack.builder() - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) + .mmsi(rs.getString("mmsi")) .timeBucket(rs.getTimestamp("time_bucket").toLocalDateTime()) .trackGeom(rs.getString("last_segment")) .endPosition(parseEndPosition(rs.getString("end_position"))) .build(); }, - vesselKey.getSigSrcCd(), vesselKey.getTargetId(), previousBucketTimestamp, currentBucketTimestamp + vesselKey.getMmsi(), previousBucketTimestamp, currentBucketTimestamp ); } catch (Exception e) { log.debug("No previous bucket track found for vessel {}", vesselKey); diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/DailyTrackProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/DailyTrackProcessor.java index 1f99280..5fc4300 100644 --- a/src/main/java/gc/mda/signal_batch/batch/processor/DailyTrackProcessor.java +++ b/src/main/java/gc/mda/signal_batch/batch/processor/DailyTrackProcessor.java @@ 
-39,8 +39,7 @@ public class DailyTrackProcessor implements ItemProcessor= ? AND time_bucket < ? AND track_geom IS NOT NULL @@ -49,28 +48,26 @@ public class DailyTrackProcessor implements ItemProcessor { - + private final DataSource queryDataSource; private final JdbcTemplate jdbcTemplate; private static final DateTimeFormatter TIMESTAMP_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); - + @Override public VesselTrack process(VesselTrack.VesselKey vesselKey) throws Exception { LocalDateTime hourBucket = vesselKey.getTimeBucket() .withMinute(0) .withSecond(0) .withNano(0); - + String sql = """ WITH ordered_tracks AS ( SELECT * FROM signal.t_vessel_tracks_5min - WHERE sig_src_cd = ? - AND target_id = ? + WHERE mmsi = ? AND time_bucket >= ? AND time_bucket < ? AND track_geom IS NOT NULL @@ -46,28 +45,26 @@ public class HourlyTrackProcessor implements ItemProcessor processor() { - // 청크 내에서 최신 데이터만 유지 - ConcurrentHashMap latestMap = new ConcurrentHashMap<>(); - - return item -> { - if (!item.isValidPosition()) { - log.debug("Invalid position for vessel: {}", item.getVesselKey()); - return null; - } - - String key = item.getVesselKey(); - VesselLatestPosition current = VesselLatestPosition.fromVesselData(item); - - VesselLatestPosition existing = latestMap.get(key); - if (existing == null || current.getLastUpdate().isAfter(existing.getLastUpdate())) { - latestMap.put(key, current); - return current; - } - - return null; - }; - } - - @StepScope - public ItemProcessor filteringProcessor( - LocalDateTime cutoffTime) { - - return item -> { - // 특정 시간 이후 데이터만 처리 - if (item.getMessageTime().isBefore(cutoffTime)) { - return null; - } - - if (!item.isValidPosition()) { - return null; - } - - return VesselLatestPosition.fromVesselData(item); - }; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/TileAggregationProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/TileAggregationProcessor.java deleted file 
mode 100644 index f30368f..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/processor/TileAggregationProcessor.java +++ /dev/null @@ -1,291 +0,0 @@ -package gc.mda.signal_batch.batch.processor; - -import gc.mda.signal_batch.domain.gis.model.TileStatistics; -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.global.util.HaeguGeoUtils; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.item.ItemProcessor; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import java.math.BigDecimal; -import java.math.RoundingMode; -import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; -import java.util.*; - - -@Slf4j -@Configuration -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class TileAggregationProcessor { - - private final HaeguGeoUtils geoUtils; - - /** - * 타일 레벨과 시간 버킷에 따른 배치 프로세서 생성 - */ - public ItemProcessor, List> batchProcessor( - int tileLevel, int timeBucketMinutes) { - - return items -> { - if (items == null || items.isEmpty()) { - return null; - } - - Map tileMap = new HashMap<>(); - - for (VesselData item : items) { - if (!item.isValidPosition()) { - continue; - } - - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / timeBucketMinutes) * timeBucketMinutes); - - // 요청된 레벨에 따라 처리 - if (tileLevel >= 0) { - // Level 0 (대해구) 처리 - HaeguGeoUtils.HaeguTileInfo level0Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 0 - ); - - if (level0Info != null) { - String haeguKey = level0Info.tileId + 
"_" + bucket.toString(); - - TileStatistics haeguStats = tileMap.computeIfAbsent(haeguKey, - k -> TileStatistics.builder() - .tileId(level0Info.tileId) - .tileLevel(0) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - haeguStats.addVesselData(item); - } - } - - if (tileLevel >= 1) { - // Level 1 (소해구) 처리 - HaeguGeoUtils.HaeguTileInfo level1Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 1 - ); - - if (level1Info != null && level1Info.sohaeguNo != null) { - String subKey = level1Info.tileId + "_" + bucket.toString(); - - TileStatistics subStats = tileMap.computeIfAbsent(subKey, - k -> TileStatistics.builder() - .tileId(level1Info.tileId) - .tileLevel(1) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - subStats.addVesselData(item); - } - } - } - - // 각 타일별로 밀도 계산 - tileMap.values().forEach(this::calculateDensity); - - return new ArrayList<>(tileMap.values()); - }; - } - - @Bean - @StepScope - public ItemProcessor, List> tileAggregationBatchProcessor( - @Value("#{jobParameters['timeBucketMinutes']}") Integer timeBucketMinutes) { - - final int bucketMinutes = (timeBucketMinutes != null) ? timeBucketMinutes : 5; - - return items -> { - if (items == null || items.isEmpty()) { - return null; - } - - Map tileMap = new HashMap<>(); - - for (VesselData item : items) { - if (!item.isValidPosition()) { - continue; - } - - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / bucketMinutes) * bucketMinutes); - - // 1. 
대해구 레벨(Level 0) 처리 - HaeguGeoUtils.HaeguTileInfo level0Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 0 - ); - - if (level0Info != null) { - String haeguKey = level0Info.tileId + "_" + bucket.toString(); - - TileStatistics haeguStats = tileMap.computeIfAbsent(haeguKey, - k -> TileStatistics.builder() - .tileId(level0Info.tileId) - .tileLevel(0) // 대해구는 레벨 0 - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - haeguStats.addVesselData(item); - } - - // 2. 소해구 레벨(Level 1) 처리 - HaeguGeoUtils.HaeguTileInfo level1Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 1 - ); - - if (level1Info != null && level1Info.sohaeguNo != null) { - String subKey = level1Info.tileId + "_" + bucket.toString(); - - TileStatistics subStats = tileMap.computeIfAbsent(subKey, - k -> TileStatistics.builder() - .tileId(level1Info.tileId) - .tileLevel(1) // 소해구는 레벨 1 - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - subStats.addVesselData(item); - } - } - - // 각 타일별로 밀도 계산 - tileMap.values().forEach(stats -> { - calculateDensity(stats); - }); - - return new ArrayList<>(tileMap.values()); - }; - } - - @Bean - @StepScope - public ItemProcessor> singleItemProcessor( - @Value("#{jobParameters['tileLevel']}") Integer tileLevel, - @Value("#{jobParameters['timeBucketMinutes']}") Integer timeBucketMinutes) { - - final int bucketMinutes = (timeBucketMinutes != null) ? timeBucketMinutes : 5; - final int maxLevel = (tileLevel != null) ? 
tileLevel : 1; - - Map accumulator = new HashMap<>(); - - return item -> { - if (!item.isValidPosition()) { - return null; - } - - LocalDateTime bucket = item.getMessageTime() - .truncatedTo(ChronoUnit.MINUTES) - .withMinute((item.getMessageTime().getMinute() / bucketMinutes) * bucketMinutes); - - List result = new ArrayList<>(); - - // Level 0 (대해구) - if (maxLevel >= 0) { - HaeguGeoUtils.HaeguTileInfo level0Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 0 - ); - - if (level0Info != null) { - String key = level0Info.tileId + "_" + bucket.toString(); - TileStatistics stats = accumulator.computeIfAbsent(key, - k -> TileStatistics.builder() - .tileId(level0Info.tileId) - .tileLevel(0) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - stats.addVesselData(item); - - // 일정 개수가 쌓이면 출력 - if (stats.getTotalPoints() % 1000 == 0) { - calculateDensity(stats); - result.add(stats); - } - } - } - - // Level 1 (소해구) - if (maxLevel >= 1) { - HaeguGeoUtils.HaeguTileInfo level1Info = geoUtils.getHaeguTileInfo( - item.getLat(), item.getLon(), 1 - ); - - if (level1Info != null && level1Info.sohaeguNo != null) { - String key = level1Info.tileId + "_" + bucket.toString(); - TileStatistics stats = accumulator.computeIfAbsent(key, - k -> TileStatistics.builder() - .tileId(level1Info.tileId) - .tileLevel(1) - .timeBucket(bucket) - .uniqueVessels(new HashMap<>()) - .totalPoints(0L) - .avgSog(BigDecimal.ZERO) - .maxSog(BigDecimal.ZERO) - .build() - ); - stats.addVesselData(item); - - // 일정 개수가 쌓이면 출력 - if (stats.getTotalPoints() % 1000 == 0) { - calculateDensity(stats); - result.add(stats); - } - } - } - - return result.isEmpty() ? 
null : result; - }; - } - - /** - * 타일의 선박 밀도 계산 - */ - private void calculateDensity(TileStatistics stats) { - if (stats.getVesselCount() == null || stats.getVesselCount() == 0) { - stats.setVesselDensity(BigDecimal.ZERO); - return; - } - - // 타일 면적 가져오기 (km²) - double tileArea = geoUtils.getTileArea(stats.getTileId()); - - if (tileArea > 0) { - // 밀도 = 선박 수 / 면적 - BigDecimal density = BigDecimal.valueOf(stats.getVesselCount()) - .divide(BigDecimal.valueOf(tileArea), 6, RoundingMode.HALF_UP); - stats.setVesselDensity(density); - } else { - stats.setVesselDensity(BigDecimal.ZERO); - } - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/processor/VesselTrackProcessor.java b/src/main/java/gc/mda/signal_batch/batch/processor/VesselTrackProcessor.java index b3bc426..a3ff5d8 100644 --- a/src/main/java/gc/mda/signal_batch/batch/processor/VesselTrackProcessor.java +++ b/src/main/java/gc/mda/signal_batch/batch/processor/VesselTrackProcessor.java @@ -76,8 +76,7 @@ public class VesselTrackProcessor implements ItemProcessor, Lis .collect(Collectors.toList()); VesselTrack track = VesselTrack.builder() - .sigSrcCd(first.getSigSrcCd()) - .targetId(first.getTargetId()) + .mmsi(first.getMmsi()) .timeBucket(timeBucket) .trackPoints(trackPoints) .pointCount(trackPoints.size()) diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManager.java b/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManager.java new file mode 100644 index 0000000..17641c8 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManager.java @@ -0,0 +1,246 @@ +package gc.mda.signal_batch.batch.reader; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.RemovalCause; +import com.github.benmanes.caffeine.cache.stats.CacheStats; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import 
jakarta.annotation.PostConstruct; +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.stereotype.Component; + +import java.time.OffsetDateTime; +import java.util.*; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +/** + * AIS Target Caffeine 캐시 매니저 + * + * key: MMSI (String) — 문자 혼합 MMSI 장비 지원 + * value: AisTargetEntity + * + * 동작: + * - 1분 주기 API Reader → Writer에서 캐시 업데이트 + * - 5분 집계 Job에서 캐시 스냅샷 추출 → VesselData 변환 + * - 기존 데이터보다 최신(messageTimestamp 기준)인 경우에만 업데이트 + * + * TTL (프로파일별): + * - local: 5분, dev: 60분, prod/prod-mpr: 120분 + */ +@Slf4j +@Component +public class AisTargetCacheManager { + + private Cache cache; + + /** + * 트랙 누적 버퍼 — 1분 API 호출마다 위치를 append, 5분 집계 시 drain + * AtomicReference swap으로 drain 시 lock-free 처리 + */ + private final AtomicReference>> trackBufferRef = + new AtomicReference<>(new ConcurrentHashMap<>()); + + @Value("${app.cache.ais-target.ttl-minutes:120}") + private long ttlMinutes; + + @Value("${app.cache.ais-target.max-size:300000}") + private int maxSize; + + @PostConstruct + public void init() { + this.cache = Caffeine.newBuilder() + .maximumSize(maxSize) + .expireAfterWrite(ttlMinutes, TimeUnit.MINUTES) + .recordStats() + .removalListener((String key, AisTargetEntity value, RemovalCause cause) -> { + if (cause != RemovalCause.REPLACED) { + log.trace("캐시 제거 - MMSI: {}, 원인: {}", key, cause); + } + }) + .build(); + + log.info("AIS Target Caffeine 캐시 초기화 - TTL: {}분, 최대 크기: {}", ttlMinutes, maxSize); + } + + // ==================== 단건 조회/업데이트 ==================== + + public Optional get(String mmsi) { + return Optional.ofNullable(cache.getIfPresent(mmsi)); + } + + public void put(AisTargetEntity entity) { + if (entity == null || entity.getMmsi() == null) { + return; + } + + String mmsi = entity.getMmsi(); + AisTargetEntity existing = cache.getIfPresent(mmsi); + + if (existing 
== null || isNewer(entity, existing)) { + cache.put(mmsi, entity); + } + } + + // ==================== 배치 조회/업데이트 ==================== + + public Map getAll(List mmsiList) { + if (mmsiList == null || mmsiList.isEmpty()) { + return Collections.emptyMap(); + } + return cache.getAllPresent(mmsiList); + } + + /** + * 여러 데이터 일괄 저장/업데이트 + * 기존 데이터보다 최신인 경우에만 업데이트 + */ + public void putAll(List entities) { + if (entities == null || entities.isEmpty()) { + return; + } + + int updated = 0; + int skipped = 0; + + for (AisTargetEntity entity : entities) { + if (entity == null || entity.getMmsi() == null) { + continue; + } + + AisTargetEntity existing = cache.getIfPresent(entity.getMmsi()); + + if (existing == null || isNewer(entity, existing)) { + cache.put(entity.getMmsi(), entity); + updated++; + } else { + skipped++; + } + } + + log.debug("캐시 배치 업데이트 - 입력: {}, 업데이트: {}, 스킵: {}, 현재 크기: {}", + entities.size(), updated, skipped, cache.estimatedSize()); + } + + // ==================== 캐시 스냅샷 (t_ais_position 동기화용) ==================== + + /** + * 캐시의 모든 데이터 조회 (AisPositionSyncStep에서 사용) + */ + public Collection getAllValues() { + return cache.asMap().values(); + } + + // ==================== 트랙 누적 버퍼 (5분 집계용) ==================== + + /** + * 1분 API 호출 결과를 트랙 버퍼에 누적 + * MMSI별로 위치 이력을 쌓아 5분 집계 시 LineStringM 생성에 사용 + */ + public void appendAllForTrack(List entities) { + if (entities == null || entities.isEmpty()) { + return; + } + + ConcurrentHashMap> buffer = trackBufferRef.get(); + int appended = 0; + + for (AisTargetEntity entity : entities) { + if (entity == null || entity.getMmsi() == null + || entity.getLat() == null || entity.getLon() == null) { + continue; + } + buffer.computeIfAbsent(entity.getMmsi(), + k -> Collections.synchronizedList(new ArrayList<>())).add(entity); + appended++; + } + + log.debug("트랙 버퍼 누적: {} 건 (버퍼 내 선박 수: {})", appended, buffer.size()); + } + + /** + * 트랙 버퍼를 drain하여 반환하고 새 버퍼로 교체 (5분 집계 Job에서 호출) + * AtomicReference swap으로 1분 Writer와 lock-free 동시성 
보장 + * + * @return MMSI별 누적 위치 목록 (보통 MMSI당 ~5개 포인트) + */ + public Map> drainTrackBuffer() { + ConcurrentHashMap> drained = + trackBufferRef.getAndSet(new ConcurrentHashMap<>()); + + long totalPoints = drained.values().stream().mapToLong(List::size).sum(); + log.info("트랙 버퍼 drain: {} 선박, {} 포인트", drained.size(), totalPoints); + + return drained; + } + + /** + * 트랙 버퍼 현재 크기 (모니터링용) + */ + public Map getTrackBufferStats() { + ConcurrentHashMap> buffer = trackBufferRef.get(); + long totalPoints = buffer.values().stream().mapToLong(List::size).sum(); + + Map stats = new LinkedHashMap<>(); + stats.put("vesselCount", buffer.size()); + stats.put("totalPoints", totalPoints); + stats.put("avgPointsPerVessel", buffer.isEmpty() ? 0 : String.format("%.1f", (double) totalPoints / buffer.size())); + return stats; + } + + // ==================== 캐시 관리 ==================== + + public void evict(String mmsi) { + cache.invalidate(mmsi); + } + + public void clear() { + long size = cache.estimatedSize(); + cache.invalidateAll(); + log.info("캐시 전체 삭제 - {} 건", size); + } + + public long size() { + return cache.estimatedSize(); + } + + public void cleanup() { + cache.cleanUp(); + } + + // ==================== 통계 ==================== + + public Map getStats() { + CacheStats stats = cache.stats(); + + Map result = new LinkedHashMap<>(); + result.put("estimatedSize", cache.estimatedSize()); + result.put("maxSize", maxSize); + result.put("ttlMinutes", ttlMinutes); + result.put("hitCount", stats.hitCount()); + result.put("missCount", stats.missCount()); + result.put("hitRate", String.format("%.2f%%", stats.hitRate() * 100)); + result.put("evictionCount", stats.evictionCount()); + result.put("utilizationPercent", String.format("%.2f%%", (cache.estimatedSize() * 100.0 / maxSize))); + + return result; + } + + public CacheStats getCacheStats() { + return cache.stats(); + } + + // ==================== Private ==================== + + private boolean isNewer(AisTargetEntity newEntity, 
AisTargetEntity existing) { + OffsetDateTime newTs = newEntity.getMessageTimestamp(); + OffsetDateTime existingTs = existing.getMessageTimestamp(); + + if (newTs == null) return false; + if (existingTs == null) return true; + + return newTs.isAfter(existingTs); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetDataReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetDataReader.java new file mode 100644 index 0000000..1aad2cc --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/reader/AisTargetDataReader.java @@ -0,0 +1,86 @@ +package gc.mda.signal_batch.batch.reader; + +import gc.mda.signal_batch.domain.vessel.dto.AisTargetApiResponse; +import gc.mda.signal_batch.domain.vessel.dto.AisTargetDto; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.item.ItemReader; +import org.springframework.web.reactive.function.client.WebClient; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * S&P Global AIS API Reader (Spring Batch ItemReader) + * + * API: POST /AisSvc.svc/AIS/GetTargetsEnhanced + * 요청: {"sinceSeconds": "60"} + * 응답: ~33,000건/분 + * + * 동작: + * - 첫 read() 호출 시 API를 한 번 호출하여 전체 데이터를 가져옴 + * - 이후 read() 호출마다 한 건씩 반환 (Spring Batch chunk 처리) + * - 모든 데이터를 반환하면 null을 반환하여 Step 종료 + */ +@Slf4j +public class AisTargetDataReader implements ItemReader { + + private static final String API_PATH = "/AisSvc.svc/AIS/GetTargetsEnhanced"; + + private final WebClient webClient; + private final int sinceSeconds; + + private Iterator iterator; + private boolean fetched = false; + + public AisTargetDataReader(WebClient webClient, int sinceSeconds) { + this.webClient = webClient; + this.sinceSeconds = sinceSeconds; + } + + @Override + public AisTargetDto read() { + if (!fetched) { + List data = fetchDataFromApi(); + this.iterator = data.iterator(); + this.fetched = true; + } + + if (iterator != null && iterator.hasNext()) { + return iterator.next(); 
+ } + + // Step 종료 — 다음 실행을 위해 상태 리셋 + fetched = false; + iterator = null; + return null; + } + + private List fetchDataFromApi() { + try { + log.info("[AisTargetDataReader] API 호출 시작: POST {} (sinceSeconds: {})", + API_PATH, sinceSeconds); + + AisTargetApiResponse response = webClient.post() + .uri(API_PATH) + .bodyValue(Map.of("sinceSeconds", String.valueOf(sinceSeconds))) + .retrieve() + .bodyToMono(AisTargetApiResponse.class) + .block(); + + if (response != null && response.getTargetArr() != null) { + List targets = response.getTargetArr(); + log.info("[AisTargetDataReader] API 호출 완료: {} 건 조회", targets.size()); + return targets; + } else { + log.warn("[AisTargetDataReader] API 응답이 비어있습니다"); + return Collections.emptyList(); + } + + } catch (Exception e) { + log.error("[AisTargetDataReader] API 호출 실패: {}", e.getMessage(), e); + return Collections.emptyList(); + } + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/CacheBasedVesselTrackDataReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/CacheBasedVesselTrackDataReader.java new file mode 100644 index 0000000..ab7d28d --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/reader/CacheBasedVesselTrackDataReader.java @@ -0,0 +1,132 @@ +package gc.mda.signal_batch.batch.reader; + +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import gc.mda.signal_batch.domain.vessel.model.VesselData; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.item.ItemReader; +import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; + +import java.math.BigDecimal; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.*; +import java.util.stream.Collectors; + +/** + * Caffeine 캐시 기반 선박 궤적 데이터 Reader + * + * AisTargetCacheManager에서 캐시 스냅샷을 추출하여 + * VesselData 형식으로 변환, MMSI + 5분 time_bucket별 그룹화하여 반환. + * 하나의 MMSI가 여러 time_bucket에 걸친 데이터를 가질 수 있으므로 + * 각 (MMSI, time_bucket) 조합이 별도의 처리 단위가 된다. 
+ * + * 기존 InMemoryVesselTrackDataReader + VesselTrackDataJobListener 대체 + */ +@Slf4j +@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) +@RequiredArgsConstructor +public class CacheBasedVesselTrackDataReader implements ItemReader> { + + private final AisTargetCacheManager cacheManager; + private final int staleDataThresholdDays; + + private Iterator> groupIterator; + private boolean initialized = false; + + @Override + public List read() { + if (!initialized) { + initialize(); + initialized = true; + } + + if (groupIterator != null && groupIterator.hasNext()) { + return groupIterator.next(); + } + + return null; // 더 이상 데이터 없음 + } + + private void initialize() { + // 트랙 버퍼에서 누적 데이터 drain (1분마다 쌓인 위치 이력) + Map> trackBuffer = cacheManager.drainTrackBuffer(); + + if (trackBuffer.isEmpty()) { + log.info("트랙 버퍼에 데이터 없음 — 궤적 생성 스킵"); + groupIterator = Collections.emptyIterator(); + return; + } + + // AisTargetEntity → VesselData 변환 + MMSI × 5분 time_bucket 이중 그룹화 + LocalDateTime staleCutoff = LocalDateTime.now().minusDays(staleDataThresholdDays); + List> allGroups = new ArrayList<>(); + long totalPoints = 0; + int totalVessels = 0; + int skippedStaleGroups = 0; + + for (Map.Entry> entry : trackBuffer.entrySet()) { + List vesselDataList = entry.getValue().stream() + .filter(e -> e.getLat() != null && e.getLon() != null) + .map(this::toVesselData) + .sorted(Comparator.comparing(VesselData::getMessageTime, + Comparator.nullsLast(Comparator.naturalOrder()))) + .collect(Collectors.toList()); + + if (vesselDataList.isEmpty()) { + continue; + } + + totalVessels++; + + // MMSI 내에서 5분 time_bucket별로 서브 그룹 분할 + Map> bucketGroups = vesselDataList.stream() + .collect(Collectors.groupingBy( + (VesselData vd) -> calculateTimeBucket(vd.getMessageTime()), + LinkedHashMap::new, + Collectors.toList())); + + for (Map.Entry> bucketEntry : bucketGroups.entrySet()) { + if (bucketEntry.getKey().isBefore(staleCutoff)) { + 
skippedStaleGroups++; + continue; + } + allGroups.add(bucketEntry.getValue()); + totalPoints += bucketEntry.getValue().size(); + } + } + + if (skippedStaleGroups > 0) { + log.info("Stale data 필터: {}개 그룹 스킵 ({}일 이전 데이터)", skippedStaleGroups, staleDataThresholdDays); + } + log.info("트랙 버퍼 Reader 초기화: {} 선박, {} 그룹(MMSI×버킷), {} 포인트 (평균 {}pt/그룹)", + totalVessels, allGroups.size(), totalPoints, + allGroups.isEmpty() ? "0.0" : String.format("%.1f", (double) totalPoints / allGroups.size())); + + groupIterator = allGroups.iterator(); + } + + private LocalDateTime calculateTimeBucket(LocalDateTime messageTime) { + return messageTime.withSecond(0).withNano(0) + .minusMinutes(messageTime.getMinute() % 5); + } + + private VesselData toVesselData(AisTargetEntity entity) { + LocalDateTime messageTime = entity.getMessageTimestamp() != null + ? entity.getMessageTimestamp().atZoneSameInstant(ZoneId.systemDefault()).toLocalDateTime() + : LocalDateTime.now(); + + return VesselData.builder() + .mmsi(entity.getMmsi()) + .messageTime(messageTime) + .lat(entity.getLat()) + .lon(entity.getLon()) + .sog(entity.getSog() != null ? BigDecimal.valueOf(entity.getSog()) : null) + .cog(entity.getCog() != null ? BigDecimal.valueOf(entity.getCog()) : null) + .heading(entity.getHeading() != null ? 
entity.getHeading().intValue() : null) + .shipNm(entity.getName()) + .shipTy(entity.getVesselType()) + .rot(entity.getRot()) + .build(); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheManager.java b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheManager.java new file mode 100644 index 0000000..78619e1 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheManager.java @@ -0,0 +1,121 @@ +package gc.mda.signal_batch.batch.reader; + +import com.github.benmanes.caffeine.cache.Cache; +import com.github.benmanes.caffeine.cache.Caffeine; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import jakarta.annotation.PostConstruct; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Component; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +/** + * 중국 허가선박 전용 캐시 + * + * - 대상 MMSI(~1,400척)만 별도 관리 + * - TTL: expireAfterWrite (마지막 put 이후 N일 경과 시 만료) + * - key: MMSI (String) + */ +@Slf4j +@Component +@RequiredArgsConstructor +public class ChnPrmShipCacheManager { + + private final ChnPrmShipProperties properties; + private Cache cache; + + @PostConstruct + public void init() { + this.cache = Caffeine.newBuilder() + .maximumSize(properties.getMaxSize()) + .expireAfterWrite(properties.getTtlDays(), TimeUnit.DAYS) + .recordStats() + .build(); + + log.info("ChnPrmShip 캐시 초기화 - TTL: {}일, 최대 크기: {}, 대상 MMSI: {}건", + properties.getTtlDays(), properties.getMaxSize(), properties.getMmsiSet().size()); + } + + /** + * 대상 MMSI에 해당하는 항목만 필터링하여 캐시에 저장 + */ + public int putIfTarget(List items) { + if (items == null || items.isEmpty()) { + return 0; + } + + int updated = 0; + for (AisTargetEntity item : items) { + if (!properties.isTarget(item.getMmsi())) { + continue; + } + + AisTargetEntity existing = 
cache.getIfPresent(item.getMmsi()); + if (existing == null || isNewerOrEqual(item, existing)) { + cache.put(item.getMmsi(), item); + updated++; + } + } + + if (updated > 0) { + log.debug("ChnPrmShip 캐시 업데이트 - 입력: {}, 대상 저장: {}, 현재 크기: {}", + items.size(), updated, cache.estimatedSize()); + } + return updated; + } + + /** + * 시간 범위 내 캐시 데이터 조회 + */ + public List getByTimeRange(int minutes) { + OffsetDateTime threshold = OffsetDateTime.now(ZoneOffset.UTC).minusMinutes(minutes); + + return cache.asMap().values().stream() + .filter(entity -> entity.getMessageTimestamp() != null) + .filter(entity -> entity.getMessageTimestamp().isAfter(threshold)) + .collect(Collectors.toList()); + } + + /** + * 워밍업용 직접 저장 (시간 비교 없이) + */ + public void putAll(List entities) { + if (entities == null || entities.isEmpty()) { + return; + } + for (AisTargetEntity entity : entities) { + if (entity != null && entity.getMmsi() != null) { + cache.put(entity.getMmsi(), entity); + } + } + } + + public long size() { + return cache.estimatedSize(); + } + + public Map getStats() { + var stats = cache.stats(); + return Map.of( + "estimatedSize", cache.estimatedSize(), + "maxSize", properties.getMaxSize(), + "ttlDays", properties.getTtlDays(), + "targetMmsiCount", properties.getMmsiSet().size(), + "hitCount", stats.hitCount(), + "missCount", stats.missCount(), + "hitRate", String.format("%.2f%%", stats.hitRate() * 100) + ); + } + + private boolean isNewerOrEqual(AisTargetEntity candidate, AisTargetEntity existing) { + if (candidate.getMessageTimestamp() == null) return false; + if (existing.getMessageTimestamp() == null) return true; + return !candidate.getMessageTimestamp().isBefore(existing.getMessageTimestamp()); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheWarmer.java b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheWarmer.java new file mode 100644 index 0000000..7533055 --- /dev/null +++ 
b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipCacheWarmer.java @@ -0,0 +1,134 @@
package gc.mda.signal_batch.batch.reader;

import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity;
import gc.mda.signal_batch.global.util.SignalKindCode;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;

import java.sql.ResultSet;
import java.sql.SQLException;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.util.ArrayList;
import java.util.List;

/**
 * 기동 시 ChnPrmShip 캐시 워밍업
 *
 * t_ais_position 테이블에서 대상 MMSI의 데이터를 조회하여 캐시를 채운다.
 * 이후 매 분 배치 수집에서 실시간 데이터가 캐시를 갱신한다.
 */
@Slf4j
@Component
public class ChnPrmShipCacheWarmer implements ApplicationRunner {

    private static final int DB_QUERY_CHUNK_SIZE = 500;

    private final ChnPrmShipProperties properties;
    private final ChnPrmShipCacheManager cacheManager;
    private final JdbcTemplate queryJdbcTemplate;

    /**
     * FIX: explicit constructor instead of Lombok's @RequiredArgsConstructor.
     * Lombok does NOT copy a field-level @Qualifier onto the generated
     * constructor parameter (unless lombok.copyableAnnotations is configured),
     * so the "queryJdbcTemplate" qualifier was silently ignored and Spring
     * could inject the wrong JdbcTemplate bean. Putting @Qualifier on the
     * constructor parameter is always honoured.
     */
    public ChnPrmShipCacheWarmer(ChnPrmShipProperties properties,
                                 ChnPrmShipCacheManager cacheManager,
                                 @Qualifier("queryJdbcTemplate") JdbcTemplate queryJdbcTemplate) {
        this.properties = properties;
        this.cacheManager = cacheManager;
        this.queryJdbcTemplate = queryJdbcTemplate;
    }

    /**
     * Warms the cache at application startup: chunks the target MMSI set,
     * loads recent rows from DB, backfills missing signal_kind_code, and
     * stores everything via putAll. Per-chunk failures are logged and skipped
     * so one bad chunk cannot abort the whole warmup.
     */
    @Override
    public void run(ApplicationArguments args) {
        if (!properties.isWarmupEnabled()) {
            log.info("ChnPrmShip 캐시 워밍업 비활성화");
            return;
        }

        if (properties.getMmsiSet().isEmpty()) {
            log.warn("ChnPrmShip 대상 MMSI가 없어 워밍업을 건너뜁니다");
            return;
        }

        OffsetDateTime since = OffsetDateTime.now(ZoneOffset.UTC)
                .minusDays(properties.getWarmupDays());

        log.info("ChnPrmShip 캐시 워밍업 시작 - 대상: {}건, 조회 범위: 최근 {}일",
                properties.getMmsiSet().size(), properties.getWarmupDays());
        long startTime = System.currentTimeMillis();

        List<String> mmsiList = new ArrayList<>(properties.getMmsiSet());
        int totalLoaded = 0;

        for (int i = 0; i < mmsiList.size(); i += DB_QUERY_CHUNK_SIZE) {
            List<String> chunk = mmsiList.subList(i,
                    Math.min(i + DB_QUERY_CHUNK_SIZE, mmsiList.size()));

            try {
                List<AisTargetEntity> fromDb = queryLatestByMmsiSince(chunk, since);

                // Backfill signal_kind_code for legacy rows persisted before
                // the classifier existed.
                fromDb.forEach(entity -> {
                    if (entity.getSignalKindCode() == null) {
                        SignalKindCode kindCode = SignalKindCode.resolve(
                                entity.getVesselType(), entity.getExtraInfo());
                        entity.setSignalKindCode(kindCode.getCode());
                    }
                });

                cacheManager.putAll(fromDb);
                totalLoaded += fromDb.size();
            } catch (Exception e) {
                log.warn("ChnPrmShip 워밍업 DB 조회 실패 (chunk {}/{}): {}",
                        i / DB_QUERY_CHUNK_SIZE + 1,
                        (mmsiList.size() + DB_QUERY_CHUNK_SIZE - 1) / DB_QUERY_CHUNK_SIZE,
                        e.getMessage());
            }
        }

        long elapsed = System.currentTimeMillis() - startTime;
        log.info("ChnPrmShip 캐시 워밍업 완료 - 대상: {}, 로딩: {}건, 소요: {}ms",
                properties.getMmsiSet().size(), totalLoaded, elapsed);
    }

    /**
     * Loads the LATEST row per MMSI newer than {@code since}.
     *
     * FIX: the original SELECT returned every row in the window with no
     * ordering, and putAll then overwrote the cache in arbitrary result-set
     * order — so a stale position could end up cached. PostgreSQL
     * DISTINCT ON (mmsi) + ORDER BY mmsi, message_timestamp DESC keeps exactly
     * the newest row per vessel.
     */
    private List<AisTargetEntity> queryLatestByMmsiSince(List<String> mmsiList, OffsetDateTime since) {
        String placeholders = String.join(",", mmsiList.stream().map(m -> "?").toList());
        String sql = "SELECT DISTINCT ON (mmsi) mmsi, imo, name, callsign, vessel_type, extra_info, "
                + "lat, lon, heading, sog, cog, rot, length, width, draught, "
                + "destination, eta, status, message_timestamp, signal_kind_code, class_type "
                + "FROM signal.t_ais_position "
                + "WHERE mmsi IN (" + placeholders + ") "
                + "AND message_timestamp >= ? "
                + "ORDER BY mmsi, message_timestamp DESC";

        Object[] params = new Object[mmsiList.size() + 1];
        for (int j = 0; j < mmsiList.size(); j++) {
            params[j] = mmsiList.get(j);
        }
        params[mmsiList.size()] = since;

        // Non-deprecated overload (the (sql, Object[], RowMapper) variant is
        // deprecated since Spring Framework 5.3).
        return queryJdbcTemplate.query(sql, (rs, rowNum) -> mapRow(rs), params);
    }

    /** Maps one t_ais_position row, preserving NULLs for nullable columns. */
    private AisTargetEntity mapRow(ResultSet rs) throws SQLException {
        return AisTargetEntity.builder()
                .mmsi(rs.getString("mmsi"))
                .imo(rs.getObject("imo") != null ? rs.getLong("imo") : null)
                .name(rs.getString("name"))
                .callsign(rs.getString("callsign"))
                .vesselType(rs.getString("vessel_type"))
                .extraInfo(rs.getString("extra_info"))
                .lat(rs.getObject("lat") != null ? rs.getDouble("lat") : null)
                .lon(rs.getObject("lon") != null ? rs.getDouble("lon") : null)
                .heading(rs.getObject("heading") != null ? rs.getDouble("heading") : null)
                .sog(rs.getObject("sog") != null ? rs.getDouble("sog") : null)
                .cog(rs.getObject("cog") != null ? rs.getDouble("cog") : null)
                .rot(rs.getObject("rot") != null ? rs.getInt("rot") : null)
                .length(rs.getObject("length") != null ? rs.getInt("length") : null)
                .width(rs.getObject("width") != null ? rs.getInt("width") : null)
                .draught(rs.getObject("draught") != null ? rs.getDouble("draught") : null)
                .destination(rs.getString("destination"))
                .eta(rs.getObject("eta") != null ? rs.getObject("eta", OffsetDateTime.class) : null)
                .status(rs.getString("status"))
                .messageTimestamp(rs.getObject("message_timestamp") != null
                        ? rs.getObject("message_timestamp", OffsetDateTime.class) : null)
                .signalKindCode(rs.getString("signal_kind_code"))
                .classType(rs.getString("class_type"))
                .build();
    }
}
diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipProperties.java b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipProperties.java new file mode 100644 index 0000000..5a8fb05 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/reader/ChnPrmShipProperties.java @@ -0,0 +1,61 @@
package gc.mda.signal_batch.batch.reader;

import jakarta.annotation.PostConstruct;
import lombok.Getter;
import lombok.Setter;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.core.io.DefaultResourceLoader;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Component;

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * 중국 허가선박(ChnPrmShip) 설정
 *
 * 대상 MMSI 목록을 리소스 파일에서 로딩하여 Set으로 보관한다.
+ * MMSI는 String 타입 — 문자 혼합 장비 지원 + */ +@Slf4j +@Getter +@Setter +@Component +@ConfigurationProperties(prefix = "app.chnprmship") +public class ChnPrmShipProperties { + + private String mmsiResourcePath = "classpath:chnprmship-mmsi.txt"; + private int ttlDays = 2; + private int maxSize = 2000; + private boolean warmupEnabled = true; + private int warmupDays = 2; + + private Set mmsiSet = Collections.emptySet(); + + @PostConstruct + public void init() { + try { + Resource resource = new DefaultResourceLoader().getResource(mmsiResourcePath); + try (BufferedReader reader = new BufferedReader( + new InputStreamReader(resource.getInputStream(), StandardCharsets.UTF_8))) { + mmsiSet = reader.lines() + .map(String::trim) + .filter(line -> !line.isEmpty() && !line.startsWith("#")) + .collect(Collectors.toUnmodifiableSet()); + } + log.info("ChnPrmShip MMSI 로딩 완료 - {}건 (경로: {})", mmsiSet.size(), mmsiResourcePath); + } catch (Exception e) { + log.warn("ChnPrmShip MMSI 로딩 실패 - 경로: {}, 오류: {} (비활성화됨)", mmsiResourcePath, e.getMessage()); + mmsiSet = Collections.emptySet(); + } + } + + public boolean isTarget(String mmsi) { + return mmsi != null && mmsiSet.contains(mmsi); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselDataReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselDataReader.java deleted file mode 100644 index 2f5d6c1..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselDataReader.java +++ /dev/null @@ -1,54 +0,0 @@ -package gc.mda.signal_batch.batch.reader; - -import gc.mda.signal_batch.global.util.VesselDataHolder; -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.StepExecution; -import org.springframework.batch.core.annotation.AfterStep; -import org.springframework.batch.core.annotation.BeforeStep; -import org.springframework.batch.item.ItemReader; -import 
org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.stereotype.Component; - -import java.util.Iterator; -import java.util.List; - - -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -@Slf4j -public class InMemoryVesselDataReader implements ItemReader { - - private final VesselDataHolder dataHolder; - private Iterator iterator; - private boolean initialized = false; - - @BeforeStep - public void beforeStep(StepExecution stepExecution) { - List data = dataHolder.getData(); - this.iterator = data.iterator(); - this.initialized = true; - log.info("Initialized reader with {} items for step: {}", - data.size(), stepExecution.getStepName()); - } - - @Override - public VesselData read() { - if (!initialized) { - throw new IllegalStateException("Reader not initialized"); - } - - if (iterator.hasNext()) { - return iterator.next(); - } - return null; - } - - @AfterStep - public void afterStep(StepExecution stepExecution) { - iterator = null; - initialized = false; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselTrackDataReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselTrackDataReader.java deleted file mode 100644 index 1c19579..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/reader/InMemoryVesselTrackDataReader.java +++ /dev/null @@ -1,73 +0,0 @@ -package gc.mda.signal_batch.batch.reader; - -import gc.mda.signal_batch.global.util.VesselTrackDataHolder; -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.item.ItemReader; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; - -import java.util.*; -import java.util.stream.Collectors; - -@Slf4j -@ConditionalOnProperty(name = 
"vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class InMemoryVesselTrackDataReader implements ItemReader> { - - private final VesselTrackDataHolder dataHolder; - private final int chunkSize; - - private Iterator>> groupIterator; - private List> currentChunk; - private Iterator> chunkIterator; - private boolean initialized = false; - - public void initialize() { - - // 선박별로 그룹화 (sig_src_cd + target_id) - Map> groupedData = dataHolder.getAllVesselData().stream() - .collect(Collectors.groupingBy(VesselData::getVesselKey)); - - // 각 그룹 내에서 시간순 정렬 - groupedData.forEach((key, dataList) -> - dataList.sort(Comparator.comparing(VesselData::getMessageTime))); - - groupIterator = groupedData.entrySet().iterator(); - currentChunk = new ArrayList<>(); - - log.info("Initialized track reader with {} vessel groups", groupedData.size()); - } - - @Override - public List read() { - if (!initialized) { - initialize(); - initialized = true; - } - - // 현재 청크에서 데이터 반환 - if (chunkIterator != null && chunkIterator.hasNext()) { - return chunkIterator.next(); - } - - // 새로운 청크 생성 - currentChunk.clear(); - int count = 0; - - while (groupIterator.hasNext() && count < chunkSize) { - Map.Entry> entry = groupIterator.next(); - currentChunk.add(entry.getValue()); - count++; - } - - if (currentChunk.isEmpty()) { - return null; // 더 이상 데이터 없음 - } - - chunkIterator = currentChunk.iterator(); - return chunkIterator.next(); - } - - -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/PartitionedReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/PartitionedReader.java deleted file mode 100644 index 8c04b4e..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/reader/PartitionedReader.java +++ /dev/null @@ -1,181 +0,0 @@ -package gc.mda.signal_batch.batch.reader; - -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import 
org.springframework.batch.core.configuration.annotation.StepScope; -import org.springframework.batch.core.partition.support.Partitioner; -import org.springframework.batch.item.ExecutionContext; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; - -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.format.DateTimeFormatter; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class PartitionedReader { - - @Qualifier("collectJdbcTemplate") - private final JdbcTemplate collectJdbcTemplate; - - @StepScope - public Partitioner dayPartitioner(@Value("#{jobParameters['processingDate']}") LocalDate processingDate) { - return gridSize -> { - Map partitions = new HashMap<>(); - - // 파티션 존재 확인 - String partitionName = generatePartitionName(processingDate); - - if (checkPartitionExists(partitionName)) { - // 시간대별로 파티션 생성 (gridSize 고려) - int hoursPerPartition = 24 / Math.min(gridSize, 24); - int actualPartitions = Math.min(gridSize, 24); - - for (int i = 0; i < actualPartitions; i++) { - ExecutionContext context = new ExecutionContext(); - - int startHour = i * hoursPerPartition; - int endHour = (i == actualPartitions - 1) ? 
24 : (i + 1) * hoursPerPartition; - - context.put("partition", partitionName); - context.put("startTime", processingDate.atTime(startHour, 0)); - context.put("endTime", processingDate.atTime(endHour, 0)); - context.put("partitionIndex", i); - - partitions.put("partition-" + i, context); - } - - log.info("Created {} partitions for table {}", partitions.size(), partitionName); - - } else { - // 파티션이 없는 경우 처리 - log.warn("Partition {} does not exist. Creating fallback partition.", partitionName); - - // 동적으로 파티션 생성 시도 - if (createMissingPartition(processingDate)) { - // 재귀 호출로 다시 파티셔닝 - return dayPartitioner(processingDate).partition(gridSize); - } - - // 실패 시 단일 파티션으로 처리 - ExecutionContext context = new ExecutionContext(); - context.put("partition", ""); // 전체 테이블에서 날짜 조건으로 읽기 - context.put("startTime", processingDate.atStartOfDay()); - context.put("endTime", processingDate.plusDays(1).atStartOfDay()); - context.put("partitionIndex", 0); - partitions.put("partition-fallback", context); - } - - return partitions; - }; - } - - /** - * 시간 범위 기반 파티셔너 - */ - @StepScope - public Partitioner rangePartitioner( - @Value("#{jobParameters['startTime']}") LocalDateTime startTime, - @Value("#{jobParameters['endTime']}") LocalDateTime endTime, - @Value("#{jobParameters['partitionCount']}") Integer partitionCount) { - - return gridSize -> { - Map partitions = new HashMap<>(); - - // 날짜별로 그룹화 - Map> dateGroups = groupByDate(startTime, endTime); - - int partitionIndex = 0; - for (Map.Entry> entry : dateGroups.entrySet()) { - LocalDate date = entry.getKey(); - String partitionName = findPartitionForDate(date); - - // 각 날짜에 대해 시간 범위 분할 - LocalDateTime dayStart = entry.getValue().get(0); - LocalDateTime dayEnd = entry.getValue().get(1); - - long totalMinutes = java.time.Duration.between(dayStart, dayEnd).toMinutes(); - int subPartitions = Math.max(1, (int)(totalMinutes / 60)); // 시간 단위로 분할 - - for (int i = 0; i < subPartitions; i++) { - ExecutionContext context = new ExecutionContext(); 
- - LocalDateTime partStart = dayStart.plusHours(i); - LocalDateTime partEnd = (i == subPartitions - 1) ? dayEnd : dayStart.plusHours(i + 1); - - context.put("startTime", partStart); - context.put("endTime", partEnd); - context.put("partition", partitionName != null ? partitionName : ""); - context.put("partitionIndex", partitionIndex++); - - partitions.put("range-partition-" + partitionIndex, context); - } - } - - log.info("Created {} range partitions for period {} to {}", - partitions.size(), startTime, endTime); - - return partitions; - }; - } - - private String generatePartitionName(LocalDate date) { - // YYMMDD 형식으로 변경 - return "sig_test_" + date.format(DateTimeFormatter.ofPattern("yyMMdd")); - } - - private boolean checkPartitionExists(String partitionName) { - String sql = "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = ?)"; - return Boolean.TRUE.equals(collectJdbcTemplate.queryForObject(sql, Boolean.class, partitionName)); - } - - private String findPartitionForDate(LocalDate date) { - String partitionName = generatePartitionName(date); - return checkPartitionExists(partitionName) ? 
partitionName : null; - } - - private boolean createMissingPartition(LocalDate date) { - try { - String partitionName = generatePartitionName(date); - String sql = String.format(""" - CREATE TABLE IF NOT EXISTS signal.%s PARTITION OF signal.sig_test - FOR VALUES FROM ('%s') TO ('%s') - """, partitionName, date, date.plusDays(1)); - - collectJdbcTemplate.execute(sql); - log.info("Successfully created missing partition: {}", partitionName); - return true; - - } catch (Exception e) { - log.error("Failed to create missing partition for date: {}", date, e); - return false; - } - } - - private Map> groupByDate(LocalDateTime start, LocalDateTime end) { - Map> groups = new HashMap<>(); - - LocalDate currentDate = start.toLocalDate(); - while (!currentDate.isAfter(end.toLocalDate())) { - LocalDateTime dayStart = currentDate.equals(start.toLocalDate()) ? - start : currentDate.atStartOfDay(); - LocalDateTime dayEnd = currentDate.equals(end.toLocalDate()) ? - end : currentDate.plusDays(1).atStartOfDay(); - - groups.put(currentDate, List.of(dayStart, dayEnd)); - currentDate = currentDate.plusDays(1); - } - - return groups; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/reader/VesselDataReader.java b/src/main/java/gc/mda/signal_batch/batch/reader/VesselDataReader.java deleted file mode 100644 index ef39105..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/reader/VesselDataReader.java +++ /dev/null @@ -1,408 +0,0 @@ -package gc.mda.signal_batch.batch.reader; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.item.database.JdbcCursorItemReader; -import org.springframework.batch.item.database.JdbcPagingItemReader; -import org.springframework.batch.item.database.Order; -import org.springframework.batch.item.database.support.PostgresPagingQueryProvider; -import org.springframework.beans.factory.annotation.Qualifier; -import 
org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.stereotype.Component; - -import javax.sql.DataSource; -import jakarta.annotation.PostConstruct; -import java.sql.Connection; -import java.sql.DatabaseMetaData; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.LocalDateTime; -import java.time.format.DateTimeFormatter; -import java.util.HashMap; -import java.util.Map; - -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class VesselDataReader { - - private final DataSource collectDataSource; - private final JdbcTemplate collectJdbcTemplate; - - @Value("${vessel.filter.zero-coordinates.enabled:false}") - private boolean filterZeroCoordinates; - - private static final DateTimeFormatter PARTITION_FORMATTER = DateTimeFormatter.ofPattern("yyMMdd"); - - public VesselDataReader( - @Qualifier("collectDataSource") DataSource collectDataSource, - @Qualifier("collectJdbcTemplate") JdbcTemplate collectJdbcTemplate) { - this.collectDataSource = collectDataSource; - this.collectJdbcTemplate = collectJdbcTemplate; - } - - @PostConstruct - public void init() { - logDataSourceInfo(); - log.info("Zero coordinates filter enabled: {}", filterZeroCoordinates); - } - - /** - * 0 근처 좌표 필터링 조건 생성 - */ - private String getZeroCoordinatesFilter() { - if (filterZeroCoordinates) { - return "AND NOT (lat BETWEEN -1 AND 1 AND lon BETWEEN -1 AND 1) "; - } - return ""; - } - - /** - * 최신 위치만 가져오는 최적화된 Reader - * DISTINCT ON을 사용하여 각 선박의 최신 위치만 조회 - */ - public JdbcCursorItemReader vesselLatestPositionReader( - LocalDateTime startTime, - LocalDateTime endTime, - String partition) { - - log.info("Creating optimized latest position reader from {} to {}", 
startTime, endTime); - - JdbcCursorItemReader reader = new JdbcCursorItemReader() { - @Override - protected void openCursor(Connection con) { - try { - // search_path 설정 - try (var stmt = con.createStatement()) { - stmt.execute("SET search_path TO signal, public"); - } - } catch (Exception e) { - log.error("Error setting search_path in cursor", e); - throw new RuntimeException("Failed to set search_path", e); - } - super.openCursor(con); - } - }; - - reader.setDataSource(collectDataSource); - reader.setName("vesselLatestPositionReader"); - - // 성능 최적화 설정 - reader.setFetchSize(10000); // 줄임 (최신 위치만 가져오므로) - reader.setMaxRows(0); - reader.setQueryTimeout(300); - reader.setVerifyCursorPosition(false); - reader.setUseSharedExtendedConnection(false); - reader.setSaveState(false); - - String tableName = determineTableName(partition, startTime); - log.info("Using table: {}", tableName); - - // 최신 위치만 가져오는 SQL - DISTINCT ON 사용 - String sql = String.format(""" - SELECT DISTINCT ON (sig_src_cd, target_id) - message_time, real_time, sig_src_cd, target_id, - lat, lon, sog, cog, heading, ship_nm, ship_ty, rot, posacc, - sensor_id, base_st_id, mode, gps_sttus, battery_sttus, - vts_cd, mmsi, vpass_id, ship_no - FROM signal.%s - WHERE message_time >= ? AND message_time < ? - AND sig_src_cd != '000005' - AND lat BETWEEN -90 AND 90 - AND lon BETWEEN -180 AND 180 - %s - ORDER BY sig_src_cd, target_id, message_time DESC - """, tableName, getZeroCoordinatesFilter()); - - reader.setSql(sql); - - reader.setPreparedStatementSetter(ps -> { - ps.setObject(1, Timestamp.valueOf(startTime)); - ps.setObject(2, Timestamp.valueOf(endTime)); - }); - - reader.setRowMapper(new OptimizedVesselDataRowMapper()); - - // 예상 데이터 건수 로그 - try { - Integer expectedCount = collectJdbcTemplate.queryForObject( - """ - SELECT COUNT(*) FROM ( - SELECT DISTINCT ON (sig_src_cd, target_id) 1 - FROM signal.%s - WHERE message_time >= ? AND message_time < ? 
- AND sig_src_cd != '000005' - ) t - """.formatted(tableName), - Integer.class, - startTime, endTime - ); - log.info("Expected record count (latest positions only): {}", expectedCount); - } catch (Exception e) { - log.warn("Could not get expected count: {}", e.getMessage()); - } - - return reader; - } - - /** - * 기존 Cursor Reader (전체 데이터) - 타일 집계 등에 필요한 경우 - */ - public JdbcCursorItemReader vesselDataCursorReader( - LocalDateTime startTime, - LocalDateTime endTime, - String partition) { - - log.info("Creating cursor reader for partition: {} from {} to {}", - partition, startTime, endTime); - - JdbcCursorItemReader reader = new JdbcCursorItemReader() { - @Override - protected void openCursor(Connection con) { - try { - try (var stmt = con.createStatement()) { - stmt.execute("SET search_path TO signal, public"); - } - } catch (Exception e) { - log.error("Error setting search_path in cursor", e); - throw new RuntimeException("Failed to set search_path", e); - } - super.openCursor(con); - } - }; - - reader.setDataSource(collectDataSource); - reader.setName("vesselDataCursorReader"); - - reader.setFetchSize(50000); - reader.setMaxRows(0); - reader.setQueryTimeout(1800); - reader.setVerifyCursorPosition(false); - reader.setUseSharedExtendedConnection(false); - reader.setSaveState(false); - - String tableName = determineTableName(partition, startTime); - log.info("Determined table name: {} for startTime: {}", tableName, startTime); - - // 전체 데이터 조회 SQL (타일 집계용) - StringBuilder sql = new StringBuilder(); - sql.append("SELECT /*+ PARALLEL(8) */ "); - sql.append("message_time, real_time, sig_src_cd, target_id, "); - sql.append("lat, lon, sog, cog, heading, ship_nm, ship_ty, rot, posacc, "); - sql.append("sensor_id, base_st_id, mode, gps_sttus, battery_sttus, "); - sql.append("vts_cd, mmsi, vpass_id, ship_no "); - sql.append("FROM signal.").append(tableName).append(" "); - sql.append("WHERE message_time >= ? AND message_time < ? 
AND sig_src_cd != '000005' "); - sql.append(getZeroCoordinatesFilter()); - sql.append("ORDER BY message_time, sig_src_cd, target_id"); - - reader.setSql(sql.toString()); - - reader.setPreparedStatementSetter(ps -> { - ps.setTimestamp(1, Timestamp.valueOf(startTime)); - ps.setTimestamp(2, Timestamp.valueOf(endTime)); - }); - - reader.setRowMapper(new OptimizedVesselDataRowMapper()); - - return reader; - } - - /** - * 기존 Paging Reader (작은 데이터셋용) - */ - public JdbcPagingItemReader vesselDataPagingReader( - LocalDateTime startTime, - LocalDateTime endTime, - String partition) { - - JdbcPagingItemReader reader = new JdbcPagingItemReader<>(); - reader.setDataSource(collectDataSource); - reader.setPageSize(10000); - reader.setFetchSize(10000); - reader.setRowMapper(new OptimizedVesselDataRowMapper()); - - String tableName = determineTableName(partition, startTime); - - PostgresPagingQueryProvider queryProvider = new PostgresPagingQueryProvider(); - queryProvider.setSelectClause("SELECT message_time, real_time, sig_src_cd, target_id, " + - "lat, lon, sog, cog, heading, ship_nm, ship_ty, rot, posacc, " + - "sensor_id, base_st_id, mode, gps_sttus, battery_sttus, " + - "vts_cd, mmsi, vpass_id, ship_no "); - - queryProvider.setFromClause("FROM signal." 
+ tableName); - - String whereClause = "WHERE message_time >= :startTime AND message_time < :endTime and sig_src_cd != '000005' " - + getZeroCoordinatesFilter(); - queryProvider.setWhereClause(whereClause); - - Map sortKeys = new HashMap<>(); - sortKeys.put("message_time", Order.ASCENDING); - sortKeys.put("sig_src_cd", Order.ASCENDING); - sortKeys.put("target_id", Order.ASCENDING); - queryProvider.setSortKeys(sortKeys); - - reader.setQueryProvider(queryProvider); - - Map parameterValues = new HashMap<>(); - parameterValues.put("startTime", startTime); - parameterValues.put("endTime", endTime); - reader.setParameterValues(parameterValues); - - try { - reader.afterPropertiesSet(); - } catch (Exception e) { - log.error("Failed to initialize JdbcPagingItemReader", e); - throw new RuntimeException("Reader initialization failed", e); - } - - return reader; - } - - /** - * 파티션 테이블 이름 결정 - */ - private String determineTableName(String partition, LocalDateTime startTime) { - if (partition != null && !partition.isEmpty()) { - log.debug("Using specified partition: {}", partition); - return partition; - } - - LocalDateTime targetTime = startTime != null ? 
startTime : LocalDateTime.now(); - String partitionSuffix = targetTime.format(PARTITION_FORMATTER); - String tableName = "sig_test_" + partitionSuffix; - - try { - Boolean exists = collectJdbcTemplate.queryForObject( - "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = ?)", - Boolean.class, - tableName - ); - - if (Boolean.TRUE.equals(exists)) { - log.info("Auto-selected partition table: {}", tableName); - return tableName; - } else { - log.warn("Partition table {} does not exist, using sig_test", tableName); - return "sig_test"; - } - } catch (Exception e) { - log.error("Error checking partition table existence", e); - return "sig_test"; - } - } - - /** - * 최적화된 RowMapper - */ - public static class OptimizedVesselDataRowMapper implements RowMapper { - @Override - public VesselData mapRow(ResultSet rs, int rowNum) throws SQLException { - VesselData data = new VesselData(); - - Timestamp messageTime = rs.getTimestamp(1); - if (messageTime != null) { - data.setMessageTime(messageTime.toLocalDateTime()); - } - - Timestamp realTime = rs.getTimestamp(2); - if (realTime != null) { - data.setRealTime(realTime.toLocalDateTime()); - } - - data.setSigSrcCd(rs.getString(3)); - data.setTargetId(rs.getString(4)); - data.setLat(rs.getDouble(5)); - data.setLon(rs.getDouble(6)); - data.setSog(rs.getBigDecimal(7)); - data.setCog(rs.getBigDecimal(8)); - - data.setHeading(getIntegerFromNumeric(rs, 9)); - data.setShipNm(rs.getString(10)); - data.setShipTy(rs.getString(11)); - data.setRot(getIntegerFromNumeric(rs, 12)); - data.setPosacc(getIntegerFromNumeric(rs, 13)); - data.setSensorId(rs.getString(14)); - data.setBaseStId(rs.getString(15)); - data.setMode(getIntegerFromNumeric(rs, 16)); - data.setGpsSttus(getIntegerFromNumeric(rs, 17)); - data.setBatterySttus(getIntegerFromNumeric(rs, 18)); - data.setVtsCd(rs.getString(19)); - data.setMmsi(rs.getString(20)); - data.setVpassId(rs.getString(21)); - data.setShipNo(rs.getString(22)); - - return data; - } 
- - private Integer getIntegerFromNumeric(ResultSet rs, int columnIndex) throws SQLException { - Object value = rs.getObject(columnIndex); - if (value == null || rs.wasNull()) { - return null; - } - - if (value instanceof java.math.BigDecimal) { - return ((java.math.BigDecimal) value).intValue(); - } else if (value instanceof Integer) { - return (Integer) value; - } else if (value instanceof Number) { - return ((Number) value).intValue(); - } else if (value instanceof String) { - try { - return Integer.parseInt((String) value); - } catch (NumberFormatException e) { - return null; - } - } - - return null; - } - } - - private void logDataSourceInfo() { - try { - String info = getDataSourceInfo(collectDataSource); - log.info("VesselDataReader initialized with DataSource: {}", info); - } catch (Exception e) { - log.error("Failed to get DataSource info", e); - } - } - - private String getDataSourceInfo(DataSource dataSource) { - try (Connection conn = dataSource.getConnection()) { - DatabaseMetaData meta = conn.getMetaData(); - String url = meta.getURL(); - String user = meta.getUserName(); - String db = conn.getCatalog(); - String schema = conn.getSchema(); - return String.format("URL=%s, User=%s, DB=%s, Schema=%s", url, user, db, schema); - } catch (Exception e) { - return "Unknown (" + e.getMessage() + ")"; - } - } - - @SuppressWarnings("unused") - private void testConnection(String tableName) { - try { - try (Connection conn = collectDataSource.getConnection()) { - try (var stmt = conn.createStatement()) { - stmt.execute("SET search_path TO signal, public"); - } - - String testSql = "SELECT COUNT(*) FROM signal." 
+ tableName + " LIMIT 1"; - try (var stmt = conn.createStatement(); - var rs = stmt.executeQuery(testSql)) { - if (rs.next()) { - log.info("Direct connection test successful, count: {}", rs.getInt(1)); - } - } - } - } catch (Exception e) { - log.error("Connection test failed", e); - } - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/writer/AbnormalTrackWriter.java b/src/main/java/gc/mda/signal_batch/batch/writer/AbnormalTrackWriter.java index 6e54adb..600fc78 100644 --- a/src/main/java/gc/mda/signal_batch/batch/writer/AbnormalTrackWriter.java +++ b/src/main/java/gc/mda/signal_batch/batch/writer/AbnormalTrackWriter.java @@ -87,11 +87,11 @@ public class AbnormalTrackWriter implements ItemWriter String sql = String.format(""" INSERT INTO signal.t_abnormal_tracks ( - sig_src_cd, target_id, time_bucket, %s, + mmsi, time_bucket, %s, abnormal_type, abnormal_reason, distance_nm, avg_speed, max_speed, point_count, source_table - ) VALUES (?, ?, ?, public.ST_GeomFromText(?::text, 4326), ?, ?::jsonb, ?, ?, ?, ?, ?) - ON CONFLICT (sig_src_cd, target_id, time_bucket, source_table) + ) VALUES (?, ?, public.ST_GeomFromText(?::text, 4326), ?, ?::jsonb, ?, ?, ?, ?, ?) 
+ ON CONFLICT (mmsi, time_bucket, source_table) DO UPDATE SET %s = EXCLUDED.%s, abnormal_type = EXCLUDED.abnormal_type, @@ -137,8 +137,7 @@ public class AbnormalTrackWriter implements ItemWriter } batchArgs.add(new Object[] { - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), Timestamp.valueOf(track.getTimeBucket()), geomWkt, mainAbnormalType, diff --git a/src/main/java/gc/mda/signal_batch/batch/writer/AisTargetCacheWriter.java b/src/main/java/gc/mda/signal_batch/batch/writer/AisTargetCacheWriter.java new file mode 100644 index 0000000..78fe92e --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/batch/writer/AisTargetCacheWriter.java @@ -0,0 +1,58 @@ +package gc.mda.signal_batch.batch.writer; + +import gc.mda.signal_batch.batch.reader.AisTargetCacheManager; +import gc.mda.signal_batch.batch.reader.ChnPrmShipCacheManager; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import gc.mda.signal_batch.global.util.SignalKindCode; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.batch.item.Chunk; +import org.springframework.batch.item.ItemWriter; +import org.springframework.stereotype.Component; + +import java.util.List; + +/** + * AIS Target 캐시 Writer + * + * 처리 순서: + * 1. SignalKindCode 치환 (vesselType + extraInfo → MDA 범례코드) + * 2. AisTargetCacheManager에 일괄 저장 + * 3. ChnPrmShipCacheManager에 대상 MMSI만 필터 저장 + * + * DB 저장은 Phase 3의 AisPositionSyncStep에서 5분 집계 Job에 편승하여 수행. + */ +@Slf4j +@Component +@RequiredArgsConstructor +public class AisTargetCacheWriter implements ItemWriter { + + private final AisTargetCacheManager cacheManager; + private final ChnPrmShipCacheManager chnPrmShipCacheManager; + + @Override + public void write(Chunk chunk) { + List items = chunk.getItems(); + log.debug("AIS Target 캐시 업데이트 시작: {} 건", items.size()); + + // 1. 
SignalKindCode 치환 + items.forEach(item -> { + SignalKindCode kindCode = SignalKindCode.resolve(item.getVesselType(), item.getExtraInfo()); + item.setSignalKindCode(kindCode.getCode()); + }); + + // 2. 메인 캐시 업데이트 (최신 위치 — t_ais_position 동기화용) + @SuppressWarnings("unchecked") + List entityList = (List) items; + cacheManager.putAll(entityList); + + // 3. 트랙 버퍼에 누적 (5분 집계 시 LineStringM 생성용) + cacheManager.appendAllForTrack(entityList); + + log.debug("AIS Target 캐시 업데이트 완료: {} 건 (캐시 크기: {})", + items.size(), cacheManager.size()); + + // 4. ChnPrmShip 전용 캐시 업데이트 + chnPrmShipCacheManager.putIfTarget(entityList); + } +} diff --git a/src/main/java/gc/mda/signal_batch/batch/writer/OptimizedBulkInsertWriter.java b/src/main/java/gc/mda/signal_batch/batch/writer/OptimizedBulkInsertWriter.java deleted file mode 100644 index 2fd29dd..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/writer/OptimizedBulkInsertWriter.java +++ /dev/null @@ -1,702 +0,0 @@ -package gc.mda.signal_batch.batch.writer; - -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import gc.mda.signal_batch.domain.gis.model.TileStatistics; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.SerializationFeature; -import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import com.google.common.collect.Lists; -import jakarta.annotation.PostConstruct; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.postgresql.copy.CopyManager; -import org.postgresql.core.BaseConnection; -import org.springframework.batch.item.Chunk; -import org.springframework.batch.item.ItemWriter; -import org.springframework.beans.factory.DisposableBean; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import 
org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; -import org.springframework.util.StopWatch; - -import javax.sql.DataSource; -import java.io.*; -import java.sql.Connection; -import java.sql.Timestamp; -import java.time.LocalDate; -import java.time.LocalDateTime; -import java.time.format.DateTimeFormatter; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.*; -import java.util.stream.Collectors; - - - -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class OptimizedBulkInsertWriter implements DisposableBean { - - private final DataSource queryDataSource; - private final JdbcTemplate queryJdbcTemplate; - - public OptimizedBulkInsertWriter( - @Qualifier("queryDataSource") DataSource queryDataSource, - @Qualifier("queryJdbcTemplate") JdbcTemplate queryJdbcTemplate) { - this.queryDataSource = queryDataSource; - this.queryJdbcTemplate = queryJdbcTemplate; - - System.out.println("========================================"); - System.out.println("!!! 
OptimizedBulkInsertWriter initialized !!!"); - System.out.println("queryDataSource: " + queryDataSource); - System.out.println("queryJdbcTemplate DataSource: " + queryJdbcTemplate.getDataSource()); - System.out.println("========================================"); - } - - private final ObjectMapper objectMapper = createObjectMapper(); - - private static ObjectMapper createObjectMapper() { - ObjectMapper mapper = new ObjectMapper(); - mapper.registerModule(new JavaTimeModule()); - mapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS); - mapper.setDateFormat(new java.text.SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")); - mapper.setTimeZone(java.util.TimeZone.getTimeZone("Asia/Seoul")); - return mapper; - } - - @Value("${vessel.batch.bulk-insert.batch-size:50000}") - private int batchSize; - - @Value("${vessel.batch.bulk-insert.parallel-threads:4}") - private int parallelThreads; - - @Value("${vessel.batch.bulk-insert.use-binary-copy:false}") - private boolean useBinaryCopy; - - private volatile ExecutorService executorService; - - - private static final DateTimeFormatter TIMESTAMP_FORMATTER = - DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"); - - @PostConstruct - public void init() { - initializeExecutorService(); - } - - /** - * ExecutorService 초기화 또는 재초기화 - */ - private synchronized void initializeExecutorService() { - if (executorService == null || executorService.isShutdown() || executorService.isTerminated()) { - if (executorService != null && !executorService.isShutdown()) { - executorService.shutdown(); - } - - int threadCount = Math.max(8, Runtime.getRuntime().availableProcessors() * 2); - executorService = Executors.newFixedThreadPool(threadCount, - new ThreadFactoryBuilder() - .setNameFormat("bulk-insert-worker-%d") - .setDaemon(true) // 데몬 스레드로 설정하여 JVM 종료 시 자동 정리 - .build()); - - log.info("ExecutorService initialized with {} threads", threadCount); - } - } - - /** - * ExecutorService 상태 확인 및 필요시 재초기화 - */ - private ExecutorService 
getHealthyExecutorService() { - if (executorService == null || executorService.isShutdown() || executorService.isTerminated()) { - log.warn("ExecutorService is not healthy, reinitializing..."); - initializeExecutorService(); - } - return executorService; - } - - /** - * TileStatistics Bulk Writer - */ - public ItemWriter> tileStatisticsBulkWriter() { - return new ItemWriter>() { - @Override - public void write(Chunk> chunk) throws Exception { - List allStats = chunk.getItems().stream() - .flatMap(List::stream) - .collect(Collectors.toList()); - - if (allStats.isEmpty()) { - return; - } - - StopWatch stopWatch = new StopWatch(); - stopWatch.start(); - - try { - // 파티션별로 그룹화 - Map> partitionedData = - allStats.stream() - .collect(Collectors.groupingBy( - stat -> stat.getTimeBucket().toLocalDate() - )); - - // 병렬 처리 - List> futures = new ArrayList<>(); - - for (Map.Entry> entry : partitionedData.entrySet()) { - LocalDate date = entry.getKey(); - List data = entry.getValue(); - - // 배치 크기로 분할 - Lists.partition(data, batchSize).forEach(batch -> { - try { - ExecutorService healthyExecutor = getHealthyExecutorService(); - CompletableFuture future = CompletableFuture.supplyAsync(() -> - insertTileStatisticsBatch(date, batch), healthyExecutor - ); - futures.add(future); - } catch (RejectedExecutionException e) { - log.warn("RejectedExecutionException caught, falling back to synchronous processing"); - BulkInsertResult result = insertTileStatisticsBatch(date, batch); - futures.add(CompletableFuture.completedFuture(result)); - } - }); - } - - // 모든 작업 완료 대기 - CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); - - // 결과 집계 - long totalInserted = futures.stream() - .map(CompletableFuture::join) - .mapToLong(result -> result.rowsInserted) - .sum(); - - stopWatch.stop(); - log.info("Bulk inserted {} tile statistics in {} ms", - totalInserted, stopWatch.getTotalTimeMillis()); - - } catch (Exception e) { - // CompletionException에서 실제 원인 확인 - Throwable cause 
= e; - if (e instanceof CompletionException && e.getCause() != null) { - cause = e.getCause(); - if (cause instanceof RuntimeException && cause.getCause() != null) { - cause = cause.getCause(); - } - } - - // 중복 키 오류는 정상적인 상황 - if (cause.getMessage() != null && cause.getMessage().contains("중복된 키")) { - log.debug("Duplicate key errors detected during bulk insert, using fallback UPSERT"); - } else { - log.error("Bulk insert failed, falling back to batch insert", e); - } - - // 새로운 트랜잭션에서 재시도 - try { - fallbackBatchInsert(allStats); - } catch (Exception fallbackEx) { - log.error("Fallback insert also failed", fallbackEx); - throw fallbackEx; - } - } - } - }; - } - - /** - * 개별 배치 처리 - */ - private BulkInsertResult insertTileStatisticsBatch(LocalDate date, - List batch) { - - String tableName = "t_tile_summary_" + date.format(DateTimeFormatter.BASIC_ISO_DATE); - - // 파티션 존재 확인 - if (!checkTableExists(tableName)) { - tableName = "t_tile_summary"; // 기본 테이블 사용 - } - - try (Connection conn = queryDataSource.getConnection()) { - BaseConnection baseConn = conn.unwrap(BaseConnection.class); - CopyManager copyManager = new CopyManager(baseConn); - - if (useBinaryCopy) { - return binaryCopyInsert(copyManager, tableName, batch); - } else { - return textCopyInsert(copyManager, tableName, batch); - } - - } catch (Exception e) { - if (e.getMessage() != null && e.getMessage().contains("duplicate key")) { - // 중복 키는 정상적인 상황이므로 DEBUG 레벨로 기록 - log.debug("Duplicate entries detected for table {} - switching to UPSERT mode", tableName); - // 새로운 트랜잭션에서 UPSERT 실행 - try { - return upsertBatch(tableName, batch); - } catch (Exception upsertEx) { - log.error("UPSERT also failed for table {}", tableName, upsertEx); - throw new RuntimeException("Both COPY and UPSERT failed", upsertEx); - } - } - log.error("Failed to insert batch for table {}", tableName, e); - throw new RuntimeException("Batch insert failed", e); - } - } - - /** - * 텍스트 기반 COPY - */ - private BulkInsertResult 
textCopyInsert(CopyManager copyManager, String tableName, - List batch) throws Exception { - - String copySql = String.format(""" - COPY signal.%s ( - tile_id, tile_level, time_bucket, vessel_count, - unique_vessels, total_points, avg_sog, max_sog, - vessel_density, created_at - ) FROM STDIN - """, tableName); - - try (PipedOutputStream pos = new PipedOutputStream(); - PipedInputStream pis = new PipedInputStream(pos, 1024 * 1024); // 1MB 버퍼 - PrintWriter writer = new PrintWriter(new BufferedWriter( - new OutputStreamWriter(pos, "UTF-8"), 65536))) { // 64KB 버퍼 - - // 비동기로 데이터 쓰기 - CompletableFuture writerFuture = CompletableFuture.runAsync(() -> { - try { - for (TileStatistics stat : batch) { - writer.println(formatCsvLine(stat)); - } - } finally { - writer.close(); - } - }); - - // COPY 실행 - long rowsInserted = copyManager.copyIn(copySql, pis); - - // Writer 완료 대기 - writerFuture.join(); - - return new BulkInsertResult(rowsInserted, null); - } - } - - /** - * 바이너리 기반 COPY (더 빠름) - */ - private BulkInsertResult binaryCopyInsert(CopyManager copyManager, String tableName, - List batch) throws Exception { - - String copySql = String.format(""" - COPY signal.%s ( - tile_id, tile_level, time_bucket, vessel_count, - unique_vessels, total_points, avg_sog, max_sog, - vessel_density, created_at - ) FROM STDIN WITH (FORMAT BINARY) - """, tableName); - - try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { - // PostgreSQL 바이너리 형식 헤더 - writeBinaryHeader(baos); - - // 데이터 쓰기 - for (TileStatistics stat : batch) { - writeBinaryRow(baos, stat); - } - - // 트레일러 - writeBinaryTrailer(baos); - - // COPY 실행 - try (ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray())) { - long rowsInserted = copyManager.copyIn(copySql, bais); - return new BulkInsertResult(rowsInserted, null); - } - } - } - - /** - * CSV 라인 포맷팅 - */ - private String formatCsvLine(TileStatistics stat) { - String json = convertToJson(stat.getUniqueVessels()); - // TEXT 형식에서는 탭과 줄바꿈만 이스케이프 - 
String escapedJson = json.replace("\\", "\\\\") - .replace("\t", "\\t") - .replace("\n", "\\n") - .replace("\r", "\\r"); - - return String.format("%s\t%d\t%s\t%d\t%s\t%d\t%s\t%s\t%s\t%s", - stat.getTileId(), - stat.getTileLevel(), - stat.getTimeBucket().format(TIMESTAMP_FORMATTER), - stat.getVesselCount(), - escapedJson, - stat.getTotalPoints(), - stat.getAvgSog() != null ? stat.getAvgSog().toString() : "\\N", - stat.getMaxSog() != null ? stat.getMaxSog().toString() : "\\N", - stat.getVesselDensity() != null ? stat.getVesselDensity().toString() : "\\N", - LocalDateTime.now().format(TIMESTAMP_FORMATTER) - ); - } - - /** - * CSV 특수문자 이스케이프 - */ - @SuppressWarnings("unused") - private String escapeCsv(String value) { - if (value == null) return "NULL"; - return value.replace("\\", "\\\\") - .replace("|", "\\|") - .replace("\n", "\\n") - .replace("\r", "\\r") - .replace("\"", "\\\""); - } - - /** - * JSON 이스케이프 - */ - @SuppressWarnings("unused") - private String escapeJson(String json) { - if (json == null) return "NULL"; - return json.replace("\\", "\\\\") - .replace("|", "\\|") - .replace("\n", "\\n") - .replace("\r", "\\r"); - } - - - - /** - * 객체를 JSON으로 변환 - */ - private String convertToJson(Object obj) { - try { - if (obj == null) return "{}"; - - // 클래스 레벨의 objectMapper 사용 - String json = objectMapper.writeValueAsString(obj); - - // JSON 검증 로그 - if (log.isDebugEnabled()) { - log.debug("Generated JSON: {}", json); - } - - return json; - } catch (Exception e) { - log.error("Error converting to JSON: {}", obj, e); - return "{}"; - } - } - - /** - * UPSERT 배치 처리 (중복키 발생 시) - */ - private BulkInsertResult upsertBatch(String tableName, List batch) { - // 항상 tile_level도 포함하여 처리 - String sql = String.format(""" - INSERT INTO signal.%s ( - tile_id, tile_level, time_bucket, vessel_count, - unique_vessels, total_points, avg_sog, max_sog, - vessel_density, created_at - ) VALUES (?, ?, ?, ?, ?::jsonb, ?, ?, ?, ?, ?) 
- ON CONFLICT (tile_id, time_bucket, tile_level) DO UPDATE SET - vessel_count = EXCLUDED.vessel_count, - unique_vessels = EXCLUDED.unique_vessels, - total_points = EXCLUDED.total_points, - avg_sog = EXCLUDED.avg_sog, - max_sog = EXCLUDED.max_sog, - vessel_density = EXCLUDED.vessel_density, - created_at = EXCLUDED.created_at - """, tableName); - - long totalUpdated = 0; - - // 배치 크기로 분할 - for (List partition : Lists.partition(batch, 1000)) { - List args = partition.stream() - .map(stat -> new Object[] { - stat.getTileId(), - stat.getTileLevel(), - Timestamp.valueOf(stat.getTimeBucket()), - stat.getVesselCount(), - convertToJson(stat.getUniqueVessels()), - stat.getTotalPoints(), - stat.getAvgSog(), - stat.getMaxSog(), - stat.getVesselDensity(), - Timestamp.valueOf(LocalDateTime.now()) - }) - .collect(Collectors.toList()); - - int[] results = queryJdbcTemplate.batchUpdate(sql, args); - - for (int result : results) { - totalUpdated += result; - } - } - - log.info("Upserted {} records in table {}", totalUpdated, tableName); - return new BulkInsertResult(totalUpdated, null); - } - - /** - * Fallback 배치 인서트 - */ - private void fallbackBatchInsert(List stats) { - String sql = """ - INSERT INTO signal.t_tile_summary ( - tile_id, tile_level, time_bucket, vessel_count, - unique_vessels, total_points, avg_sog, max_sog, - vessel_density, created_at - ) VALUES (?, ?, ?, ?, ?::jsonb, ?, ?, ?, ?, ?) 
- ON CONFLICT (tile_id, time_bucket, tile_level) DO UPDATE SET - vessel_count = EXCLUDED.vessel_count, - unique_vessels = EXCLUDED.unique_vessels, - total_points = EXCLUDED.total_points, - avg_sog = EXCLUDED.avg_sog, - max_sog = EXCLUDED.max_sog, - vessel_density = EXCLUDED.vessel_density, - created_at = EXCLUDED.created_at - """; - - // 배치 크기로 분할 - Lists.partition(stats, 1000).forEach(batch -> { - List args = batch.stream() - .map(stat -> new Object[] { - stat.getTileId(), - stat.getTileLevel(), - Timestamp.valueOf(stat.getTimeBucket()), - stat.getVesselCount(), - convertToJson(stat.getUniqueVessels()), - stat.getTotalPoints(), - stat.getAvgSog(), - stat.getMaxSog(), - stat.getVesselDensity(), - Timestamp.valueOf(LocalDateTime.now()) - }) - .collect(Collectors.toList()); - - queryJdbcTemplate.batchUpdate(sql, args); - }); - } - - /** - * AreaStatistics Bulk Writer - */ - public ItemWriter> - areaStatisticsBulkWriter() { - - return new ItemWriter>() { - @Override - public void write(Chunk> chunk) - throws Exception { - - List allStats = - chunk.getItems().stream() - .flatMap(List::stream) - .collect(Collectors.toList()); - - if (allStats.isEmpty()) { - return; - } - - // 배치 크기로 분할하여 병렬 처리 - Lists.partition(allStats, batchSize) - .parallelStream() - .forEach(batch -> insertAreaStatisticsBatch(batch)); - } - }; - } - - private void insertAreaStatisticsBatch( - List batch) { - - try (Connection conn = queryDataSource.getConnection()) { - BaseConnection baseConn = conn.unwrap(BaseConnection.class); - CopyManager copyManager = new CopyManager(baseConn); - - String copySql = """ - COPY signal.t_area_statistics ( - area_id, time_bucket, vessel_count, - in_count, out_count, transit_vessels, - stationary_vessels, avg_sog, created_at - ) FROM STDIN WITH (FORMAT CSV, DELIMITER '|', NULL 'NULL') - """; - - StringWriter writer = new StringWriter(); - for (var stat : batch) { - writer.write(String.format("%s|%s|%d|%d|%d|%s|%s|%s|%s\n", - stat.getAreaId(), - 
stat.getTimeBucket().format(TIMESTAMP_FORMATTER), - stat.getVesselCount(), - stat.getInCount(), - stat.getOutCount(), - escapeJson(convertToJson(stat.getTransitVessels())), - escapeJson(convertToJson(stat.getStationaryVessels())), - stat.getAvgSog() != null ? stat.getAvgSog().toString() : "NULL", - LocalDateTime.now().format(TIMESTAMP_FORMATTER) - )); - } - - long rowsInserted = copyManager.copyIn(copySql, new StringReader(writer.toString())); - log.debug("Inserted {} area statistics", rowsInserted); - - } catch (Exception e) { - log.error("Failed to bulk insert area statistics", e); - // Fallback 처리 - } - } - - /** - * 테이블 존재 확인 - */ - private boolean checkTableExists(String tableName) { - String sql = "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = ?)"; - return Boolean.TRUE.equals(queryJdbcTemplate.queryForObject(sql, Boolean.class, tableName)); - } - - - /** - * 바이너리 형식 헬퍼 메소드들 - */ - private void writeBinaryHeader(ByteArrayOutputStream baos) throws IOException { - // PostgreSQL 바이너리 COPY 헤더 - baos.write("PGCOPY\n\377\r\n\0".getBytes("UTF-8")); - // 플래그 - writeInt32(baos, 0); - // 헤더 확장 길이 - writeInt32(baos, 0); - } - - private void writeBinaryTrailer(ByteArrayOutputStream baos) throws IOException { - // -1 표시 (EOF) - writeInt16(baos, -1); - } - - private void writeBinaryRow(ByteArrayOutputStream baos, - TileStatistics stat) throws IOException { - // 필드 수 - writeInt16(baos, 10); - - // 각 필드 쓰기 - writeString(baos, stat.getTileId()); - writeInt32(baos, stat.getTileLevel()); - writeTimestamp(baos, stat.getTimeBucket()); - writeInt32(baos, stat.getVesselCount()); - writeString(baos, convertToJson(stat.getUniqueVessels())); - writeInt64(baos, stat.getTotalPoints()); - writeBigDecimal(baos, stat.getAvgSog()); - writeBigDecimal(baos, stat.getMaxSog()); - writeBigDecimal(baos, stat.getVesselDensity()); - writeTimestamp(baos, LocalDateTime.now()); - } - - private void writeInt16(ByteArrayOutputStream baos, int value) throws 
IOException { - baos.write((value >> 8) & 0xFF); - baos.write(value & 0xFF); - } - - private void writeInt32(ByteArrayOutputStream baos, int value) throws IOException { - baos.write((value >> 24) & 0xFF); - baos.write((value >> 16) & 0xFF); - baos.write((value >> 8) & 0xFF); - baos.write(value & 0xFF); - } - - private void writeInt64(ByteArrayOutputStream baos, long value) throws IOException { - for (int i = 56; i >= 0; i -= 8) { - baos.write((int)(value >> i) & 0xFF); - } - } - - private void writeString(ByteArrayOutputStream baos, String value) throws IOException { - if (value == null) { - writeInt32(baos, -1); // NULL - } else { - byte[] bytes = value.getBytes("UTF-8"); - writeInt32(baos, bytes.length); - baos.write(bytes); - } - } - - private void writeTimestamp(ByteArrayOutputStream baos, LocalDateTime value) throws IOException { - if (value == null) { - writeInt32(baos, -1); // NULL - } else { - // PostgreSQL timestamp 형식으로 변환 - long micros = value.atZone(java.time.ZoneId.systemDefault()) - .toInstant().toEpochMilli() * 1000; - writeInt32(baos, 8); // 길이 - writeInt64(baos, micros); - } - } - - private void writeBigDecimal(ByteArrayOutputStream baos, java.math.BigDecimal value) - throws IOException { - if (value == null) { - writeInt32(baos, -1); // NULL - } else { - writeString(baos, value.toString()); - } - } - - - /** - * 결과 클래스 - */ - private static class BulkInsertResult { - final long rowsInserted; - @SuppressWarnings("unused") - final String error; - - BulkInsertResult(long rowsInserted, String error) { - this.rowsInserted = rowsInserted; - this.error = error; - } - } - - /** - * 리소스 정리 - */ - public void shutdown() { - if (executorService != null && !executorService.isShutdown()) { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - } catch (InterruptedException e) { - executorService.shutdownNow(); - } - } - } - - @Override - public void destroy() throws 
Exception { - log.info("Shutting down OptimizedBulkInsertWriter ExecutorService"); - if (executorService != null && !executorService.isShutdown()) { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(30, TimeUnit.SECONDS)) { - log.warn("ExecutorService did not terminate gracefully, forcing shutdown"); - executorService.shutdownNow(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - executorService.shutdownNow(); - } - } - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/writer/UpsertWriter.java b/src/main/java/gc/mda/signal_batch/batch/writer/UpsertWriter.java deleted file mode 100644 index 6f3562f..0000000 --- a/src/main/java/gc/mda/signal_batch/batch/writer/UpsertWriter.java +++ /dev/null @@ -1,271 +0,0 @@ -package gc.mda.signal_batch.batch.writer; - -import gc.mda.signal_batch.domain.vessel.model.VesselLatestPosition; -import gc.mda.signal_batch.batch.processor.AreaStatisticsProcessor.AreaStatistics; -import gc.mda.signal_batch.global.util.ConcurrentUpdateManager; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.item.Chunk; -import org.springframework.batch.item.ItemWriter; -import org.springframework.batch.item.database.JdbcBatchItemWriter; -import org.springframework.batch.item.database.BeanPropertyItemSqlParameterSourceProvider; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import javax.sql.DataSource; -import java.util.ArrayList; -import java.util.HashMap; -import 
java.util.List; -import java.util.Map; -import java.util.concurrent.*; - - - -@Slf4j -@Configuration -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -public class UpsertWriter { - - private final DataSource queryDataSource; - private final ConcurrentUpdateManager concurrentUpdateManager; - - public UpsertWriter( - @Qualifier("queryDataSource") DataSource queryDataSource, - ConcurrentUpdateManager concurrentUpdateManager) { - this.queryDataSource = queryDataSource; - this.concurrentUpdateManager = concurrentUpdateManager; - - System.out.println("========================================"); - System.out.println("!!! UpsertWriter initialized !!!"); - System.out.println("queryDataSource: " + queryDataSource); - System.out.println("========================================"); - } - - @Value("${vessel.batch.writer.use-advisory-lock:false}") - private boolean useAdvisoryLock; - - @Value("${vessel.batch.writer.parallel-threads:4}") - private int parallelThreads; - - private static final ExecutorService executorService = new ThreadPoolExecutor( - 4, 8, - 60L, TimeUnit.SECONDS, - new LinkedBlockingQueue<>(100), - new ThreadPoolExecutor.CallerRunsPolicy() - ); - - // shutdown hook 추가 - static { - Runtime.getRuntime().addShutdownHook(new Thread(() -> { - log.info("Shutting down executor service..."); - executorService.shutdown(); - try { - if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - } catch (InterruptedException e) { - executorService.shutdownNow(); - } - })); - } - - private final ObjectMapper objectMapper = new ObjectMapper() - .registerModule(new JavaTimeModule()); - - /** - * 최신 위치 Writer - Advisory Lock 사용 - */ - @Bean - public ItemWriter latestPositionWriter() { - if (useAdvisoryLock) { - return new ItemWriter() { - @Override - public void write(Chunk chunk) throws Exception { - List items = new ArrayList<>(chunk.getItems()); - - // 병렬 처리를 위한 분할 - int 
batchSize = Math.max(1, items.size() / parallelThreads); - List> futures = new ArrayList<>(); - - for (int i = 0; i < items.size(); i += batchSize) { - int endIndex = Math.min(i + batchSize, items.size()); - List batch = items.subList(i, endIndex); - - CompletableFuture future = CompletableFuture.runAsync(() -> { - for (VesselLatestPosition position : batch) { - try { - concurrentUpdateManager.updateLatestPositionWithLock(position); - } catch (Exception e) { - log.error("Failed to update position: {}", position.getTargetId(), e); - } - } - }, executorService); - - futures.add(future); - } - - // 모든 작업 완료 대기 - CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])) - .get(5, TimeUnit.MINUTES); - - log.debug("Updated {} vessel positions", items.size()); - } - }; - } else { - // 기존 방식 (Batch Update) - return defaultLatestPositionWriter(); - } - } - - /** - * 기본 Batch Writer - */ - private JdbcBatchItemWriter defaultLatestPositionWriter() { - return customLatestPositionWriter(); - } - - /** - * Custom Writer - UPDATE 0건도 정상 처리 - */ - private JdbcBatchItemWriter customLatestPositionWriter() { - String sql = """ - INSERT INTO signal.t_vessel_latest_position ( - sig_src_cd, target_id, lat, lon, geom, - sog, cog, heading, ship_nm, ship_ty, - last_update, update_count, created_at - ) VALUES ( - :sigSrcCd, :targetId, :lat, :lon, - public.ST_SetSRID(public.ST_MakePoint(:lon, :lat), 4326), - :sog, :cog, :heading, :shipNm, :shipTy, - :lastUpdate, 1, CURRENT_TIMESTAMP - ) - ON CONFLICT (sig_src_cd, target_id) DO UPDATE SET - lat = EXCLUDED.lat, - lon = EXCLUDED.lon, - geom = EXCLUDED.geom, - sog = EXCLUDED.sog, - cog = EXCLUDED.cog, - heading = EXCLUDED.heading, - ship_nm = COALESCE(EXCLUDED.ship_nm, t_vessel_latest_position.ship_nm), - ship_ty = COALESCE(EXCLUDED.ship_ty, t_vessel_latest_position.ship_ty), - last_update = EXCLUDED.last_update, - update_count = t_vessel_latest_position.update_count + 1 - WHERE EXCLUDED.last_update > 
t_vessel_latest_position.last_update - """; - - JdbcBatchItemWriter writer = new JdbcBatchItemWriter() { - @Override - public void write(Chunk chunk) throws Exception { - // assertUpdates 비활성화로 UPDATE 0건도 허용 - this.setAssertUpdates(false); - super.write(chunk); - } - }; - - writer.setDataSource(queryDataSource); - writer.setSql(sql); - writer.setItemSqlParameterSourceProvider(new BeanPropertyItemSqlParameterSourceProvider<>()); - writer.afterPropertiesSet(); - - return writer; - } - - /** - * 구역 통계 Writer - */ - @Bean - public ItemWriter> areaStatisticsWriter() { - return new ItemWriter>() { - @Override - public void write(Chunk> chunk) throws Exception { - // 중복 제거를 위한 Map 사용 - Map uniqueStats = new HashMap<>(); - - for (List batch : chunk.getItems()) { - for (AreaStatistics stat : batch) { - String key = stat.getAreaId() + "_" + stat.getTimeBucket(); - // 중복된 경우 나중 데이터로 덮어쓰기 - uniqueStats.put(key, stat); - } - } - - List allStats = new ArrayList<>(uniqueStats.values()); - - if (allStats.isEmpty()) { - return; - } - - // 배치를 더 작은 단위로 분할 - int batchSize = 500; - JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); - jdbcTemplate.setQueryTimeout(60); // 60초 타임아웃 - - for (int i = 0; i < allStats.size(); i += batchSize) { - int endIndex = Math.min(i + batchSize, allStats.size()); - List subBatch = allStats.subList(i, endIndex); - - String sql = """ - INSERT INTO signal.t_area_statistics ( - area_id, time_bucket, vessel_count, in_count, out_count, - transit_vessels, stationary_vessels, avg_sog, created_at - ) VALUES ( - ?, ?, ?, ?, ?, - ?::jsonb, ?::jsonb, ?, CURRENT_TIMESTAMP - ) - ON CONFLICT (area_id, time_bucket) DO UPDATE SET - vessel_count = EXCLUDED.vessel_count, - in_count = EXCLUDED.in_count, - out_count = EXCLUDED.out_count, - transit_vessels = EXCLUDED.transit_vessels, - stationary_vessels = EXCLUDED.stationary_vessels, - avg_sog = EXCLUDED.avg_sog - """; - - List batchArgs = new ArrayList<>(); - for (AreaStatistics stats : subBatch) { - 
batchArgs.add(new Object[]{ - stats.getAreaId(), - java.sql.Timestamp.valueOf(stats.getTimeBucket()), - stats.getVesselCount(), - stats.getInCount(), - stats.getOutCount(), - objectMapper.writeValueAsString(stats.getTransitVessels()), - objectMapper.writeValueAsString(stats.getStationaryVessels()), - stats.getAvgSog() - }); - } - - try { - jdbcTemplate.batchUpdate(sql, batchArgs); - log.debug("Updated {} area statistics records", subBatch.size()); - } catch (Exception e) { - log.error("Failed to update batch of {} area statistics", subBatch.size(), e); - throw e; - } - } - - log.info("Total updated {} area statistics records", allStats.size()); - } - }; - } - - /** - * 리소스 정리 - */ - public void shutdown() { - executorService.shutdown(); - try { - if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) { - executorService.shutdownNow(); - } - } catch (InterruptedException e) { - executorService.shutdownNow(); - } - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/batch/writer/VesselTrackBulkWriter.java b/src/main/java/gc/mda/signal_batch/batch/writer/VesselTrackBulkWriter.java index dad0abf..b7c87b9 100644 --- a/src/main/java/gc/mda/signal_batch/batch/writer/VesselTrackBulkWriter.java +++ b/src/main/java/gc/mda/signal_batch/batch/writer/VesselTrackBulkWriter.java @@ -59,21 +59,21 @@ public class VesselTrackBulkWriter implements ItemWriter> { log.error("Failed to get DataSource info", e); } } - + private final ObjectMapper objectMapper = new ObjectMapper() .registerModule(new JavaTimeModule()) .setDateFormat(new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss")); - + @Override public void write(Chunk> chunk) throws Exception { List allTracks = chunk.getItems().stream() .flatMap(List::stream) .collect(Collectors.toList()); - + if (allTracks.isEmpty()) { return; } - + try { bulkInsertTracks(allTracks, "signal.t_vessel_tracks_5min"); } catch (Exception e) { @@ -81,12 +81,12 @@ public class VesselTrackBulkWriter implements ItemWriter> 
{ fallbackInsert(allTracks, "signal.t_vessel_tracks_5min"); } } - + public void writeHourlyTracks(List tracks) throws Exception { if (tracks.isEmpty()) { return; } - + try { bulkInsertTracks(tracks, "signal.t_vessel_tracks_hourly"); } catch (Exception e) { @@ -94,12 +94,12 @@ public class VesselTrackBulkWriter implements ItemWriter> { fallbackInsert(tracks, "signal.t_vessel_tracks_hourly"); } } - + public void writeDailyTracks(List tracks) throws Exception { if (tracks.isEmpty()) { return; } - + try { bulkInsertTracks(tracks, "signal.t_vessel_tracks_daily"); } catch (Exception e) { @@ -107,8 +107,8 @@ public class VesselTrackBulkWriter implements ItemWriter> { fallbackInsert(tracks, "signal.t_vessel_tracks_daily"); } } - - // 임시 테이블 + MERGE 패턴을 사용한 Bulk Upsert + + // 임시 테이블 + COPY 패턴을 사용한 Bulk Insert private void bulkInsertTracks(List tracks, String tableName) throws Exception { try (Connection conn = queryDataSource.getConnection()) { conn.setAutoCommit(false); @@ -122,8 +122,7 @@ public class VesselTrackBulkWriter implements ItemWriter> { try (var stmt = conn.createStatement()) { stmt.execute(String.format(""" CREATE TEMP TABLE IF NOT EXISTS %s ( - sig_src_cd VARCHAR(10), - target_id VARCHAR(30), + mmsi VARCHAR(20), time_bucket TIMESTAMP, track_geom GEOMETRY, distance_nm NUMERIC, @@ -142,7 +141,7 @@ public class VesselTrackBulkWriter implements ItemWriter> { // 2. COPY로 임시 테이블에 bulk insert String copySql = String.format(""" COPY %s ( - sig_src_cd, target_id, time_bucket, track_geom, + mmsi, time_bucket, track_geom, distance_nm, avg_speed, max_speed, point_count, start_position, end_position ) FROM STDIN @@ -156,37 +155,29 @@ public class VesselTrackBulkWriter implements ItemWriter> { long rowsCopied = copyManager.copyIn(copySql, new StringReader(writer.toString())); - // 3. 임시 테이블에서 최종 테이블로 UPSERT - String upsertSql = String.format(""" + // 3. 
임시 테이블에서 최종 테이블로 INSERT (중복 키 무시) + String insertSql = String.format(""" INSERT INTO %s ( - sig_src_cd, target_id, time_bucket, track_geom, + mmsi, time_bucket, track_geom, distance_nm, avg_speed, max_speed, point_count, start_position, end_position ) SELECT - sig_src_cd, target_id, time_bucket, track_geom, + mmsi, time_bucket, track_geom, distance_nm, avg_speed, max_speed, point_count, start_position, end_position FROM %s - ON CONFLICT (sig_src_cd, target_id, time_bucket) - DO UPDATE SET - track_geom = EXCLUDED.track_geom, - distance_nm = EXCLUDED.distance_nm, - avg_speed = EXCLUDED.avg_speed, - max_speed = EXCLUDED.max_speed, - point_count = EXCLUDED.point_count, - start_position = EXCLUDED.start_position, - end_position = EXCLUDED.end_position + ON CONFLICT (mmsi, time_bucket) DO NOTHING """, tableName, tempTableName); - int rowsUpserted; + int rowsInserted; try (var stmt = conn.createStatement()) { - rowsUpserted = stmt.executeUpdate(upsertSql); + rowsInserted = stmt.executeUpdate(insertSql); } conn.commit(); - log.info("Bulk upserted {} vessel tracks to {} (copied: {}, upserted: {})", - tracks.size(), tableName, rowsCopied, rowsUpserted); + log.info("Bulk inserted {} vessel tracks to {} (copied: {}, inserted: {})", + tracks.size(), tableName, rowsCopied, rowsInserted); } catch (Exception e) { conn.rollback(); @@ -194,22 +185,21 @@ public class VesselTrackBulkWriter implements ItemWriter> { } } } - + private String formatTrackLine(VesselTrack track) { StringBuilder sb = new StringBuilder(); - - sb.append(track.getSigSrcCd()).append('\t'); - sb.append(track.getTargetId()).append('\t'); + + sb.append(track.getMmsi()).append('\t'); sb.append(Timestamp.valueOf(track.getTimeBucket())).append('\t'); - - // track_geom만 사용 + + // track_geom if (track.getTrackGeom() != null && !track.getTrackGeom().isEmpty()) { sb.append(track.getTrackGeom()); } else { sb.append("\\N"); } sb.append('\t'); - + // distance_nm if (track.getDistanceNm() != null) { 
sb.append(track.getDistanceNm()); @@ -217,7 +207,7 @@ public class VesselTrackBulkWriter implements ItemWriter> { sb.append("\\N"); } sb.append('\t'); - + // avg_speed if (track.getAvgSpeed() != null) { sb.append(track.getAvgSpeed()); @@ -225,7 +215,7 @@ public class VesselTrackBulkWriter implements ItemWriter> { sb.append("\\N"); } sb.append('\t'); - + // max_speed if (track.getMaxSpeed() != null) { sb.append(track.getMaxSpeed()); @@ -233,10 +223,10 @@ public class VesselTrackBulkWriter implements ItemWriter> { sb.append("\\N"); } sb.append('\t'); - + // point_count sb.append(track.getPointCount()).append('\t'); - + // start_position (JSON) if (track.getStartPosition() != null) { sb.append(formatPositionJson(track.getStartPosition())); @@ -244,17 +234,17 @@ public class VesselTrackBulkWriter implements ItemWriter> { sb.append("\\N"); } sb.append('\t'); - + // end_position (JSON) if (track.getEndPosition() != null) { sb.append(formatPositionJson(track.getEndPosition())); } else { sb.append("\\N"); } - + return sb.toString(); } - + private String formatPositionJson(VesselTrack.TrackPosition position) { Map jsonMap = new LinkedHashMap<>(); jsonMap.put("lat", position.getLat()); @@ -263,7 +253,7 @@ public class VesselTrackBulkWriter implements ItemWriter> { if (position.getSog() != null) { jsonMap.put("sog", position.getSog()); } - + try { return objectMapper.writeValueAsString(jsonMap); } catch (Exception e) { @@ -271,30 +261,22 @@ public class VesselTrackBulkWriter implements ItemWriter> { return "{}"; } } - + private void fallbackInsert(List tracks, String tableName) { String sql = String.format(""" INSERT INTO %s ( - sig_src_cd, target_id, time_bucket, track_geom, + mmsi, time_bucket, track_geom, distance_nm, avg_speed, max_speed, point_count, start_position, end_position - ) VALUES (?, ?, ?, public.ST_GeomFromText(?), ?, ?, ?, ?, ?::jsonb, ?::jsonb) - ON CONFLICT (sig_src_cd, target_id, time_bucket) - DO UPDATE SET - track_geom = EXCLUDED.track_geom, - 
distance_nm = EXCLUDED.distance_nm, - avg_speed = EXCLUDED.avg_speed, - max_speed = EXCLUDED.max_speed, - point_count = EXCLUDED.point_count, - start_position = EXCLUDED.start_position, - end_position = EXCLUDED.end_position + ) VALUES (?, ?, public.ST_GeomFromText(?), ?, ?, ?, ?, ?::jsonb, ?::jsonb) + ON CONFLICT (mmsi, time_bucket) DO NOTHING """, tableName); - + + int inserted = 0; for (VesselTrack track : tracks) { try { queryJdbcTemplate.update(sql, - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), Timestamp.valueOf(track.getTimeBucket()), track.getTrackGeom(), track.getDistanceNm(), @@ -304,12 +286,11 @@ public class VesselTrackBulkWriter implements ItemWriter> { track.getStartPosition() != null ? formatPositionJson(track.getStartPosition()) : null, track.getEndPosition() != null ? formatPositionJson(track.getEndPosition()) : null ); - log.debug("Upserted track for vessel: {} to {}", - track.getSigSrcCd() + "_" + track.getTargetId(), tableName); + inserted++; } catch (Exception e) { - log.error("Failed to upsert track for vessel: {} to {}", - track.getSigSrcCd() + "_" + track.getTargetId(), tableName, e); + log.error("Failed to insert track for vessel: {} to {}", track.getMmsi(), tableName, e); } } + log.info("Fallback inserted {} / {} vessel tracks to {}", inserted, tracks.size(), tableName); } -} \ No newline at end of file +} diff --git a/src/main/java/gc/mda/signal_batch/domain/debug/DebugTimeController.java b/src/main/java/gc/mda/signal_batch/domain/debug/DebugTimeController.java index 21990b4..9807e52 100644 --- a/src/main/java/gc/mda/signal_batch/domain/debug/DebugTimeController.java +++ b/src/main/java/gc/mda/signal_batch/domain/debug/DebugTimeController.java @@ -9,8 +9,6 @@ import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.web.bind.annotation.*; import javax.sql.DataSource; -import java.sql.ResultSet; -import java.sql.SQLException; import java.sql.Timestamp; import java.time.LocalDateTime; import 
java.time.ZoneId; @@ -32,8 +30,7 @@ public class DebugTimeController { @GetMapping("/time-analysis") @Operation(summary = "시간 데이터 분석", description = "특정 선박의 항적 데이터에서 시간 정보(time_bucket, Unix timestamp)를 상세 분석합니다. DB 서버 시간, 최근 데이터, 시간 차이 분석을 포함합니다") public Map analyzeTimeData( - @Parameter(description = "신호 소스 코드 (기본: 000001)") @RequestParam(defaultValue = "000001") String sigSrcCd, - @Parameter(description = "선박 ID (기본: 440331240)") @RequestParam(defaultValue = "440331240") String targetId, + @Parameter(description = "MMSI (기본: 440331240)") @RequestParam(defaultValue = "440331240") String mmsi, @Parameter(description = "시작 시간 (형식: yyyy-MM-ddTHH:mm:ss)") @RequestParam(defaultValue = "2025-08-26T08:02:59") String startTime, @Parameter(description = "종료 시간 (형식: yyyy-MM-ddTHH:mm:ss)") @RequestParam(defaultValue = "2025-08-27T08:02:59") String endTime) { @@ -44,8 +41,7 @@ public class DebugTimeController { LocalDateTime end = LocalDateTime.parse(endTime); result.put("requestInfo", Map.of( - "sigSrcCd", sigSrcCd, - "targetId", targetId, + "mmsi", mmsi, "startTime", startTime, "endTime", endTime, "startTimestamp", start.atZone(ZoneId.of("Asia/Seoul")).toEpochSecond(), @@ -73,7 +69,7 @@ public class DebugTimeController { avg_speed, point_count FROM signal.t_vessel_tracks_5min - WHERE sig_src_cd = ? AND target_id = ? + WHERE mmsi = ? AND time_bucket BETWEEN ? AND ? ORDER BY time_bucket LIMIT 10 @@ -115,7 +111,7 @@ public class DebugTimeController { return row; }, - sigSrcCd, targetId, Timestamp.valueOf(start), Timestamp.valueOf(end) + mmsi, Timestamp.valueOf(start), Timestamp.valueOf(end) ); result.put("queryResults", dataRows); @@ -127,7 +123,7 @@ public class DebugTimeController { EXTRACT(epoch FROM time_bucket) as time_bucket_unix, substring(public.ST_AsText(track_geom), 1, 100) as track_sample FROM signal.t_vessel_tracks_5min - WHERE sig_src_cd = ? AND target_id = ? + WHERE mmsi = ? 
ORDER BY time_bucket DESC LIMIT 5 """; @@ -140,7 +136,7 @@ public class DebugTimeController { row.put("track_sample", rs.getString("track_sample")); return row; }, - sigSrcCd, targetId + mmsi ); result.put("recentData", recentRows); diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/cache/AreaBoundaryCache.java b/src/main/java/gc/mda/signal_batch/domain/gis/cache/AreaBoundaryCache.java index 1de9db8..b2ba733 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/cache/AreaBoundaryCache.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/cache/AreaBoundaryCache.java @@ -2,9 +2,9 @@ package gc.mda.signal_batch.domain.gis.cache; import lombok.extern.slf4j.Slf4j; import org.locationtech.jts.geom.Coordinate; +import org.locationtech.jts.geom.Geometry; import org.locationtech.jts.geom.GeometryFactory; import org.locationtech.jts.geom.Point; -import org.locationtech.jts.geom.Polygon; import org.locationtech.jts.io.WKTReader; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.jdbc.core.JdbcTemplate; @@ -22,8 +22,8 @@ import java.util.stream.Collectors; public class AreaBoundaryCache { private final DataSource queryDataSource; - private final Map areaPolygons = new ConcurrentHashMap<>(); - private final Map haeguPolygons = new ConcurrentHashMap<>(); + private final Map areaPolygons = new ConcurrentHashMap<>(); + private final Map haeguPolygons = new ConcurrentHashMap<>(); private final GeometryFactory geometryFactory = new GeometryFactory(); private final WKTReader wktReader = new WKTReader(geometryFactory); @@ -52,8 +52,8 @@ public class AreaBoundaryCache { String areaId = (String) area.get("area_id"); String wkt = (String) area.get("wkt"); try { - Polygon polygon = (Polygon) wktReader.read(wkt); - areaPolygons.put(areaId, polygon); + Geometry geom = wktReader.read(wkt); + areaPolygons.put(areaId, geom); } catch (Exception e) { log.warn("Failed to parse geometry for area {}: {}", areaId, e.getMessage()); } @@ -80,8 +80,8 
@@ public class AreaBoundaryCache { Integer haeguNo = (Integer) haegu.get("haegu_no"); String wkt = (String) haegu.get("wkt"); try { - Polygon polygon = (Polygon) wktReader.read(wkt); - haeguPolygons.put(haeguNo, polygon); + Geometry geom = wktReader.read(wkt); + haeguPolygons.put(haeguNo, geom); } catch (Exception e) { log.warn("Failed to parse geometry for haegu {}: {}", haeguNo, e.getMessage()); } @@ -115,20 +115,20 @@ public class AreaBoundaryCache { // 특정 area에 포인트가 포함되는지 확인 public boolean isPointInArea(double lat, double lon, String areaId) { - Polygon polygon = areaPolygons.get(areaId); - if (polygon == null) return false; - + Geometry geom = areaPolygons.get(areaId); + if (geom == null) return false; + Point point = geometryFactory.createPoint(new Coordinate(lon, lat)); - return polygon.contains(point); + return geom.contains(point); } - + // 특정 haegu에 포인트가 포함되는지 확인 public boolean isPointInHaegu(double lat, double lon, Integer haeguNo) { - Polygon polygon = haeguPolygons.get(haeguNo); - if (polygon == null) return false; - + Geometry geom = haeguPolygons.get(haeguNo); + if (geom == null) return false; + Point point = geometryFactory.createPoint(new Coordinate(lon, lat)); - return polygon.contains(point); + return geom.contains(point); } // Job 실행 시 캐시 갱신 diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/controller/AreaSearchController.java b/src/main/java/gc/mda/signal_batch/domain/gis/controller/AreaSearchController.java index 4e724e7..b69b7ec 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/controller/AreaSearchController.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/controller/AreaSearchController.java @@ -130,7 +130,7 @@ public class AreaSearchController { **접촉 판정 조건:** - 두 선박 모두 폴리곤 **내부**에 있을 때만 접촉으로 간주 - - 대상: sigSrcCd 필터 (기본 "000001") 선박끼리만 비교 + - 대상: AIS 수집 선박끼리만 비교 - 접촉 구간의 **평균 거리** <= maxContactDistanceMeters - 접촉 지속 시간 >= minContactDurationMinutes diff --git 
a/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactRequest.java b/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactRequest.java index d896868..6a9f520 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactRequest.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactRequest.java @@ -47,10 +47,6 @@ public class VesselContactRequest { @Schema(description = "최대 접촉 판정 거리 (미터, 50~5000)", example = "1000", requiredMode = Schema.RequiredMode.REQUIRED) private Double maxContactDistanceMeters; - @Schema(description = "대상 선박 신호소스 코드 (기본: 000001)", example = "000001", defaultValue = "000001") - @Builder.Default - private String sigSrcCd = "000001"; - @Data @Builder @NoArgsConstructor diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactResponse.java b/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactResponse.java index b3e2d8b..5fd9815 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactResponse.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/dto/VesselContactResponse.java @@ -79,7 +79,7 @@ public class VesselContactResponse { @Schema(description = "접촉 선박 개별 정보") public static class VesselContactInfo { - @Schema(description = "선박 고유 ID (sigSrcCd_targetId)", example = "000001_440113620") + @Schema(description = "선박 고유 ID (MMSI)", example = "440113620") private String vesselId; @Schema(description = "선박명", example = "SAM SUNG 2HO") @@ -94,9 +94,6 @@ public class VesselContactResponse { @Schema(description = "국적 MID 코드 (MMSI 앞 3자리)", example = "440") private String nationalCode; - @Schema(description = "통합선박 ID", example = "440113620___440113620_") - private String integrationTargetId; - // ── 폴리곤 내 체류 정보 ── @Schema(description = "폴리곤 내 첫 시각 (Unix 초)", example = "1738360000") private Long insidePolygonStartTs; @@ -145,7 +142,7 @@ public class VesselContactResponse { @Schema(description = "접촉에 관련된 고유 선박 수", example = "5") private Integer 
totalVesselsInvolved; - @Schema(description = "sigSrcCd 필터 후 폴리곤 내 전체 선박 수", example = "42") + @Schema(description = "폴리곤 내 전체 선박 수", example = "42") private Integer totalVesselsInPolygon; @Schema(description = "처리 소요 시간 (ms)", example = "2340") diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/service/AreaSearchService.java b/src/main/java/gc/mda/signal_batch/domain/gis/service/AreaSearchService.java index 5cb6eb2..b434963 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/service/AreaSearchService.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/service/AreaSearchService.java @@ -262,13 +262,10 @@ public class AreaSearchService { merged.put(entry.getKey(), CompactVesselTrack.builder() .vesselId(first.getVesselId()) - .sigSrcCd(first.getSigSrcCd()) - .targetId(first.getTargetId()) .nationalCode(first.getNationalCode()) .shipName(first.getShipName()) .shipType(first.getShipType()) .shipKindCode(first.getShipKindCode()) - .integrationTargetId(first.getIntegrationTargetId()) .geometry(geo) .timestamps(ts) .speeds(sp) diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/service/GisService.java b/src/main/java/gc/mda/signal_batch/domain/gis/service/GisService.java index 0c91413..ed7c180 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/service/GisService.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/service/GisService.java @@ -5,11 +5,7 @@ import gc.mda.signal_batch.domain.vessel.dto.TrackResponse; import gc.mda.signal_batch.domain.vessel.dto.VesselStatsResponse; import gc.mda.signal_batch.domain.vessel.dto.VesselTracksRequest; import gc.mda.signal_batch.domain.vessel.dto.CompactVesselTrack; -import gc.mda.signal_batch.domain.vessel.dto.IntegrationVessel; -import gc.mda.signal_batch.domain.vessel.service.IntegrationVesselService; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; -import gc.mda.signal_batch.global.util.NationalCodeUtil; -import gc.mda.signal_batch.global.util.ShipKindCodeConverter; +import 
gc.mda.signal_batch.global.util.SignalKindCode; import gc.mda.signal_batch.global.util.TrackSimplificationUtils; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; @@ -23,7 +19,6 @@ import java.sql.Timestamp; import java.time.Duration; import java.time.LocalDate; import java.time.LocalDateTime; -import java.time.temporal.ChronoUnit; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -38,25 +33,22 @@ import java.util.stream.Collectors; public class GisService { private final DataSource queryDataSource; - private final IntegrationVesselService integrationVesselService; - public GisService(@Qualifier("queryDataSource") DataSource queryDataSource, - IntegrationVesselService integrationVesselService) { + public GisService(@Qualifier("queryDataSource") DataSource queryDataSource) { this.queryDataSource = queryDataSource; - this.integrationVesselService = integrationVesselService; } - + public List getHaeguBoundaries() { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); - + String sql = """ SELECT haegu_no, center_lat, center_lon, public.ST_AsGeoJSON(geom) as geom_json FROM signal.t_haegu_definitions ORDER BY haegu_no """; - - return jdbcTemplate.query(sql, (rs, rowNum) -> + + return jdbcTemplate.query(sql, (rs, rowNum) -> GisBoundaryResponse.builder() .haeguNo(rs.getInt("haegu_no")) .centerLat(rs.getDouble("center_lat")) @@ -65,13 +57,13 @@ public class GisService { .build() ); } - + public Map getHaeguVesselStats(int minutes) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); - + String sql = """ SELECT haegu_no, - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as vessel_count, + COUNT(DISTINCT mmsi) as vessel_count, COALESCE(SUM(distance_nm), 0) as total_distance, COALESCE(AVG(avg_speed), 0) as avg_speed, COUNT(*) as active_tracks @@ -79,11 +71,11 @@ public class GisService { WHERE time_bucket >= NOW() - INTERVAL '%d minutes' GROUP BY haegu_no 
""".formatted(minutes); - + Map result = new HashMap<>(); - + jdbcTemplate.query(sql, rs -> { - result.put(rs.getInt("haegu_no"), + result.put(rs.getInt("haegu_no"), VesselStatsResponse.builder() .vesselCount(rs.getInt("vessel_count")) .totalDistance(rs.getBigDecimal("total_distance")) @@ -92,13 +84,13 @@ public class GisService { .build() ); }); - + return result; } - + public List getAreaBoundaries() { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); - + String sql = """ SELECT area_id, area_name, public.ST_Y(public.ST_Centroid(area_geom)) as center_lat, @@ -107,8 +99,8 @@ public class GisService { FROM signal.t_areas ORDER BY area_id """; - - return jdbcTemplate.query(sql, (rs, rowNum) -> + + return jdbcTemplate.query(sql, (rs, rowNum) -> GisBoundaryResponse.builder() .areaId(rs.getString("area_id")) .areaName(rs.getString("area_name")) @@ -118,13 +110,13 @@ public class GisService { .build() ); } - + public Map getAreaVesselStats(int minutes) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); - + String sql = """ SELECT area_id, - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as vessel_count, + COUNT(DISTINCT mmsi) as vessel_count, COALESCE(SUM(distance_nm), 0) as total_distance, COALESCE(AVG(avg_speed), 0) as avg_speed, COUNT(*) as active_tracks @@ -132,11 +124,11 @@ public class GisService { WHERE time_bucket >= NOW() - INTERVAL '%d minutes' GROUP BY area_id """.formatted(minutes); - + Map result = new HashMap<>(); - + jdbcTemplate.query(sql, rs -> { - result.put(rs.getString("area_id"), + result.put(rs.getString("area_id"), VesselStatsResponse.builder() .vesselCount(rs.getInt("vessel_count")) .totalDistance(rs.getBigDecimal("total_distance")) @@ -145,96 +137,85 @@ public class GisService { .build() ); }); - + return result; } - + public List getHaeguTracks(Integer haeguNo, int minutes) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); List allTracks = new ArrayList<>(); - + LocalDateTime now = 
LocalDateTime.now(); LocalDateTime startTime = now.minusMinutes(minutes); - - // 1시간 이상인 경우 여러 테이블 조합 + if (minutes > 60) { - // 현재 시간의 정시 LocalDateTime currentHour = now.withMinute(0).withSecond(0).withNano(0); - - if (minutes <= 1440) { // 24시간 이하 - // 1. hourly 테이블에서 과거 데이터 조회 + + if (minutes <= 1440) { String hourlySql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_hourly t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= '%s' ) AND t.time_bucket >= '%s' AND t.time_bucket < '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, startTime, startTime, currentHour); - + allTracks.addAll(jdbcTemplate.query(hourlySql, this::mapTrackResponse)); - } else { - // daily 테이블 사용 (추후 구현) } - - // 2. 
5min 테이블에서 최근 데이터 조회 (아직 집계되지 않은 부분) + String recentSql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= '%s' ) AND t.time_bucket >= '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, currentHour, currentHour); - + allTracks.addAll(jdbcTemplate.query(recentSql, this::mapTrackResponse)); - + } else { - // 1시간 이하는 5분 테이블만 사용 String sql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= NOW() - INTERVAL '%d minutes' ) AND t.time_bucket >= NOW() - INTERVAL '%d minutes' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, minutes, minutes); - + allTracks = jdbcTemplate.query(sql, this::mapTrackResponse); } - - log.debug("Fetched {} tracks for haegu {} in last {} minutes", + + log.debug("Fetched {} tracks for haegu {} in last {} minutes", allTracks.size(), haeguNo, minutes); - + return allTracks; } - + private TrackResponse mapTrackResponse(ResultSet rs, int rowNum) throws SQLException { return TrackResponse.builder() - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) + .mmsi(rs.getString("mmsi")) .timeBucket(rs.getObject("time_bucket", LocalDateTime.class)) 
.trackGeom(rs.getString("track_geom")) .distanceNm(rs.getBigDecimal("distance_nm")) @@ -243,97 +224,81 @@ public class GisService { .pointCount(rs.getInt("point_count")) .build(); } - + public List getAreaTracks(String areaId, int minutes) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); List allTracks = new ArrayList<>(); - + LocalDateTime now = LocalDateTime.now(); LocalDateTime startTime = now.minusMinutes(minutes); - - // 1시간 이상인 경우 여러 테이블 조합 + if (minutes > 60) { - // 현재 시간의 정시 LocalDateTime currentHour = now.withMinute(0).withSecond(0).withNano(0); - - if (minutes <= 1440) { // 24시간 이하 - // 1. hourly 테이블에서 과거 데이터 조회 + + if (minutes <= 1440) { String hourlySql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_hourly t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= '%s' ) AND t.time_bucket >= '%s' AND t.time_bucket < '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(areaId, startTime, startTime, currentHour); - + allTracks.addAll(jdbcTemplate.query(hourlySql, this::mapTrackResponse)); - } else { - // daily 테이블 사용 (추후 구현) } - - // 2. 
5min 테이블에서 최근 데이터 조회 (아직 집계되지 않은 부분) + String recentSql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= '%s' ) AND t.time_bucket >= '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(areaId, currentHour, currentHour); - + allTracks.addAll(jdbcTemplate.query(recentSql, this::mapTrackResponse)); - + } else { - // 1시간 이하는 5분 테이블만 사용 String sql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= NOW() - INTERVAL '%d minutes' ) AND t.time_bucket >= NOW() - INTERVAL '%d minutes' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(areaId, minutes, minutes); - + allTracks = jdbcTemplate.query(sql, this::mapTrackResponse); } - - log.debug("Fetched {} tracks for area {} in last {} minutes", + + log.debug("Fetched {} tracks for area {} in last {} minutes", allTracks.size(), areaId, minutes); - + return allTracks; } - + /** * 선박별 항적 조회 (계층적 보완 조회 + 간소화) - * - * 조회 전략: - * 1. 상위 테이블(daily → hourly → 5min) 순서로 조회 - * 2. 각 테이블에서 누락 구간 감지 - * 3. 누락 구간은 하위 테이블에서 보완 조회 + 상위 수준으로 간소화 - * 4. 
전체 시간순 정렬 */ public List getVesselTracks(VesselTracksRequest request) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); @@ -342,33 +307,26 @@ public class GisService { LocalDateTime startTime = request.getStartTime(); LocalDateTime endTime = request.getEndTime(); - for (VesselTracksRequest.VesselIdentifier vessel : request.getVessels()) { + for (String mmsi : request.getVessels()) { List tracks = queryVesselTracksWithFallback( - jdbcTemplate, vessel.getSigSrcCd(), vessel.getTargetId(), startTime, endTime); + jdbcTemplate, mmsi, startTime, endTime); - // Sort all tracks by time_bucket to ensure proper ordering tracks.sort((t1, t2) -> t1.getTimeBucket().compareTo(t2.getTimeBucket())); if (!tracks.isEmpty()) { - CompactVesselTrack compactTrack = buildCompactVesselTrack(vessel, tracks); + CompactVesselTrack compactTrack = buildCompactVesselTrack(mmsi, tracks); results.add(compactTrack); } } - // 통합선박 필터링 적용 (isIntegration = "1" 이고 기능이 활성화된 경우) - if ("1".equals(request.getIsIntegration()) && integrationVesselService.isEnabled()) { - results = filterByIntegration(results); - } - return results; } /** * 계층적 보완 조회 로직 - * 상위 테이블에서 데이터가 없는 구간을 하위 테이블에서 보완 */ private List queryVesselTracksWithFallback( - JdbcTemplate jdbcTemplate, String sigSrcCd, String targetId, + JdbcTemplate jdbcTemplate, String mmsi, LocalDateTime startTime, LocalDateTime endTime) { List allTracks = new ArrayList<>(); @@ -376,7 +334,6 @@ public class GisService { long hours = duration.toHours(); LocalDateTime now = LocalDateTime.now(); - // 배치 완료 여유 시간 (hourly 배치는 매시 10분 시작, 약 5분 소요) LocalDateTime safeHourlyBoundary = now.withMinute(0).withSecond(0).withNano(0); if (now.getMinute() < 15) { safeHourlyBoundary = safeHourlyBoundary.minusHours(1); @@ -393,15 +350,15 @@ public class GisService { if (!dailyEnd.isBefore(dailyStart)) { List dailyTracks = queryDailyTracks( - jdbcTemplate, sigSrcCd, targetId, dailyStart, dailyEnd); + jdbcTemplate, mmsi, dailyStart, dailyEnd); for (TrackResponse 
track : dailyTracks) { coveredDays.add(track.getTimeBucket().toLocalDate()); } allTracks.addAll(dailyTracks); - log.debug("[FALLBACK] Daily: {} days covered for {}_{}", - coveredDays.size(), sigSrcCd, targetId); + log.debug("[FALLBACK] Daily: {} days covered for {}", + coveredDays.size(), mmsi); } } @@ -418,9 +375,8 @@ public class GisService { LocalDateTime dayStart = missingDay.atStartOfDay(); LocalDateTime dayEnd = missingDay.plusDays(1).atStartOfDay(); - // Hourly로 보완 조회 (Daily 수준으로 간소화) List fallbackTracks = queryHourlyTracks( - jdbcTemplate, sigSrcCd, targetId, dayStart, dayEnd); + jdbcTemplate, mmsi, dayStart, dayEnd); for (TrackResponse track : fallbackTracks) { track.setTrackGeom(TrackSimplificationUtils.simplifyDailyTrack(track.getTrackGeom())); @@ -435,21 +391,21 @@ public class GisService { // === 3단계: Hourly 테이블 조회 === Set coveredHours = new HashSet<>(); LocalDateTime hourlyStart = hours >= 24 - ? safeDailyBoundary.plusDays(1) // Daily 다음날부터 + ? safeDailyBoundary.plusDays(1) : startTime.withMinute(0).withSecond(0).withNano(0); LocalDateTime hourlyEnd = endTime.isBefore(safeHourlyBoundary) ? 
endTime : safeHourlyBoundary; if (hours > 1 && hourlyStart.isBefore(hourlyEnd)) { List hourlyTracks = queryHourlyTracks( - jdbcTemplate, sigSrcCd, targetId, hourlyStart, hourlyEnd); + jdbcTemplate, mmsi, hourlyStart, hourlyEnd); for (TrackResponse track : hourlyTracks) { coveredHours.add(track.getTimeBucket().withMinute(0).withSecond(0).withNano(0)); } allTracks.addAll(hourlyTracks); - log.debug("[FALLBACK] Hourly: {} hours covered for {}_{}", - coveredHours.size(), sigSrcCd, targetId); + log.debug("[FALLBACK] Hourly: {} hours covered for {}", + coveredHours.size(), mmsi); } // === 4단계: Hourly 누락 구간 → 5min에서 보완 === @@ -460,9 +416,8 @@ public class GisService { LocalDateTime hourStart = missingHour; LocalDateTime hourEnd = missingHour.plusHours(1); - // 5min으로 보완 조회 (Hourly 수준으로 간소화) List fallbackTracks = query5minTracks( - jdbcTemplate, sigSrcCd, targetId, hourStart, hourEnd); + jdbcTemplate, mmsi, hourStart, hourEnd); for (TrackResponse track : fallbackTracks) { track.setTrackGeom(TrackSimplificationUtils.simplifyHourlyTrack(track.getTrackGeom())); @@ -478,11 +433,11 @@ public class GisService { LocalDateTime fiveMinStart = safeHourlyBoundary.isAfter(startTime) ? 
safeHourlyBoundary : startTime; if (endTime.isAfter(fiveMinStart)) { List fiveMinTracks = query5minTracks( - jdbcTemplate, sigSrcCd, targetId, fiveMinStart, endTime); + jdbcTemplate, mmsi, fiveMinStart, endTime); allTracks.addAll(fiveMinTracks); - log.debug("[FALLBACK] 5min: {} segments for {}_{} ({} ~ {})", - fiveMinTracks.size(), sigSrcCd, targetId, fiveMinStart, endTime); + log.debug("[FALLBACK] 5min: {} segments for {} ({} ~ {})", + fiveMinTracks.size(), mmsi, fiveMinStart, endTime); } return allTracks; @@ -492,22 +447,22 @@ public class GisService { * Daily 테이블 조회 */ private List queryDailyTracks( - JdbcTemplate jdbcTemplate, String sigSrcCd, String targetId, + JdbcTemplate jdbcTemplate, String mmsi, LocalDate startDate, LocalDate endDate) { String sql = """ - SELECT sig_src_cd, target_id, + SELECT mmsi, time_bucket::timestamp as time_bucket, public.ST_AsText(track_geom) as track_geom, distance_nm, avg_speed, max_speed, point_count FROM signal.t_vessel_tracks_daily - WHERE sig_src_cd = ? AND target_id = ? + WHERE mmsi = ? AND time_bucket BETWEEN ?::date AND ?::date ORDER BY time_bucket """; return jdbcTemplate.query(sql, this::mapTrackResponse, - sigSrcCd, targetId, + mmsi, java.sql.Date.valueOf(startDate), java.sql.Date.valueOf(endDate)); } @@ -515,21 +470,21 @@ public class GisService { * Hourly 테이블 조회 */ private List queryHourlyTracks( - JdbcTemplate jdbcTemplate, String sigSrcCd, String targetId, + JdbcTemplate jdbcTemplate, String mmsi, LocalDateTime startTime, LocalDateTime endTime) { String sql = """ - SELECT sig_src_cd, target_id, time_bucket, + SELECT mmsi, time_bucket, public.ST_AsText(track_geom) as track_geom, distance_nm, avg_speed, max_speed, point_count FROM signal.t_vessel_tracks_hourly - WHERE sig_src_cd = ? AND target_id = ? + WHERE mmsi = ? AND time_bucket >= ? AND time_bucket < ? 
ORDER BY time_bucket """; return jdbcTemplate.query(sql, this::mapTrackResponse, - sigSrcCd, targetId, + mmsi, Timestamp.valueOf(startTime), Timestamp.valueOf(endTime)); } @@ -537,21 +492,21 @@ public class GisService { * 5min 테이블 조회 */ private List query5minTracks( - JdbcTemplate jdbcTemplate, String sigSrcCd, String targetId, + JdbcTemplate jdbcTemplate, String mmsi, LocalDateTime startTime, LocalDateTime endTime) { String sql = """ - SELECT sig_src_cd, target_id, time_bucket, + SELECT mmsi, time_bucket, public.ST_AsText(track_geom) as track_geom, distance_nm, avg_speed, max_speed, point_count FROM signal.t_vessel_tracks_5min - WHERE sig_src_cd = ? AND target_id = ? + WHERE mmsi = ? AND time_bucket >= ? AND time_bucket < ? ORDER BY time_bucket """; return jdbcTemplate.query(sql, this::mapTrackResponse, - sigSrcCd, targetId, + mmsi, Timestamp.valueOf(startTime), Timestamp.valueOf(endTime)); } @@ -591,122 +546,34 @@ public class GisService { return missingHours; } - /** - * 통합선박 기준 필터링 (REST API용) - */ - private List filterByIntegration(List tracks) { - if (tracks == null || tracks.isEmpty()) { - return tracks; - } - - // 1. 모든 트랙의 통합선박 정보 조회 (캐시에서) - Map vesselIntegrations = new HashMap<>(); - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - if (!vesselIntegrations.containsKey(key)) { - IntegrationVessel integration = integrationVesselService.findByVessel( - track.getSigSrcCd(), track.getTargetId() - ); - vesselIntegrations.put(key, integration); - } - } - - // 2. 
통합선박별 그룹핑 - Map> groupedByIntegration = new HashMap<>(); - Map integrationMap = new HashMap<>(); - - long tempSeq = -1; - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - IntegrationVessel integration = vesselIntegrations.get(key); - - Long seq; - if (integration != null) { - seq = integration.getIntgrSeq(); - integrationMap.putIfAbsent(seq, integration); - } else { - seq = tempSeq--; - } - - groupedByIntegration.computeIfAbsent(seq, k -> new ArrayList<>()).add(track); - } - - // 3. 각 그룹에서 최고 우선순위 신호만 선택 - List result = new ArrayList<>(); - - for (Map.Entry> entry : groupedByIntegration.entrySet()) { - Long seq = entry.getKey(); - List groupTracks = entry.getValue(); - - if (seq < 0) { - // 통합정보 없는 단독 선박 - CompactVesselTrack firstTrack = groupTracks.get(0); - String soloIntegrationId = IntegrationSignalConstants.generateSoloIntegrationId( - firstTrack.getSigSrcCd(), - firstTrack.getTargetId() - ); - groupTracks.forEach(t -> t.setIntegrationTargetId(soloIntegrationId)); - result.addAll(groupTracks); - } else { - // 통합선박 → 존재하는 신호 중 최고 우선순위 선택 - IntegrationVessel integration = integrationMap.get(seq); - - java.util.Set existingSigSrcCds = groupTracks.stream() - .map(CompactVesselTrack::getSigSrcCd) - .collect(java.util.stream.Collectors.toSet()); - - String selectedSigSrcCd = integrationVesselService.selectHighestPriorityFromExisting(existingSigSrcCds); - - List selectedTracks = groupTracks.stream() - .filter(t -> t.getSigSrcCd().equals(selectedSigSrcCd)) - .collect(java.util.stream.Collectors.toList()); - - String integrationId = integration.generateIntegrationId(); - selectedTracks.forEach(t -> t.setIntegrationTargetId(integrationId)); - - result.addAll(selectedTracks); - } - } - - log.info("[INTEGRATION_FILTER] REST API - Filtered {} tracks to {} tracks", tracks.size(), result.size()); - return result; - } - private CompactVesselTrack buildCompactVesselTrack( - VesselTracksRequest.VesselIdentifier vessel, 
+ String mmsi, List tracks) { - - String vesselId = vessel.getSigSrcCd() + "_" + vessel.getTargetId(); + List geometry = new ArrayList<>(); List timestamps = new ArrayList<>(); List speeds = new ArrayList<>(); double totalDistance = 0; double maxSpeed = 0; - int totalPoints = 0; - - // WKTReader reader = new WKTReader(); - + for (TrackResponse track : tracks) { if (track.getTrackGeom() != null && !track.getTrackGeom().isEmpty()) { try { - // Parse LineStringM String wkt = track.getTrackGeom(); if (wkt.startsWith("LINESTRING M")) { - // Extract coordinate data from WKT String coordsPart = wkt.substring("LINESTRING M(".length() + 1, wkt.length() - 1); String[] points = coordsPart.split(","); - + for (String point : points) { String[] parts = point.trim().split("\\s+"); if (parts.length >= 3) { double lon = Double.parseDouble(parts[0]); double lat = Double.parseDouble(parts[1]); - String timestamp = parts[2]; // Unix timestamp as string - + String timestamp = parts[2]; + geometry.add(new double[]{lon, lat}); timestamps.add(timestamp); - - // Add SOG value if available (could be from track data) + if (track.getAvgSpeed() != null) { speeds.add(track.getAvgSpeed().doubleValue()); } else { @@ -719,42 +586,30 @@ public class GisService { log.warn("Failed to parse track geometry: {}", e.getMessage()); } } - + if (track.getDistanceNm() != null) { totalDistance += track.getDistanceNm().doubleValue(); } if (track.getMaxSpeed() != null && track.getMaxSpeed().doubleValue() > maxSpeed) { maxSpeed = track.getMaxSpeed().doubleValue(); } - if (track.getPointCount() != null) { - totalPoints += track.getPointCount(); - } } - - // Calculate average speed + double avgSpeed = speeds.stream() .filter(s -> s > 0) .mapToDouble(Double::doubleValue) .average() .orElse(0.0); - - // Get vessel info - Map vesselInfo = getVesselInfo(vessel.getSigSrcCd(), vessel.getTargetId()); + + Map vesselInfo = getVesselInfo(mmsi); String shipName = vesselInfo.get("ship_name"); String shipType = 
vesselInfo.get("ship_type"); - // Calculate nationalCode (same as WebSocket) - String nationalCode = NationalCodeUtil.calculateNationalCode( - vessel.getSigSrcCd(), vessel.getTargetId()); - - // Calculate shipKindCode (same as WebSocket - using name pattern matching for buoy/net detection) - String shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - vessel.getSigSrcCd(), shipType, shipName, vessel.getTargetId()); + String nationalCode = (mmsi != null && mmsi.length() >= 3) ? mmsi.substring(0, 3) : null; + String shipKindCode = SignalKindCode.resolve(shipType, null).getCode(); return CompactVesselTrack.builder() - .vesselId(vesselId) - .sigSrcCd(vessel.getSigSrcCd()) - .targetId(vessel.getTargetId()) + .vesselId(mmsi) .nationalCode(nationalCode) .geometry(geometry) .timestamps(timestamps) @@ -768,18 +623,18 @@ public class GisService { .shipKindCode(shipKindCode) .build(); } - - private Map getVesselInfo(String sigSrcCd, String targetId) { + + private Map getVesselInfo(String mmsi) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); try { String sql = """ - SELECT ship_nm as ship_name, ship_ty as ship_type - FROM signal.t_vessel_latest_position - WHERE sig_src_cd = ? AND target_id = ? + SELECT ship_nm as ship_name, vessel_type as ship_type + FROM signal.t_ais_position + WHERE mmsi = ? 
LIMIT 1 """; - - return jdbcTemplate.queryForMap(sql, sigSrcCd, targetId) + + return jdbcTemplate.queryForMap(sql, mmsi) .entrySet().stream() .collect(Collectors.toMap( Map.Entry::getKey, @@ -789,4 +644,4 @@ public class GisService { return Map.of("ship_name", "", "ship_type", ""); } } -} \ No newline at end of file +} diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/service/GisServiceV2.java b/src/main/java/gc/mda/signal_batch/domain/gis/service/GisServiceV2.java index 8f3f55f..59ddfaa 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/service/GisServiceV2.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/service/GisServiceV2.java @@ -3,10 +3,7 @@ package gc.mda.signal_batch.domain.gis.service; import gc.mda.signal_batch.domain.vessel.dto.CompactVesselTrack; import gc.mda.signal_batch.domain.vessel.dto.TrackResponse; import gc.mda.signal_batch.domain.vessel.dto.VesselTracksRequest; -import gc.mda.signal_batch.domain.vessel.dto.IntegrationVessel; -import gc.mda.signal_batch.domain.vessel.service.IntegrationVesselService; import gc.mda.signal_batch.global.exception.QueryTimeoutException; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; import gc.mda.signal_batch.global.util.TrackConverter; import gc.mda.signal_batch.global.websocket.service.ActiveQueryManager; import gc.mda.signal_batch.global.websocket.service.CacheTrackSimplifier; @@ -29,7 +26,6 @@ import java.util.stream.Collectors; * GIS 서비스 V2 - CompactVesselTrack 기반 응답 * WebSocket API와 동일한 응답 구조 제공 * - * Phase: REST V2 캐시 + 부하 제어 + 응답 크기 제한 * - Semaphore 기반 동시성 제어 (ActiveQueryManager 공유) * - POST /vessels: DailyTrackCacheManager 캐시 우선 조회 * - 2단계 간소화 파이프라인 (표준 간소화 + 포인트 버짓 강제) @@ -39,7 +35,6 @@ import java.util.stream.Collectors; public class GisServiceV2 { private final DataSource queryDataSource; - private final IntegrationVesselService integrationVesselService; private final ActiveQueryManager activeQueryManager; private final DailyTrackCacheManager 
dailyTrackCacheManager; private final CacheTrackSimplifier cacheTrackSimplifier; @@ -56,13 +51,11 @@ public class GisServiceV2 { private static final long VESSEL_CACHE_TTL = 3600_000; // 1시간 public GisServiceV2(@Qualifier("queryDataSource") DataSource queryDataSource, - IntegrationVesselService integrationVesselService, ActiveQueryManager activeQueryManager, DailyTrackCacheManager dailyTrackCacheManager, CacheTrackSimplifier cacheTrackSimplifier, GisService gisService) { this.queryDataSource = queryDataSource; - this.integrationVesselService = integrationVesselService; this.activeQueryManager = activeQueryManager; this.dailyTrackCacheManager = dailyTrackCacheManager; this.cacheTrackSimplifier = cacheTrackSimplifier; @@ -71,7 +64,6 @@ public class GisServiceV2 { /** * 해구별 선박 항적 조회 (V2 - CompactVesselTrack 반환) - * Semaphore 부하 제어 + 간소화 파이프라인 적용 */ public List getHaeguTracks(Integer haeguNo, int minutes, boolean filterByIntegration) { String queryId = "rest-haegu-" + haeguNo + "-" + UUID.randomUUID().toString().substring(0, 8); @@ -91,69 +83,61 @@ public class GisServiceV2 { if (minutes <= 1440) { String hourlySql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_hourly t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= '%s' ) AND t.time_bucket >= '%s' AND t.time_bucket < '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, startTime, startTime, currentHour); rawTracks.addAll(jdbcTemplate.query(hourlySql, this::mapTrackResponse)); } String recentSql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) 
as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= '%s' ) AND t.time_bucket >= '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, startTime, currentHour); rawTracks.addAll(jdbcTemplate.query(recentSql, this::mapTrackResponse)); } else { String sql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_grid_vessel_tracks g - WHERE g.sig_src_cd = t.sig_src_cd - AND g.target_id = t.target_id + WHERE g.mmsi = t.mmsi AND g.haegu_no = %d AND g.time_bucket >= NOW() - INTERVAL '%d minutes' ) AND t.time_bucket >= NOW() - INTERVAL '%d minutes' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(haeguNo, minutes, minutes); rawTracks = jdbcTemplate.query(sql, this::mapTrackResponse); } List result = TrackConverter.convert(rawTracks, this::getVesselInfo); - - if (filterByIntegration && integrationVesselService.isEnabled()) { - result = filterByIntegration(result); - } - result = applySimplificationPipeline(result); log.debug("V2 API: Fetched {} compact tracks for haegu {} in last {} minutes", @@ -173,7 +157,6 @@ public class GisServiceV2 { /** * 영역별 선박 항적 조회 (V2 - CompactVesselTrack 반환) - * Semaphore 부하 제어 + 간소화 파이프라인 적용 */ public List getAreaTracks(String areaId, int minutes, boolean filterByIntegration) { String queryId = "rest-area-" + areaId + "-" + UUID.randomUUID().toString().substring(0, 8); @@ -193,69 +176,61 @@ public class GisServiceV2 { if (minutes <= 1440) { String hourlySql = """ - SELECT 
DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_hourly t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= '%s' ) AND t.time_bucket >= '%s' AND t.time_bucket < '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(areaId, startTime, startTime, currentHour); rawTracks.addAll(jdbcTemplate.query(hourlySql, this::mapTrackResponse)); } String recentSql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= '%s' ) AND t.time_bucket >= '%s' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, t.time_bucket """.formatted(areaId, startTime, currentHour); rawTracks.addAll(jdbcTemplate.query(recentSql, this::mapTrackResponse)); } else { String sql = """ - SELECT DISTINCT t.sig_src_cd, t.target_id, t.time_bucket, + SELECT DISTINCT t.mmsi, t.time_bucket, public.ST_AsText(t.track_geom) as track_geom, t.distance_nm, t.avg_speed, t.max_speed, t.point_count FROM signal.t_vessel_tracks_5min t WHERE EXISTS ( SELECT 1 FROM signal.t_area_vessel_tracks a - WHERE a.sig_src_cd = t.sig_src_cd - AND a.target_id = t.target_id + WHERE a.mmsi = t.mmsi AND a.area_id = '%s' AND a.time_bucket >= NOW() - INTERVAL '%d minutes' ) AND t.time_bucket >= NOW() - INTERVAL '%d minutes' - ORDER BY t.sig_src_cd, t.target_id, t.time_bucket + ORDER BY t.mmsi, 
t.time_bucket """.formatted(areaId, minutes, minutes); rawTracks = jdbcTemplate.query(sql, this::mapTrackResponse); } List result = TrackConverter.convert(rawTracks, this::getVesselInfo); - - if (filterByIntegration && integrationVesselService.isEnabled()) { - result = filterByIntegration(result); - } - result = applySimplificationPipeline(result); log.debug("V2 API: Fetched {} compact tracks for area {} in last {} minutes", @@ -275,7 +250,6 @@ public class GisServiceV2 { /** * 선박별 항적 조회 V2 (캐시 + Semaphore + 간소화) - * DailyTrackCacheManager를 활용한 캐시 우선 조회 */ public List getVesselTracksV2(VesselTracksRequest request) { String queryId = "rest-vessels-" + UUID.randomUUID().toString().substring(0, 8); @@ -292,7 +266,6 @@ public class GisServiceV2 { result = queryWithCache(request); } else { - // 캐시 비활성화/미준비: 기존 GisService에 위임 result = gisService.getVesselTracks(request); } @@ -306,7 +279,6 @@ public class GisServiceV2 { } finally { if (slotAcquired) { activeQueryManager.releaseQuerySlot(queryId); - // Humongous 영역 조기 회수 (G1GC에서 8MB+ 객체는 Mixed GC에서만 회수) if (activeQueryManager.isHeapPressureHigh()) { System.gc(); } @@ -316,10 +288,6 @@ public class GisServiceV2 { // ── 캐시 조회 로직 ── - /** - * splitQueryRange를 사용한 캐시 우선 조회 - * D-1부터 역순으로 캐시 존재 확인 → 캐시/DB 분리 조회 → 병합 - */ private List queryWithCache(VesselTracksRequest request) { LocalDateTime startTime = request.getStartTime(); LocalDateTime endTime = request.getEndTime(); @@ -329,24 +297,20 @@ public class GisServiceV2 { List allTracks = new ArrayList<>(); - // 요청 선박 ID 집합 구성 - Set requestedVesselKeys = request.getVessels().stream() - .map(v -> v.getSigSrcCd() + "_" + v.getTargetId()) - .collect(Collectors.toSet()); + Set requestedMmsis = new HashSet<>(request.getVessels()); // 1. 
캐시에서 조회 (캐시된 날짜) if (split.hasCachedData()) { List cachedTracks = dailyTrackCacheManager.getCachedTracksMultipleDays(split.getCachedDates()); - // 요청 선박만 필터링 + 방어적 복사 (캐시 원본 보호: simplify가 in-place 수정하므로) int totalCachedCount = cachedTracks.size(); List filteredCached = cachedTracks.stream() - .filter(t -> requestedVesselKeys.contains(t.getSigSrcCd() + "_" + t.getTargetId())) + .filter(t -> requestedMmsis.contains(t.getVesselId())) .map(t -> t.toBuilder().build()) .collect(Collectors.toList()); - cachedTracks.clear(); // 메모리 즉시 해제: 캐시 참조 리스트 + cachedTracks.clear(); allTracks.addAll(filteredCached); log.debug("[CacheQuery] cached {} days -> {} tracks (filtered from {})", @@ -360,7 +324,6 @@ public class GisServiceV2 { .startTime(dbRange.getStart()) .endTime(dbRange.getEnd()) .vessels(request.getVessels()) - .isIntegration(request.getIsIntegration()) .build(); List dbTracks = gisService.getVesselTracks(dbRequest); allTracks.addAll(dbTracks); @@ -376,7 +339,6 @@ public class GisServiceV2 { .startTime(today.getStart()) .endTime(today.getEnd()) .vessels(request.getVessels()) - .isIntegration(request.getIsIntegration()) .build(); List todayTracks = gisService.getVesselTracks(todayRequest); allTracks.addAll(todayTracks); @@ -386,22 +348,13 @@ public class GisServiceV2 { // 4. 동일 선박 병합 (캐시 + DB 결과) List merged = mergeTracksByVessel(allTracks); - allTracks.clear(); // 메모리 즉시 해제: 병합 완료 후 원본 리스트 - - // 5. 
통합선박 필터링 (isIntegration이 null이거나 "1"이면 적용, "0"만 미적용) - String isInteg = request.getIsIntegration(); - if (!"0".equals(isInteg) && integrationVesselService.isEnabled()) { - merged = filterByIntegration(merged); - } + allTracks.clear(); return merged; } // ── Semaphore 슬롯 획득 ── - /** - * REST V2 전용 슬롯 획득: 즉시 시도 → blocking 대기 → 타임아웃 시 예외 - */ private boolean acquireSlotWithWait(String queryId) { if (activeQueryManager.tryAcquireQuerySlotImmediate(queryId)) { return true; @@ -422,20 +375,12 @@ public class GisServiceV2 { // ── 간소화 파이프라인 ── - /** - * 2단계 간소화 파이프라인 - * [1단계] 표준 간소화 (DP + 거리/시간 + 줌) - * [2단계] 포인트 버짓 강제 (총 포인트 상한 초과 시 균일 Nth-point) - */ private List applySimplificationPipeline(List tracks) { if (tracks == null || tracks.isEmpty()) { return tracks; } - // 1단계: 표준 간소화 tracks = cacheTrackSimplifier.simplify(tracks, CacheTrackSimplifier.SimplificationConfig.builder().build()); - - // 2단계: 포인트 버짓 강제 tracks = cacheTrackSimplifier.enforcePointBudget(tracks, maxTotalPoints); return tracks; @@ -443,19 +388,14 @@ public class GisServiceV2 { // ── 선박별 트랙 병합 ── - /** - * 동일 선박(vesselId)의 트랙을 병합 - * 캐시와 DB에서 동일 선박 데이터가 올 수 있으므로 geometry/timestamps/speeds 합산 - */ private List mergeTracksByVessel(List tracks) { if (tracks == null || tracks.size() <= 1) { return tracks != null ? tracks : Collections.emptyList(); } Map> grouped = tracks.stream() - .collect(Collectors.groupingBy(t -> t.getSigSrcCd() + "_" + t.getTargetId())); + .collect(Collectors.groupingBy(CompactVesselTrack::getVesselId)); - // 병합이 필요 없는 경우 (모든 선박이 1개씩만) if (grouped.values().stream().allMatch(list -> list.size() == 1)) { return tracks; } @@ -470,7 +410,6 @@ public class GisServiceV2 { continue; } - // 첫 번째 트랙을 기준으로 병합 CompactVesselTrack base = vesselTracks.get(0); List allGeometry = new ArrayList<>(base.getGeometry() != null ? base.getGeometry() : Collections.emptyList()); List allTimestamps = new ArrayList<>(base.getTimestamps() != null ? 
base.getTimestamps() : Collections.emptyList()); @@ -491,13 +430,10 @@ public class GisServiceV2 { CompactVesselTrack mergedTrack = CompactVesselTrack.builder() .vesselId(base.getVesselId()) - .sigSrcCd(base.getSigSrcCd()) - .targetId(base.getTargetId()) .nationalCode(base.getNationalCode()) .shipName(base.getShipName()) .shipType(base.getShipType()) .shipKindCode(base.getShipKindCode()) - .integrationTargetId(base.getIntegrationTargetId()) .geometry(allGeometry) .timestamps(allTimestamps) .speeds(allSpeeds) @@ -514,12 +450,11 @@ public class GisServiceV2 { return merged; } - // ── 기존 유틸리티 메서드 (변경 없음) ── + // ── 유틸리티 메서드 ── private TrackResponse mapTrackResponse(ResultSet rs, int rowNum) throws SQLException { return TrackResponse.builder() - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) + .mmsi(rs.getString("mmsi")) .timeBucket(rs.getObject("time_bucket", LocalDateTime.class)) .trackGeom(rs.getString("track_geom")) .distanceNm(rs.getBigDecimal("distance_nm")) @@ -529,10 +464,8 @@ public class GisServiceV2 { .build(); } - private TrackConverter.VesselInfo getVesselInfo(String sigSrcCd, String targetId) { - String cacheKey = sigSrcCd + "_" + targetId; - - VesselInfoCache cached = vesselInfoCache.get(cacheKey); + private TrackConverter.VesselInfo getVesselInfo(String mmsi) { + VesselInfoCache cached = vesselInfoCache.get(mmsi); if (cached != null && !cached.isExpired()) { return new TrackConverter.VesselInfo(cached.shipName, cached.shipType); } @@ -540,17 +473,17 @@ public class GisServiceV2 { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); try { String sql = """ - SELECT ship_nm, ship_ty - FROM signal.t_vessel_latest_position - WHERE sig_src_cd = ? AND target_id = ? + SELECT ship_nm, vessel_type + FROM signal.t_ais_position + WHERE mmsi = ? LIMIT 1 """; - Map result = jdbcTemplate.queryForMap(sql, sigSrcCd, targetId); + Map result = jdbcTemplate.queryForMap(sql, mmsi); String shipName = result.get("ship_nm") != null ? 
result.get("ship_nm").toString() : "-"; - String shipType = result.get("ship_ty") != null ? result.get("ship_ty").toString() : "-"; + String shipType = result.get("vessel_type") != null ? result.get("vessel_type").toString() : "-"; - vesselInfoCache.put(cacheKey, new VesselInfoCache(shipName, shipType)); + vesselInfoCache.put(mmsi, new VesselInfoCache(shipName, shipType)); return new TrackConverter.VesselInfo(shipName, shipType); } catch (Exception e) { @@ -558,79 +491,6 @@ public class GisServiceV2 { } } - private List filterByIntegration(List tracks) { - if (tracks == null || tracks.isEmpty()) { - return tracks; - } - - Map vesselIntegrations = new HashMap<>(); - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - if (!vesselIntegrations.containsKey(key)) { - IntegrationVessel integration = integrationVesselService.findByVessel( - track.getSigSrcCd(), track.getTargetId() - ); - vesselIntegrations.put(key, integration); - } - } - - Map> groupedByIntegration = new HashMap<>(); - Map integrationMap = new HashMap<>(); - - long tempSeq = -1; - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - IntegrationVessel integration = vesselIntegrations.get(key); - - Long seq; - if (integration != null) { - seq = integration.getIntgrSeq(); - integrationMap.putIfAbsent(seq, integration); - } else { - seq = tempSeq--; - } - - groupedByIntegration.computeIfAbsent(seq, k -> new ArrayList<>()).add(track); - } - - List result = new ArrayList<>(); - - for (Map.Entry> entry : groupedByIntegration.entrySet()) { - Long seq = entry.getKey(); - List groupTracks = entry.getValue(); - - if (seq < 0) { - CompactVesselTrack firstTrack = groupTracks.get(0); - String soloIntegrationId = IntegrationSignalConstants.generateSoloIntegrationId( - firstTrack.getSigSrcCd(), - firstTrack.getTargetId() - ); - groupTracks.forEach(t -> t.setIntegrationTargetId(soloIntegrationId)); - 
result.addAll(groupTracks); - } else { - IntegrationVessel integration = integrationMap.get(seq); - - Set existingSigSrcCds = groupTracks.stream() - .map(CompactVesselTrack::getSigSrcCd) - .collect(Collectors.toSet()); - - String selectedSigSrcCd = integrationVesselService.selectHighestPriorityFromExisting(existingSigSrcCds); - - List selectedTracks = groupTracks.stream() - .filter(t -> t.getSigSrcCd().equals(selectedSigSrcCd)) - .collect(Collectors.toList()); - - String integrationId = integration.generateIntegrationId(); - selectedTracks.forEach(t -> t.setIntegrationTargetId(integrationId)); - - result.addAll(selectedTracks); - } - } - - log.info("[INTEGRATION_FILTER] V2 API - Filtered {} tracks to {} tracks", tracks.size(), result.size()); - return result; - } - private static class VesselInfoCache { String shipName; String shipType; diff --git a/src/main/java/gc/mda/signal_batch/domain/gis/service/VesselContactService.java b/src/main/java/gc/mda/signal_batch/domain/gis/service/VesselContactService.java index 568b6d1..047aa6c 100644 --- a/src/main/java/gc/mda/signal_batch/domain/gis/service/VesselContactService.java +++ b/src/main/java/gc/mda/signal_batch/domain/gis/service/VesselContactService.java @@ -54,18 +54,8 @@ public class VesselContactService { return buildEmptyResponse(request, targetDates, startMs); } - // 3. sigSrcCd 필터 - String targetSigSrcCd = request.getSigSrcCd(); - Map filtered = new HashMap<>(); - for (Map.Entry entry : mergedTracks.entrySet()) { - if (targetSigSrcCd.equals(entry.getValue().getSigSrcCd())) { - filtered.put(entry.getKey(), entry.getValue()); - } - } - - if (filtered.isEmpty()) { - return buildEmptyResponse(request, targetDates, startMs); - } + // 3. 병합된 트랙을 직접 사용 (단일 수집원이므로 필터 불필요) + Map filtered = mergedTracks; // 4. 
JTS Polygon + PreparedGeometry VesselContactRequest.SearchPolygon poly = request.getPolygon(); @@ -94,8 +84,8 @@ public class VesselContactService { } int totalVesselsInPolygon = insidePositions.size(); - log.info("Vessel contact: sigSrcCd={}, filtered={}, insidePolygon={}, dates={}", - targetSigSrcCd, filtered.size(), totalVesselsInPolygon, targetDates.size()); + log.info("Vessel contact: filtered={}, insidePolygon={}, dates={}", + filtered.size(), totalVesselsInPolygon, targetDates.size()); // 6. 시간 범위 겹침 사전 필터 + 선박 쌍별 접촉 판정 List vesselIds = new ArrayList<>(insidePositions.keySet()); @@ -336,7 +326,6 @@ public class VesselContactService { .shipType(track.getShipType()) .shipKindCode(track.getShipKindCode()) .nationalCode(track.getNationalCode()) - .integrationTargetId(track.getIntegrationTargetId()) .insidePolygonStartTs(startTs) .insidePolygonEndTs(endTs) .insidePolygonDurationMinutes(durationMin) diff --git a/src/main/java/gc/mda/signal_batch/domain/passage/controller/SequentialPassageController.java b/src/main/java/gc/mda/signal_batch/domain/passage/controller/SequentialPassageController.java index 589ee75..5eba552 100644 --- a/src/main/java/gc/mda/signal_batch/domain/passage/controller/SequentialPassageController.java +++ b/src/main/java/gc/mda/signal_batch/domain/passage/controller/SequentialPassageController.java @@ -153,9 +153,8 @@ public class SequentialPassageController { String sql = String.format(""" WITH vessel_zones AS ( - SELECT - sig_src_cd, - target_id, + SELECT + mmsi, COUNT(DISTINCT %s) as zone_count, array_agg(DISTINCT %s ORDER BY %s) as visited_zones, MIN(time_bucket) as first_seen, @@ -165,7 +164,7 @@ public class SequentialPassageController { FROM signal.%s WHERE time_bucket BETWEEN ? AND ? AND %s = ANY(?) - GROUP BY sig_src_cd, target_id + GROUP BY mmsi HAVING COUNT(DISTINCT %s) = ? 
) SELECT * FROM vessel_zones @@ -208,20 +207,19 @@ public class SequentialPassageController { private SequentialPassageResponse.VesselPassage buildVesselPassage( Map row, SequentialPassageRequest request) { - String sigSrcCd = (String) row.get("sig_src_cd"); - String targetId = (String) row.get("target_id"); - + String mmsi = (String) row.get("mmsi"); + // 구역별 통과 정보 구성 List zonePassages = new ArrayList<>(); - + for (int i = 0; i < request.getZoneIds().size(); i++) { String zoneId = request.getZoneIds().get(i); - String prefix = request.getType() == SequentialPassageRequest.PassageType.GRID + String prefix = request.getType() == SequentialPassageRequest.PassageType.GRID ? "haegu" : "area"; - + Timestamp entryTime = (Timestamp) row.get(prefix + (i + 1) + "_entry"); Timestamp exitTime = (Timestamp) row.get(prefix + (i + 1) + "_exit"); - + if (entryTime != null) { zonePassages.add(SequentialPassageResponse.ZonePassage.builder() .zoneId(zoneId) @@ -231,13 +229,12 @@ public class SequentialPassageController { .build()); } } - + // 선박 정보 조회 (캐시 활용 가능) - SequentialPassageResponse.VesselInfo vesselInfo = getVesselInfo(sigSrcCd, targetId); - + SequentialPassageResponse.VesselInfo vesselInfo = getVesselInfo(mmsi); + return SequentialPassageResponse.VesselPassage.builder() - .sigSrcCd(sigSrcCd) - .targetId(targetId) + .mmsi(mmsi) .vesselInfo(vesselInfo) .zonePassages(zonePassages) .build(); @@ -251,18 +248,18 @@ public class SequentialPassageController { } } - private SequentialPassageResponse.VesselInfo getVesselInfo(String sigSrcCd, String targetId) { + private SequentialPassageResponse.VesselInfo getVesselInfo(String mmsi) { JdbcTemplate jdbcTemplate = new JdbcTemplate(queryDataSource); String sql = """ - SELECT ship_nm as ship_name, ship_ty as ship_type - FROM signal.t_vessel_latest_position - WHERE sig_src_cd = ? AND target_id = ? + SELECT ship_nm as ship_name, vessel_type as ship_type + FROM signal.t_ais_position + WHERE mmsi = ? 
LIMIT 1 """; try { - Map result = jdbcTemplate.queryForMap(sql, sigSrcCd, targetId); + Map result = jdbcTemplate.queryForMap(sql, mmsi); return SequentialPassageResponse.VesselInfo.builder() .shipName(result.get("ship_name") != null ? (String) result.get("ship_name") : null) .shipType(result.get("ship_type") != null ? (String) result.get("ship_type") : null) @@ -283,7 +280,7 @@ public class SequentialPassageController { String sql = """ SELECT - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as unique_vessels, + COUNT(DISTINCT mmsi) as unique_vessels, COUNT(*) as total_passages, SUM(distance_nm) as total_distance, AVG(avg_speed) as avg_speed, diff --git a/src/main/java/gc/mda/signal_batch/domain/passage/dto/SequentialPassageResponse.java b/src/main/java/gc/mda/signal_batch/domain/passage/dto/SequentialPassageResponse.java index bfd6694..def8326 100644 --- a/src/main/java/gc/mda/signal_batch/domain/passage/dto/SequentialPassageResponse.java +++ b/src/main/java/gc/mda/signal_batch/domain/passage/dto/SequentialPassageResponse.java @@ -42,11 +42,8 @@ public class SequentialPassageResponse { @Schema(description = "선박 통과 정보") public static class VesselPassage { - @Schema(description = "신호원 코드", example = "000001") - private String sigSrcCd; - - @Schema(description = "타겟 ID", example = "440308230") - private String targetId; + @Schema(description = "MMSI", example = "440308230") + private String mmsi; @Schema(description = "선박 정보") private VesselInfo vesselInfo; diff --git a/src/main/java/gc/mda/signal_batch/domain/passage/service/SequentialAreaTrackingService.java b/src/main/java/gc/mda/signal_batch/domain/passage/service/SequentialAreaTrackingService.java index 5fc9f3e..2dbbc38 100644 --- a/src/main/java/gc/mda/signal_batch/domain/passage/service/SequentialAreaTrackingService.java +++ b/src/main/java/gc/mda/signal_batch/domain/passage/service/SequentialAreaTrackingService.java @@ -41,25 +41,23 @@ public class SequentialAreaTrackingService { String sql = """ WITH 
vessel_passages AS ( SELECT DISTINCT - sig_src_cd, - target_id, + mmsi, haegu_no, FIRST_VALUE(time_bucket) OVER ( - PARTITION BY sig_src_cd, target_id, haegu_no + PARTITION BY mmsi, haegu_no ORDER BY time_bucket ) as entry_time, LAST_VALUE(time_bucket) OVER ( - PARTITION BY sig_src_cd, target_id, haegu_no - ORDER BY time_bucket + PARTITION BY mmsi, haegu_no + ORDER BY time_bucket ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING ) as exit_time FROM signal.t_grid_vessel_tracks WHERE time_bucket BETWEEN ? AND ? AND haegu_no = ANY(ARRAY[?]::integer[]) ) - SELECT - v1.sig_src_cd, - v1.target_id, + SELECT + v1.mmsi, v1.entry_time as haegu1_entry, v1.exit_time as haegu1_exit, v2.entry_time as haegu2_entry, @@ -67,11 +65,9 @@ public class SequentialAreaTrackingService { v3.entry_time as haegu3_entry, v3.exit_time as haegu3_exit FROM vessel_passages v1 - JOIN vessel_passages v2 ON v1.sig_src_cd = v2.sig_src_cd - AND v1.target_id = v2.target_id + JOIN vessel_passages v2 ON v1.mmsi = v2.mmsi AND v2.haegu_no = ? AND v2.entry_time > v1.exit_time - JOIN vessel_passages v3 ON v2.sig_src_cd = v3.sig_src_cd - AND v2.target_id = v3.target_id + JOIN vessel_passages v3 ON v2.mmsi = v3.mmsi AND v3.haegu_no = ? AND v3.entry_time > v2.exit_time WHERE v1.haegu_no = ? ORDER BY v1.entry_time @@ -100,25 +96,23 @@ public class SequentialAreaTrackingService { String sql = """ WITH area_passages AS ( SELECT DISTINCT - sig_src_cd, - target_id, + mmsi, area_id, FIRST_VALUE(time_bucket) OVER ( - PARTITION BY sig_src_cd, target_id, area_id + PARTITION BY mmsi, area_id ORDER BY time_bucket ) as entry_time, LAST_VALUE(time_bucket) OVER ( - PARTITION BY sig_src_cd, target_id, area_id - ORDER BY time_bucket + PARTITION BY mmsi, area_id + ORDER BY time_bucket ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING ) as exit_time FROM signal.t_area_vessel_tracks WHERE time_bucket BETWEEN ? AND ? 
AND area_id = ANY(ARRAY[?]::varchar[]) ) - SELECT - a1.sig_src_cd, - a1.target_id, + SELECT + a1.mmsi, a1.entry_time as area1_entry, a1.exit_time as area1_exit, a2.entry_time as area2_entry, @@ -126,11 +120,9 @@ public class SequentialAreaTrackingService { a3.entry_time as area3_entry, a3.exit_time as area3_exit FROM area_passages a1 - JOIN area_passages a2 ON a1.sig_src_cd = a2.sig_src_cd - AND a1.target_id = a2.target_id + JOIN area_passages a2 ON a1.mmsi = a2.mmsi AND a2.area_id = ? AND a2.entry_time > a1.exit_time - JOIN area_passages a3 ON a2.sig_src_cd = a3.sig_src_cd - AND a2.target_id = a3.target_id + JOIN area_passages a3 ON a2.mmsi = a3.mmsi AND a3.area_id = ? AND a3.entry_time > a2.exit_time WHERE a1.area_id = ? ORDER BY a1.entry_time @@ -158,7 +150,7 @@ public class SequentialAreaTrackingService { String sql = """ SELECT - COUNT(DISTINCT CONCAT(sig_src_cd, '_', target_id)) as unique_vessels, + COUNT(DISTINCT mmsi) as unique_vessels, COUNT(*) as total_passages, SUM(distance_nm) as total_distance, AVG(avg_speed) as avg_speed, diff --git a/src/main/java/gc/mda/signal_batch/domain/track/controller/AbnormalTrackController.java b/src/main/java/gc/mda/signal_batch/domain/track/controller/AbnormalTrackController.java index 12cfc72..ccc4a88 100644 --- a/src/main/java/gc/mda/signal_batch/domain/track/controller/AbnormalTrackController.java +++ b/src/main/java/gc/mda/signal_batch/domain/track/controller/AbnormalTrackController.java @@ -46,20 +46,19 @@ public class AbnormalTrackController { return ResponseEntity.ok(tracks); } - @GetMapping("/vessel/{sigSrcCd}/{targetId}") + @GetMapping("/vessel/{mmsi}") @Operation(summary = "특정 선박의 비정상 항적 이력", description = "특정 선박의 비정상 항적 이력을 조회합니다.") public ResponseEntity> getVesselAbnormalTracks( - @PathVariable String sigSrcCd, - @PathVariable String targetId, + @PathVariable String mmsi, @Parameter(description = "시작 날짜") @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE) LocalDate startDate, @Parameter(description = 
"종료 날짜") @RequestParam @DateTimeFormat(iso = DateTimeFormat.ISO.DATE) LocalDate endDate) { - + List tracks = abnormalTrackService.getVesselAbnormalTracks( - sigSrcCd, targetId, startDate.atStartOfDay(), endDate.plusDays(1).atStartOfDay() + mmsi, startDate.atStartOfDay(), endDate.plusDays(1).atStartOfDay() ); - + return ResponseEntity.ok(tracks); } @@ -184,11 +183,8 @@ public class AbnormalTrackController { @lombok.Data public static class TrackIdentifier { - @Schema(description = "신호 소스 코드", example = "000001") - private String sigSrcCd; - - @Schema(description = "타겟 ID", example = "440123456") - private String targetId; + @Schema(description = "MMSI", example = "440123456") + private String mmsi; @Schema( description = """ diff --git a/src/main/java/gc/mda/signal_batch/domain/track/dto/AbnormalTrackResponse.java b/src/main/java/gc/mda/signal_batch/domain/track/dto/AbnormalTrackResponse.java index 5e1d449..64c9808 100644 --- a/src/main/java/gc/mda/signal_batch/domain/track/dto/AbnormalTrackResponse.java +++ b/src/main/java/gc/mda/signal_batch/domain/track/dto/AbnormalTrackResponse.java @@ -14,9 +14,7 @@ import java.util.Map; @Builder public class AbnormalTrackResponse { private Long id; - private String sigSrcCd; - private String targetId; - private String vesselId; // sigSrcCd:targetId + private String mmsi; private LocalDateTime timeBucket; private String abnormalType; private String typeDescription; @@ -28,7 +26,7 @@ public class AbnormalTrackResponse { private String sourceTable; private LocalDateTime detectedAt; private Map details; - + // GeoJSON 형식의 궤적 (선택적) private Object trackGeoJson; -} \ No newline at end of file +} diff --git a/src/main/java/gc/mda/signal_batch/domain/track/service/AbnormalTrackService.java b/src/main/java/gc/mda/signal_batch/domain/track/service/AbnormalTrackService.java index 547dcf3..f74f505 100644 --- a/src/main/java/gc/mda/signal_batch/domain/track/service/AbnormalTrackService.java +++ 
b/src/main/java/gc/mda/signal_batch/domain/track/service/AbnormalTrackService.java @@ -43,11 +43,9 @@ public class AbnormalTrackService { */ public List getAbnormalTracksSince(LocalDateTime since) { String sql = """ - SELECT + SELECT id, - sig_src_cd, - target_id, - sig_src_cd || ':' || target_id as vessel_id, + mmsi, time_bucket, abnormal_type, abnormal_reason, @@ -77,23 +75,21 @@ public class AbnormalTrackService { ORDER BY detected_at DESC LIMIT 1000 """; - + return jdbcTemplate.query(sql, (rs, rowNum) -> { Map abnormalReason = null; try { abnormalReason = objectMapper.readValue( - rs.getString("abnormal_reason"), + rs.getString("abnormal_reason"), new TypeReference>() {} ); } catch (Exception e) { log.error("Failed to parse abnormal_reason: {}", e.getMessage()); } - + return AbnormalTrackResponse.builder() .id(rs.getLong("id")) - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) - .vesselId(rs.getString("vessel_id")) + .mmsi(rs.getString("mmsi")) .timeBucket(rs.getTimestamp("time_bucket").toLocalDateTime()) .abnormalType(rs.getString("abnormal_type")) .typeDescription(getTypeDescription(rs.getString("abnormal_type"))) @@ -114,14 +110,12 @@ public class AbnormalTrackService { * 특정 선박의 비정상 궤적 이력 조회 */ public List getVesselAbnormalTracks( - String sigSrcCd, String targetId, LocalDateTime startTime, LocalDateTime endTime) { - + String mmsi, LocalDateTime startTime, LocalDateTime endTime) { + String sql = """ - SELECT + SELECT id, - sig_src_cd, - target_id, - sig_src_cd || ':' || target_id as vessel_id, + mmsi, time_bucket, abnormal_type, abnormal_reason, @@ -132,29 +126,26 @@ public class AbnormalTrackService { source_table, detected_at FROM signal.t_abnormal_tracks - WHERE sig_src_cd = ? - AND target_id = ? + WHERE mmsi = ? AND time_bucket >= ? AND time_bucket < ? 
ORDER BY time_bucket DESC """; - + return jdbcTemplate.query(sql, (rs, rowNum) -> { Map abnormalReason = null; try { abnormalReason = objectMapper.readValue( - rs.getString("abnormal_reason"), + rs.getString("abnormal_reason"), new TypeReference>() {} ); } catch (Exception e) { log.error("Failed to parse abnormal_reason: {}", e.getMessage()); } - + return AbnormalTrackResponse.builder() .id(rs.getLong("id")) - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) - .vesselId(rs.getString("vessel_id")) + .mmsi(rs.getString("mmsi")) .timeBucket(rs.getTimestamp("time_bucket").toLocalDateTime()) .abnormalType(rs.getString("abnormal_type")) .typeDescription(getTypeDescription(rs.getString("abnormal_type"))) @@ -167,7 +158,7 @@ public class AbnormalTrackService { .detectedAt(rs.getTimestamp("detected_at").toLocalDateTime()) .details(abnormalReason) .build(); - }, sigSrcCd, targetId, startTime, endTime); + }, mmsi, startTime, endTime); } /** @@ -214,21 +205,21 @@ public class AbnormalTrackService { SELECT COUNT(DISTINCT abnormal_type) as type_count, COUNT(*) as total_tracks, - COUNT(DISTINCT sig_src_cd || ':' || target_id) as vessel_count, + COUNT(DISTINCT mmsi) as vessel_count, AVG(distance_nm) as avg_distance, MAX(max_speed) as max_speed_detected FROM signal.t_abnormal_tracks WHERE detected_at >= ? """; - + Map summary = jdbcTemplate.queryForMap(totalSql, since); - + // 유형별 통계 String typeSql = """ - SELECT + SELECT abnormal_type, COUNT(*) as count, - COUNT(DISTINCT sig_src_cd || ':' || target_id) as vessel_count + COUNT(DISTINCT mmsi) as vessel_count FROM signal.t_abnormal_tracks WHERE detected_at >= ? 
GROUP BY abnormal_type @@ -278,10 +269,8 @@ public class AbnormalTrackService { // track_geom 사용 String sql = String.format(""" - SELECT - sig_src_cd, - target_id, - sig_src_cd || ':' || target_id as vessel_id, + SELECT + mmsi, time_bucket, distance_nm, avg_speed, @@ -312,15 +301,15 @@ public class AbnormalTrackService { ORDER BY time_bucket DESC, distance_nm DESC LIMIT 500 """, tableName); - + return jdbcTemplate.query(sql, (rs, rowNum) -> { // 비정상 유형 결정 String abnormalType = "user_detected"; String description = "사용자 정의 기준 (거리: " + finalMinDistance + "nm, 속도: " + finalMinSpeed + "kts 이상)"; - + BigDecimal distance = rs.getBigDecimal("distance_nm"); BigDecimal avgSpeed = rs.getBigDecimal("avg_speed"); - + if (avgSpeed != null && avgSpeed.compareTo(new BigDecimal("100")) > 0) { abnormalType = "extreme_speed"; description = "극단적 속도: " + avgSpeed + "kts"; @@ -328,19 +317,16 @@ public class AbnormalTrackService { abnormalType = "extreme_distance"; description = "극단적 이동거리: " + distance + "nm"; } - - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); + + String mmsi = rs.getString("mmsi"); LocalDateTime timeBucket = rs.getTimestamp("time_bucket").toLocalDateTime(); - + // ID를 조합 해시로 생성 (고유성 보장) - long generatedId = (sigSrcCd + targetId + timeBucket.toString()).hashCode() & 0x7fffffffL; - + long generatedId = (mmsi + timeBucket.toString()).hashCode() & 0x7fffffffL; + return AbnormalTrackResponse.builder() .id(generatedId) - .sigSrcCd(sigSrcCd) - .targetId(targetId) - .vesselId(rs.getString("vessel_id")) + .mmsi(mmsi) .timeBucket(timeBucket) .abnormalType(abnormalType) .typeDescription(getTypeDescription(abnormalType)) @@ -389,56 +375,52 @@ public class AbnormalTrackService { String insertSql = String.format(""" INSERT INTO signal.t_abnormal_tracks ( - sig_src_cd, target_id, time_bucket, abnormal_type, abnormal_reason, + mmsi, time_bucket, abnormal_type, abnormal_reason, distance_nm, avg_speed, max_speed, point_count, track_geom, 
source_table, detected_at ) - SELECT - sig_src_cd, target_id, time_bucket, ?, ?::jsonb, + SELECT + mmsi, time_bucket, ?, ?::jsonb, distance_nm, avg_speed, max_speed, point_count, track_geom, ?, NOW() FROM signal.%s - WHERE sig_src_cd = ? - AND target_id = ? + WHERE mmsi = ? AND time_bucket = ? """, sourceTable); - - int inserted = jdbcTemplate.update(insertSql, + + int inserted = jdbcTemplate.update(insertSql, abnormalType, reasonJson, sourceTable, - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), track.getTimeBucket() ); - + if (inserted > 0) { // 2. 원본 테이블에서 삭제 String deleteSql = String.format(""" DELETE FROM signal.%s - WHERE sig_src_cd = ? - AND target_id = ? + WHERE mmsi = ? AND time_bucket = ? """, sourceTable); - + int deleted = jdbcTemplate.update(deleteSql, - track.getSigSrcCd(), - track.getTargetId(), + track.getMmsi(), track.getTimeBucket() ); - + if (deleted > 0) { totalMoved++; - log.debug("Moved track: {} {} {}", - track.getSigSrcCd(), track.getTargetId(), track.getTimeBucket()); + log.debug("Moved track: {} {}", + track.getMmsi(), track.getTimeBucket()); } else { - log.warn("Failed to delete track after insert: {} {} {}", - track.getSigSrcCd(), track.getTargetId(), track.getTimeBucket()); + log.warn("Failed to delete track after insert: {} {}", + track.getMmsi(), track.getTimeBucket()); } } } catch (Exception e) { - log.error("Error moving track {} {} {}: {}", - track.getSigSrcCd(), track.getTargetId(), track.getTimeBucket(), e.getMessage()); + log.error("Error moving track {} {}: {}", + track.getMmsi(), track.getTimeBucket(), e.getMessage()); } } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetApiResponse.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetApiResponse.java new file mode 100644 index 0000000..51a8844 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetApiResponse.java @@ -0,0 +1,27 @@ +package gc.mda.signal_batch.domain.vessel.dto; + +import 
com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.util.List; + +/** + * S&P Global AIS GetTargetsEnhanced API 응답 래퍼 + * + * API 응답 구조: + * { + * "targetEnhancedArr": [...] + * } + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class AisTargetApiResponse { + + @JsonProperty("targetEnhancedArr") + private List targetArr; +} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetDto.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetDto.java new file mode 100644 index 0000000..89001d5 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/AisTargetDto.java @@ -0,0 +1,183 @@ +package gc.mda.signal_batch.domain.vessel.dto; + +import com.fasterxml.jackson.annotation.JsonProperty; +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +/** + * S&P Global AIS API 응답 DTO + * + * API: POST /AisSvc.svc/AIS/GetTargetsEnhanced + * Request: {"sinceSeconds": "60"} + * Response: {"targetEnhancedArr": [...]} + * + * mmsi는 String 타입 — 문자 혼합 MMSI 장비 수집 지원 + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class AisTargetDto { + + @JsonProperty("MMSI") + private String mmsi; + + @JsonProperty("IMO") + private Long imo; + + @JsonProperty("AgeMinutes") + private Double ageMinutes; + + @JsonProperty("Lat") + private Double lat; + + @JsonProperty("Lon") + private Double lon; + + @JsonProperty("Heading") + private Double heading; + + @JsonProperty("SoG") + private Double sog; + + @JsonProperty("CoG") + private Double cog; + + @JsonProperty("Width") + private Integer width; + + @JsonProperty("Length") + private Integer length; + + @JsonProperty("Draught") + private Double draught; + + @JsonProperty("Name") + private String name; + + @JsonProperty("Callsign") + private String callsign; + + 
@JsonProperty("Destination") + private String destination; + + @JsonProperty("ETA") + private String eta; + + @JsonProperty("Status") + private String status; + + @JsonProperty("VesselType") + private String vesselType; + + @JsonProperty("ExtraInfo") + private String extraInfo; + + @JsonProperty("PositionAccuracy") + private Integer positionAccuracy; + + @JsonProperty("RoT") + private Integer rot; + + @JsonProperty("TimestampUTC") + private Integer timestampUtc; + + @JsonProperty("RepeatIndicator") + private Integer repeatIndicator; + + @JsonProperty("RAIMFlag") + private Integer raimFlag; + + @JsonProperty("RadioStatus") + private Integer radioStatus; + + @JsonProperty("Regional") + private Integer regional; + + @JsonProperty("Regional2") + private Integer regional2; + + @JsonProperty("Spare") + private Integer spare; + + @JsonProperty("Spare2") + private Integer spare2; + + @JsonProperty("AISVersion") + private Integer aisVersion; + + @JsonProperty("PositionFixType") + private Integer positionFixType; + + @JsonProperty("DTE") + private Integer dte; + + @JsonProperty("BandFlag") + private Integer bandFlag; + + @JsonProperty("ReceivedDate") + private String receivedDate; + + @JsonProperty("MessageTimestamp") + private String messageTimestamp; + + @JsonProperty("LengthBow") + private Integer lengthBow; + + @JsonProperty("LengthStern") + private Integer lengthStern; + + @JsonProperty("WidthPort") + private Integer widthPort; + + @JsonProperty("WidthStarboard") + private Integer widthStarboard; + + // TargetEnhanced 추가 필드 + @JsonProperty("TonnesCargo") + private Integer tonnesCargo; + + @JsonProperty("InSTS") + private Integer inSTS; + + @JsonProperty("OnBerth") + private Boolean onBerth; + + @JsonProperty("DWT") + private Integer dwt; + + @JsonProperty("Anomalous") + private String anomalous; + + @JsonProperty("DestinationPortID") + private Integer destinationPortID; + + @JsonProperty("DestinationTidied") + private String destinationTidied; + + 
@JsonProperty("DestinationUNLOCODE") + private String destinationUNLOCODE; + + @JsonProperty("ImoVerified") + private String imoVerified; + + @JsonProperty("LastStaticUpdateReceived") + private String lastStaticUpdateReceived; + + @JsonProperty("LPCCode") + private Integer lpcCode; + + @JsonProperty("MessageType") + private Integer messageType; + + @JsonProperty("Source") + private String source; + + @JsonProperty("StationId") + private String stationId; + + @JsonProperty("ZoneId") + private Double zoneId; +} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/CompactVesselTrack.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/CompactVesselTrack.java index c294896..2604e93 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/CompactVesselTrack.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/CompactVesselTrack.java @@ -23,15 +23,9 @@ import java.util.List; @Schema(description = "선박별 병합된 항적 데이터 (WebSocket/REST 공통)") public class CompactVesselTrack { - @Schema(description = "선박 고유 ID (sigSrcCd_targetId)", example = "000001_440113620") + @Schema(description = "선박 고유 ID (MMSI)", example = "440113620") private String vesselId; - @Schema(description = "신호 소스 코드", example = "000001") - private String sigSrcCd; - - @Schema(description = "타겟 ID (MMSI 등)", example = "440113620") - private String targetId; - @Schema(description = "국적 코드 (MID 기반, MMSI 앞 3자리로 판별)", example = "KR") private String nationalCode; @@ -90,10 +84,4 @@ public class CompactVesselTrack { example = "000023" ) private String shipKindCode; - - @Schema( - description = "통합선박 ID (동일 선박의 다중 신호원 통합 식별자)", - example = "440113620___440113620_" - ) - private String integrationTargetId; } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/IntegrationVessel.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/IntegrationVessel.java deleted file mode 100644 index fb34f0e..0000000 --- 
a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/IntegrationVessel.java +++ /dev/null @@ -1,78 +0,0 @@ -package gc.mda.signal_batch.domain.vessel.dto; - -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; -import lombok.AllArgsConstructor; -import lombok.Builder; -import lombok.Data; -import lombok.NoArgsConstructor; - -/** - * 통합선박 정보 DTO - * signal.t_ship_integration_sub 테이블 매핑 - */ -@Data -@Builder -@NoArgsConstructor -@AllArgsConstructor -public class IntegrationVessel { - - private Long intgrSeq; - private String ais; - private String enav; - private String vpass; - private String vtsAis; - private String dMfHf; - private String aisShipNm; - private String enavShipNm; - private String vpassShipNm; - private String vtsAisShipNm; - private String dMfHfShipNm; - private String integrationShipTy; - - /** - * integration_target_id 생성 - */ - public String generateIntegrationId() { - return IntegrationSignalConstants.generateIntegrationId( - ais, enav, vpass, vtsAis, dMfHf - ); - } - - /** - * 신호 타입에 해당하는 target_id 반환 - */ - public String getTargetIdBySignalType(IntegrationSignalConstants.SignalType signalType) { - if (signalType == null) return null; - - return switch (signalType) { - case AIS -> ais; - case E_NAVIGATION -> enav; - case VPASS -> vpass; - case VTS_AIS -> vtsAis; - case D_MF_HF -> dMfHf; - }; - } - - /** - * 신호 타입에 해당하는 선박명 반환 - */ - public String getShipNameBySignalType(IntegrationSignalConstants.SignalType signalType) { - if (signalType == null) return null; - - return switch (signalType) { - case AIS -> aisShipNm; - case E_NAVIGATION -> enavShipNm; - case VPASS -> vpassShipNm; - case VTS_AIS -> vtsAisShipNm; - case D_MF_HF -> dMfHfShipNm; - }; - } - - /** - * 해당 신호 타입의 데이터가 존재하는지 확인 - */ - public boolean hasSignalType(IntegrationSignalConstants.SignalType signalType) { - String targetId = getTargetIdBySignalType(signalType); - return targetId != null && !"0".equals(targetId) && !targetId.isEmpty(); - } -} diff --git 
a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/RecentVesselPositionDto.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/RecentVesselPositionDto.java index 79e0f51..d3117d8 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/RecentVesselPositionDto.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/RecentVesselPositionDto.java @@ -17,14 +17,8 @@ import java.time.LocalDateTime; @Schema(description = "최근 위치 업데이트된 선박 정보") public class RecentVesselPositionDto { - @Schema( - description = "신호원 코드 (000001:AIS, 000002:LRIT, 000003:VPASS, 000004:VTS-AIS 등)", - example = "000001" - ) - private String sigSrcCd; - - @Schema(description = "대상 ID (MMSI: 9자리, 한국선박 440/441로 시작)", example = "440113620") - private String targetId; + @Schema(description = "MMSI (9자리, 한국선박 440/441로 시작)", example = "440113620") + private String mmsi; @Schema(description = "경도 (WGS84)", example = "127.0638") private Double lon; diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/TrackResponse.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/TrackResponse.java index e7df90c..93b2700 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/TrackResponse.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/TrackResponse.java @@ -13,16 +13,9 @@ import java.time.LocalDateTime; @Schema(description = "항적 세그먼트 (V1 API용, WKT 기반)") public class TrackResponse { - @JsonProperty("sig_src_cd") - @Schema( - description = "신호 소스 코드 (000001:AIS, 000002:LRIT, 000003:VPASS, 000004:VTS-AIS 등)", - example = "000001" - ) - private String sigSrcCd; - - @JsonProperty("target_id") - @Schema(description = "타겟 ID (MMSI: 9자리, 한국선박 440/441로 시작)", example = "440113620") - private String targetId; + @JsonProperty("mmsi") + @Schema(description = "MMSI (선박 식별자)", example = "440113620") + private String mmsi; @JsonProperty("track_geom") @Schema( diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselBucketPositionDto.java 
b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselBucketPositionDto.java index aab5ee7..cacdf0b 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselBucketPositionDto.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselBucketPositionDto.java @@ -15,42 +15,19 @@ import java.time.LocalDateTime; * 데이터 흐름: * 1. 5분 집계 완료 후 각 선박의 버킷 종료 위치 저장 * 2. 다음 버킷 처리 시 이전 위치와 비교하여 점프 검출 - * 3. 캐시 미스 시 t_vessel_latest_position 테이블에서 fallback 조회 + * 3. 캐시 미스 시 t_ais_position 테이블에서 fallback 조회 */ @Data @Builder @NoArgsConstructor @AllArgsConstructor public class VesselBucketPositionDto { - /** - * 신호원 코드 - */ - private String sigSrcCd; - - /** - * 대상 ID - */ - private String targetId; - - /** - * 경도 (버킷 종료 시점) - */ + private String mmsi; private Double endLon; - - /** - * 위도 (버킷 종료 시점) - */ private Double endLat; - - /** - * 버킷 종료 시간 - */ private LocalDateTime endTime; - /** - * 선박 키 생성 (sigSrcCd:targetId) - */ public String getVesselKey() { - return sigSrcCd + ":" + targetId; + return mmsi; } } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselTracksRequest.java b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselTracksRequest.java index 481b9aa..2cb8c48 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselTracksRequest.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/dto/VesselTracksRequest.java @@ -42,36 +42,6 @@ public class VesselTracksRequest { @JsonDeserialize(using = FlexibleLocalDateTimeDeserializer.class) private LocalDateTime endTime; - @Schema(description = "조회할 선박 목록", required = true) - private List vessels; - - @Schema( - description = "통합선박 필터링 모드 (0: 전체 항적, 1: 통합선박 우선순위 적용)", - example = "0", - allowableValues = {"0", "1"} - ) - @Builder.Default - private String isIntegration = "0"; - - @Data - @Builder - @AllArgsConstructor - @NoArgsConstructor - @Schema(description = "선박 식별자") - public static class VesselIdentifier { - - @Schema( - description = "신호 소스 코드 
(000001:AIS, 000002:LRIT, 000003:VPASS, 000004:VTS-AIS, 000019:항공기 등)", - example = "000001", - required = true - ) - private String sigSrcCd; - - @Schema( - description = "타겟 ID (MMSI: 9자리, 한국선박 440/441로 시작)", - example = "440113620", - required = true - ) - private String targetId; - } + @Schema(description = "조회할 선박 MMSI 목록", required = true) + private List vessels; } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/model/AisTargetEntity.java b/src/main/java/gc/mda/signal_batch/domain/vessel/model/AisTargetEntity.java new file mode 100644 index 0000000..2e651f1 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/model/AisTargetEntity.java @@ -0,0 +1,63 @@ +package gc.mda.signal_batch.domain.vessel.model; + +import lombok.AllArgsConstructor; +import lombok.Builder; +import lombok.Data; +import lombok.NoArgsConstructor; + +import java.time.OffsetDateTime; + +/** + * AIS Target 캐시 엔트리 및 t_ais_position 테이블 매핑 모델 + * + * 용도: + * - Caffeine 캐시 저장 (key: mmsi) + * - t_ais_position UPSERT (5분 집계 Job 편승) + * - 배치 집계 시 VesselData로 변환 + * + * mmsi는 String 타입 — 문자 혼합 MMSI 장비 수집 지원 (위법 장비 추적 필수) + */ +@Data +@Builder +@NoArgsConstructor +@AllArgsConstructor +public class AisTargetEntity { + + // ========== PK ========== + private String mmsi; + + // ========== 선박 식별 정보 ========== + private Long imo; + private String name; + private String callsign; + private String vesselType; + private String extraInfo; + + // ========== 위치 정보 ========== + private Double lat; + private Double lon; + // geom은 DB에서 ST_SetSRID(ST_MakePoint(lon, lat), 4326)로 생성 + + // ========== 항해 정보 ========== + private Double heading; + private Double sog; + private Double cog; + private Integer rot; + + // ========== 선박 제원 ========== + private Integer length; + private Integer width; + private Double draught; + + // ========== 목적지 정보 ========== + private String destination; + private OffsetDateTime eta; + private String status; + + // ========== 타임스탬프 ========== + private 
OffsetDateTime messageTimestamp; + + // ========== 선종 분류 정보 ========== + private String signalKindCode; + private String classType; +} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselData.java b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselData.java index a9ae4d4..225a5a3 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselData.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselData.java @@ -8,15 +8,17 @@ import lombok.NoArgsConstructor; import java.math.BigDecimal; import java.time.LocalDateTime; +/** + * AIS 수집 데이터 모델 + * S&P Global AIS API → Caffeine 캐시 → 배치 집계에서 사용 + */ @Data @Builder @NoArgsConstructor @AllArgsConstructor public class VesselData { + private String mmsi; private LocalDateTime messageTime; - private LocalDateTime realTime; - private String sigSrcCd; - private String targetId; private Double lat; private Double lon; private BigDecimal sog; @@ -25,19 +27,9 @@ public class VesselData { private String shipNm; private String shipTy; private Integer rot; - private Integer posacc; - private String sensorId; - private String baseStId; - private Integer mode; - private Integer gpsSttus; - private Integer batterySttus; - private String vtsCd; - private String mmsi; - private String vpassId; - private String shipNo; public String getVesselKey() { - return sigSrcCd + "_" + targetId; + return mmsi; } public boolean isValidPosition() { @@ -45,4 +37,4 @@ public class VesselData { lat >= -90 && lat <= 90 && lon >= -180 && lon <= 180; } -} \ No newline at end of file +} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselLatestPosition.java b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselLatestPosition.java index f405d28..7344752 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselLatestPosition.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselLatestPosition.java @@ -13,8 +13,7 @@ import 
java.time.LocalDateTime; @NoArgsConstructor @AllArgsConstructor public class VesselLatestPosition { - private String sigSrcCd; - private String targetId; + private String mmsi; private Double lat; private Double lon; private String geomWkt; @@ -29,8 +28,7 @@ public class VesselLatestPosition { public static VesselLatestPosition fromVesselData(VesselData data) { return VesselLatestPosition.builder() - .sigSrcCd(data.getSigSrcCd()) - .targetId(data.getTargetId()) + .mmsi(data.getMmsi()) .lat(data.getLat()) .lon(data.getLon()) .geomWkt(String.format("POINT(%f %f)", data.getLon(), data.getLat())) @@ -43,4 +41,4 @@ public class VesselLatestPosition { .updateCount(1L) .build(); } -} \ No newline at end of file +} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselTrack.java b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselTrack.java index d2145af..5245a71 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselTrack.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/model/VesselTrack.java @@ -7,6 +7,7 @@ import lombok.NoArgsConstructor; import java.io.Serializable; import java.math.BigDecimal; +import java.math.RoundingMode; import java.time.LocalDateTime; import java.util.List; @@ -15,33 +16,32 @@ import java.util.List; @NoArgsConstructor @AllArgsConstructor public class VesselTrack implements Serializable { - private static final long serialVersionUID = 1L; - + private static final long serialVersionUID = 2L; + // 기본 식별자 - private String sigSrcCd; - private String targetId; + private String mmsi; private LocalDateTime timeBucket; - + // 궤적 정보 private List trackPoints; - private String trackGeom; // MIGRATION_V2: PostGIS LineStringM WKT format (unix timestamp) + private String trackGeom; // PostGIS LineStringM WKT format (unix timestamp) private BigDecimal distanceNm; // 이동 거리 (해리) private BigDecimal avgSpeed; private BigDecimal maxSpeed; private Integer pointCount; - + // 시작/종료 위치 private TrackPosition 
startPosition; private TrackPosition endPosition; - + // 해구/구역 정보 (선택적) private Integer haeguNo; private String areaId; private LocalDateTime entryTime; private LocalDateTime exitTime; - + private LocalDateTime createdAt; - + @Data @Builder @NoArgsConstructor @@ -54,7 +54,7 @@ public class VesselTrack implements Serializable { private BigDecimal cog; private Integer heading; } - + @Data @Builder @NoArgsConstructor @@ -65,31 +65,30 @@ public class VesselTrack implements Serializable { private LocalDateTime time; private BigDecimal sog; } - + @Data @Builder @NoArgsConstructor @AllArgsConstructor public static class VesselKey implements Serializable { - private String sigSrcCd; - private String targetId; + private String mmsi; private LocalDateTime timeBucket; } - + public String getVesselKey() { - return sigSrcCd + "_" + targetId; + return mmsi; } - + public boolean hasValidTrack() { - return trackPoints != null && trackPoints.size() >= 1; // 1개 이상이면 유효 + return trackPoints != null && trackPoints.size() >= 1; } - + // 거리 계산 (Haversine formula) public BigDecimal calculateDistance() { if (!hasValidTrack()) { return BigDecimal.ZERO; } - + double totalDistance = 0.0; for (int i = 1; i < trackPoints.size(); i++) { TrackPoint prev = trackPoints.get(i - 1); @@ -99,10 +98,10 @@ public class VesselTrack implements Serializable { curr.getLat(), curr.getLon() ); } - - return BigDecimal.valueOf(totalDistance).setScale(2, BigDecimal.ROUND_HALF_UP); + + return BigDecimal.valueOf(totalDistance).setScale(2, RoundingMode.HALF_UP); } - + private double calculateDistanceBetweenPoints(double lat1, double lon1, double lat2, double lon2) { final double R = 3440.065; // 지구 반경 (해리) double dLat = Math.toRadians(lat2 - lat1); @@ -113,4 +112,4 @@ public class VesselTrack implements Serializable { double c = 2 * Math.atan2(Math.sqrt(a), Math.sqrt(1-a)); return R * c; } -} \ No newline at end of file +} diff --git 
a/src/main/java/gc/mda/signal_batch/domain/vessel/service/IntegrationVesselService.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/IntegrationVesselService.java deleted file mode 100644 index 23fe19b..0000000 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/IntegrationVesselService.java +++ /dev/null @@ -1,295 +0,0 @@ -package gc.mda.signal_batch.domain.vessel.service; - -import com.zaxxer.hikari.HikariConfig; -import com.zaxxer.hikari.HikariDataSource; -import gc.mda.signal_batch.domain.vessel.dto.IntegrationVessel; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants.SignalType; -import jakarta.annotation.PostConstruct; -import jakarta.annotation.PreDestroy; -import lombok.extern.slf4j.Slf4j; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.scheduling.annotation.Scheduled; -import org.springframework.stereotype.Service; - -import javax.sql.DataSource; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * 통합선박 정보 서비스 - * 글로벌 캐시를 통한 통합선박 정보 관리 - * - * 전용 DataSource 설정 (vessel.integration.datasource.*): - * - jdbc-url이 설정되면 별도 DB에서 통합선박 정보를 로드 - * - 미설정 시 queryDataSource를 폴백으로 사용 - */ -@Slf4j -@Service -public class IntegrationVesselService { - - private final DataSource queryDataSource; - - @Value("${vessel.integration.enabled:false}") - private boolean integrationEnabled; - - @Value("${vessel.integration.datasource.jdbc-url:}") - private String integrationJdbcUrl; - - @Value("${vessel.integration.datasource.username:}") - private String integrationUsername; - - @Value("${vessel.integration.datasource.password:}") - private String integrationPassword; - - @Value("${vessel.integration.table-name:signal.t_ship_integration_sub}") - 
private String integrationTableName; - - // 글로벌 캐시 (키: "sig_src_cd_target_id") - private final Map integrationCache = new ConcurrentHashMap<>(); - - // 캐시 로드 상태 - private final AtomicBoolean cacheLoaded = new AtomicBoolean(false); - - // 전용 DataSource (별도 DB 사용 시) - private DataSource integrationDataSource; - private boolean dedicatedDataSource = false; - - public IntegrationVesselService(@Qualifier("queryDataSource") DataSource queryDataSource) { - this.queryDataSource = queryDataSource; - } - - @PostConstruct - public void init() { - if (integrationEnabled && integrationJdbcUrl != null && !integrationJdbcUrl.isBlank()) { - try { - HikariConfig config = new HikariConfig(); - config.setJdbcUrl(integrationJdbcUrl); - config.setUsername(integrationUsername); - config.setPassword(integrationPassword); - config.setDriverClassName("org.postgresql.Driver"); - config.setMaximumPoolSize(3); - config.setMinimumIdle(1); - config.setPoolName("IntegrationHikariPool"); - config.setConnectionTimeout(10000); - config.setIdleTimeout(300000); - config.setMaxLifetime(600000); - config.setConnectionTestQuery("SELECT 1"); - this.integrationDataSource = new HikariDataSource(config); - this.dedicatedDataSource = true; - log.info("Integration dedicated DataSource created: {}", integrationJdbcUrl); - } catch (Exception e) { - log.warn("Failed to create integration DataSource, falling back to queryDataSource: {}", e.getMessage()); - this.integrationDataSource = queryDataSource; - } - } else { - this.integrationDataSource = queryDataSource; - if (integrationEnabled) { - log.info("Integration using queryDataSource (no dedicated datasource configured)"); - } - } - } - - @PreDestroy - public void destroy() { - if (dedicatedDataSource && integrationDataSource instanceof HikariDataSource hikari) { - hikari.close(); - log.info("Integration dedicated DataSource closed"); - } - } - - /** - * 통합선박 기능 활성화 여부 - */ - public boolean isEnabled() { - return integrationEnabled; - } - - /** - * 스케줄 갱신 (기본: 
매일 03:00) - */ - @Scheduled(cron = "${vessel.integration.cache.refresh-cron:0 0 3 * * ?}") - public void scheduledRefresh() { - if (!integrationEnabled) { - log.debug("Integration feature is disabled, skipping scheduled refresh"); - return; - } - log.info("Scheduled integration cache refresh started"); - loadCacheFromDB(); - } - - /** - * 단일 선박 통합정보 조회 (캐시에서) - */ - public IntegrationVessel findByVessel(String sigSrcCd, String targetId) { - if (!integrationEnabled) { - return null; - } - ensureCacheLoaded(); - return integrationCache.get(sigSrcCd + "_" + targetId); - } - - /** - * 다중 선박 통합정보 조회 (캐시에서) - * - * @param vesselKeys Set of "sig_src_cd_target_id" format - * @return Map of vesselKey -> IntegrationVessel - */ - public Map findByVessels(Set vesselKeys) { - if (!integrationEnabled) { - return new HashMap<>(); - } - ensureCacheLoaded(); - - Map result = new HashMap<>(); - for (String key : vesselKeys) { - IntegrationVessel vessel = integrationCache.get(key); - if (vessel != null) { - result.put(key, vessel); - } - } - return result; - } - - /** - * 존재하는 신호들 중 최고 우선순위 선택 - * - * @param existingSigSrcCds 존재하는 sig_src_cd 집합 - * @return 최고 우선순위 sig_src_cd - */ - public String selectHighestPriorityFromExisting(Set existingSigSrcCds) { - for (String sigSrcCd : IntegrationSignalConstants.PRIORITY_ORDER) { - if (existingSigSrcCds.contains(sigSrcCd)) { - return sigSrcCd; - } - } - // fallback: 아무거나 반환 - return existingSigSrcCds.isEmpty() ? 
null : existingSigSrcCds.iterator().next(); - } - - /** - * 통합선박에서 최고 우선순위 신호 타입 반환 - * (통합테이블에 등록된 신호 중) - */ - public SignalType getHighestPrioritySignalType(IntegrationVessel vessel) { - for (String sigSrcCd : IntegrationSignalConstants.PRIORITY_ORDER) { - SignalType type = SignalType.fromSigSrcCd(sigSrcCd); - if (type != null && vessel.hasSignalType(type)) { - return type; - } - } - return null; - } - - /** - * 캐시 수동 갱신 API - */ - public void refreshCache() { - log.info("Manual integration cache refresh requested"); - loadCacheFromDB(); - } - - /** - * 캐시 상태 조회 - */ - public Map getCacheStatus() { - Map status = new HashMap<>(); - status.put("enabled", integrationEnabled); - status.put("loaded", cacheLoaded.get()); - status.put("size", integrationCache.size()); - status.put("dedicatedDataSource", dedicatedDataSource); - status.put("tableName", integrationTableName); - return status; - } - - /** - * 캐시가 비어있으면 즉시 로드 (Fallback) - */ - private void ensureCacheLoaded() { - if (!cacheLoaded.get() || integrationCache.isEmpty()) { - synchronized (this) { - if (!cacheLoaded.get() || integrationCache.isEmpty()) { - log.info("Integration cache is empty, loading from DB (fallback)..."); - loadCacheFromDB(); - } - } - } - } - - /** - * DB에서 전체 통합선박 정보 로드 - */ - private void loadCacheFromDB() { - long startTime = System.currentTimeMillis(); - - try { - JdbcTemplate jdbcTemplate = new JdbcTemplate(integrationDataSource); - - String sql = "SELECT intgr_seq, ais, enav, vpass, vts_ais, d_mf_hf," + - " ais_ship_nm, enav_ship_nm, vpass_ship_nm, vts_ais_ship_nm, d_mf_hf_ship_nm," + - " integration_ship_ty FROM " + integrationTableName; - - List vessels = jdbcTemplate.query(sql, (rs, rowNum) -> - IntegrationVessel.builder() - .intgrSeq(rs.getLong("intgr_seq")) - .ais(rs.getString("ais")) - .enav(rs.getString("enav")) - .vpass(rs.getString("vpass")) - .vtsAis(rs.getString("vts_ais")) - .dMfHf(rs.getString("d_mf_hf")) - .aisShipNm(rs.getString("ais_ship_nm")) - 
.enavShipNm(rs.getString("enav_ship_nm")) - .vpassShipNm(rs.getString("vpass_ship_nm")) - .vtsAisShipNm(rs.getString("vts_ais_ship_nm")) - .dMfHfShipNm(rs.getString("d_mf_hf_ship_nm")) - .integrationShipTy(rs.getString("integration_ship_ty")) - .build() - ); - - // 캐시 초기화 및 재구성 - Map newCache = new ConcurrentHashMap<>(); - - for (IntegrationVessel vessel : vessels) { - // 각 신호 타입별로 키 생성하여 캐시에 저장 - if (isValidTargetId(vessel.getAis())) { - newCache.put("000001_" + vessel.getAis(), vessel); - } - if (isValidTargetId(vessel.getEnav())) { - newCache.put("000002_" + vessel.getEnav(), vessel); - } - if (isValidTargetId(vessel.getVpass())) { - newCache.put("000003_" + vessel.getVpass(), vessel); - } - if (isValidTargetId(vessel.getVtsAis())) { - newCache.put("000004_" + vessel.getVtsAis(), vessel); - } - if (isValidTargetId(vessel.getDMfHf())) { - newCache.put("000016_" + vessel.getDMfHf(), vessel); - } - } - - // 기존 캐시를 새 캐시로 교체 - integrationCache.clear(); - integrationCache.putAll(newCache); - cacheLoaded.set(true); - - long elapsed = System.currentTimeMillis() - startTime; - log.info("Integration cache loaded successfully: {} vessels, {} cache entries in {}ms", - vessels.size(), integrationCache.size(), elapsed); - - } catch (Exception e) { - log.error("Failed to load integration cache from DB", e); - // 실패해도 기존 캐시 유지 - } - } - - /** - * 유효한 target_id인지 확인 - */ - private boolean isValidTargetId(String targetId) { - return targetId != null && !"0".equals(targetId) && !targetId.isEmpty(); - } -} diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselLatestPositionCache.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselLatestPositionCache.java index 7c80901..37c6005 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselLatestPositionCache.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselLatestPositionCache.java @@ -18,7 +18,7 @@ import java.util.concurrent.ConcurrentMap; * 선박 최신 위치 정보 캐시 
관리 서비스 * * 캐시 구조: - * - Key: "{sigSrcCd}:{targetId}" (예: "000001:440123456") + * - Key: mmsi (예: "440123456") * - Value: RecentVesselPositionDto * - TTL: 60분 (CacheConfig에서 설정) * @@ -40,20 +40,20 @@ public class VesselLatestPositionCache { /** * 캐시 키 생성 */ - private String createKey(String sigSrcCd, String targetId) { - return sigSrcCd + ":" + targetId; + private String createKey(String mmsi) { + return mmsi; } /** * 단일 선박 위치 캐시 저장 */ public void put(RecentVesselPositionDto position) { - if (position == null || position.getSigSrcCd() == null || position.getTargetId() == null) { + if (position == null || position.getMmsi() == null) { log.warn("Invalid position data, skipping cache: {}", position); return; } - String key = createKey(position.getSigSrcCd(), position.getTargetId()); + String key = createKey(position.getMmsi()); getCache().put(key, position); } @@ -71,7 +71,7 @@ public class VesselLatestPositionCache { int count = 0; for (RecentVesselPositionDto position : positions) { - if (position.getSigSrcCd() != null && position.getTargetId() != null) { + if (position.getMmsi() != null) { put(position); count++; } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPositionService.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPositionService.java index de18c05..9aa68ef 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPositionService.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPositionService.java @@ -1,7 +1,7 @@ package gc.mda.signal_batch.domain.vessel.service; import gc.mda.signal_batch.domain.vessel.dto.RecentVesselPositionDto; -import gc.mda.signal_batch.global.util.ShipKindCodeConverter; +import gc.mda.signal_batch.global.util.SignalKindCode; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Autowired; @@ -10,10 +10,8 @@ import org.springframework.jdbc.core.JdbcTemplate; import 
org.springframework.jdbc.core.RowMapper; import org.springframework.stereotype.Service; -import java.math.BigDecimal; import java.sql.ResultSet; import java.sql.SQLException; -import java.time.LocalDateTime; import java.util.List; @Slf4j @@ -71,8 +69,8 @@ public class VesselPositionService { SELECT NOW() as db_now, NOW() - INTERVAL '%d minutes' as threshold_time, - (SELECT MAX(last_update) FROM signal.t_vessel_latest_position) as max_last_update, - (SELECT COUNT(*) FROM signal.t_vessel_latest_position WHERE last_update >= NOW() - INTERVAL '%d minutes') as matching_count + (SELECT MAX(last_update) FROM signal.t_ais_position) as max_last_update, + (SELECT COUNT(*) FROM signal.t_ais_position WHERE last_update >= NOW() - INTERVAL '%d minutes') as matching_count """.formatted(minutes, minutes); queryJdbcTemplate.query(debugSql, rs -> { @@ -85,18 +83,16 @@ public class VesselPositionService { String sql = """ SELECT - sig_src_cd, - target_id, + mmsi, lon, lat, sog, cog, - ship_nm, - ship_ty, + name as ship_nm, + vessel_type as ship_ty, last_update - FROM signal.t_vessel_latest_position + FROM signal.t_ais_position WHERE last_update >= NOW() - INTERVAL '%d minutes' - AND sig_src_cd NOT IN ('000004', '000005') ORDER BY last_update DESC """.formatted(minutes); @@ -112,24 +108,18 @@ public class VesselPositionService { private static class VesselPositionRowMapper implements RowMapper { @Override public RecentVesselPositionDto mapRow(ResultSet rs, int rowNum) throws SQLException { - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); + String mmsi = rs.getString("mmsi"); String shipTy = rs.getString("ship_ty"); - - // shipKindCode 계산 - String shipKindCode = ShipKindCodeConverter.getShipKindCode(sigSrcCd, shipTy); - - // nationalCode 계산 - String nationalCode; - if ("000001".equals(sigSrcCd) && targetId != null && targetId.length() >= 3) { - nationalCode = targetId.substring(0, 3); - } else { - nationalCode = "440"; // 기본값 - } - + + // 
shipKindCode 계산 (vesselType 기반, extraInfo 없음) + String shipKindCode = SignalKindCode.resolve(shipTy, null).getCode(); + + // nationalCode 계산 (MMSI 앞 3자리 = MID) + String nationalCode = mmsi != null && mmsi.length() >= 3 + ? mmsi.substring(0, 3) : "000"; + return RecentVesselPositionDto.builder() - .sigSrcCd(sigSrcCd) - .targetId(targetId) + .mmsi(mmsi) .lon(rs.getDouble("lon")) .lat(rs.getDouble("lat")) .sog(rs.getBigDecimal("sog")) @@ -138,8 +128,8 @@ public class VesselPositionService { .shipTy(shipTy) .shipKindCode(shipKindCode) .nationalCode(nationalCode) - .lastUpdate(rs.getTimestamp("last_update") != null ? - rs.getTimestamp("last_update").toLocalDateTime() : null) + .lastUpdate(rs.getTimestamp("last_update") != null ? + rs.getTimestamp("last_update").toLocalDateTime() : null) .build(); } } diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPreviousBucketCache.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPreviousBucketCache.java index d088218..11de7fc 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPreviousBucketCache.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselPreviousBucketCache.java @@ -13,8 +13,6 @@ import org.springframework.stereotype.Service; import java.sql.ResultSet; import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.LocalDateTime; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -26,13 +24,13 @@ import java.util.concurrent.ConcurrentMap; * 용도: 버킷 간 점프 검출을 위한 이전 버킷 종료 위치 캐싱 * * 캐시 구조: - * - Key: "{sigSrcCd}:{targetId}" (예: "000001:440123456") + * - Key: mmsi (예: "440123456") * - Value: VesselBucketPositionDto (endLon, endLat, endTime) - * - TTL: 120분 (위성 AIS 고려) + * - TTL: 120분 * * 데이터 흐름: * 1. 5분 집계 시작 시 캐시에서 이전 버킷 위치 조회 - * 2. 캐시 미스 시 t_vessel_latest_position 테이블에서 최근 2시간 데이터 조회 (fallback) + * 2. 캐시 미스 시 t_ais_position 테이블에서 최근 2시간 데이터 조회 (fallback) * 3. 현재 버킷과 이전 버킷 간 점프 검출 * 4. 
처리 완료 후 현재 버킷 종료 위치를 캐시에 업데이트 */ @@ -55,26 +53,17 @@ public class VesselPreviousBucketCache { private volatile int totalDbQueriesCount = 0; private volatile int totalVesselsLoadedFromDb = 0; - /** - * 캐시 키 생성 - */ - private String createKey(String sigSrcCd, String targetId) { - return sigSrcCd + ":" + targetId; - } - /** * 단일 선박의 버킷 종료 위치 조회 * - * @param sigSrcCd 신호원 코드 - * @param targetId 대상 ID + * @param mmsi MMSI 식별자 * @return 이전 버킷 종료 위치 (캐시 미스 시 null) */ - public VesselBucketPositionDto get(String sigSrcCd, String targetId) { - String key = createKey(sigSrcCd, targetId); + public VesselBucketPositionDto get(String mmsi) { org.springframework.cache.Cache cache = getCache(); if (cache != null) { - org.springframework.cache.Cache.ValueWrapper wrapper = cache.get(key); + org.springframework.cache.Cache.ValueWrapper wrapper = cache.get(mmsi); if (wrapper != null && wrapper.get() instanceof VesselBucketPositionDto) { return (VesselBucketPositionDto) wrapper.get(); } @@ -86,7 +75,7 @@ public class VesselPreviousBucketCache { /** * 여러 선박의 이전 버킷 위치 일괄 조회 (캐시 + DB Fallback) * - * @param vesselKeys 조회할 선박 키 목록 (sigSrcCd:targetId) + * @param vesselKeys 조회할 선박 키 목록 (mmsi) * @return 선박 키 -> 이전 버킷 위치 매핑 */ public Map getBatch(List vesselKeys) { @@ -113,8 +102,6 @@ public class VesselPreviousBucketCache { } } - // 캐시 조회 로그 제거 (Job 레벨 통계로 대체) - // 2. 
캐시 미스 시 DB에서 일괄 조회 (최근 2시간) - 첫 실행 시 1회만 if (cacheMisses > 0 && !fallbackLoaded) { synchronized (this) { @@ -123,7 +110,7 @@ public class VesselPreviousBucketCache { Map dbResults = loadFromDatabaseBatch(); long elapsed = System.currentTimeMillis() - startTime; - // 전체 결과를 캐시에 저장 (요청 여부와 관계없이 모두 저장) + // 전체 결과를 캐시에 저장 dbResults.values().forEach(this::put); // 요청된 선박들을 결과에 추가 @@ -152,17 +139,15 @@ public class VesselPreviousBucketCache { private Map loadFromDatabaseBatch() { Map result = new HashMap<>(); - // t_vessel_latest_position 테이블에서 최근 2시간 데이터 조회 + // t_ais_position 테이블에서 최근 2시간 데이터 조회 String sql = """ SELECT - sig_src_cd, - target_id, + mmsi, lon, lat, last_update - FROM signal.t_vessel_latest_position + FROM signal.t_ais_position WHERE last_update >= NOW() - INTERVAL '2 hours' - AND sig_src_cd NOT IN ('000004', '000005') """; try { @@ -172,7 +157,7 @@ public class VesselPreviousBucketCache { result.put(position.getVesselKey(), position); } - log.debug("Queried {} vessel positions from t_vessel_latest_position (last 2 hours)", positions.size()); + log.debug("Queried {} vessel positions from t_ais_position (last 2 hours)", positions.size()); } catch (Exception e) { log.error("Failed to load previous bucket positions from DB", e); @@ -185,13 +170,12 @@ public class VesselPreviousBucketCache { * 단일 선박 위치 캐시 저장 */ public void put(VesselBucketPositionDto position) { - if (position == null || position.getSigSrcCd() == null || position.getTargetId() == null) { + if (position == null || position.getMmsi() == null) { log.warn("Invalid position data, skipping cache: {}", position); return; } - String key = createKey(position.getSigSrcCd(), position.getTargetId()); - getCache().put(key, position); + getCache().put(position.getMmsi(), position); } /** @@ -205,7 +189,7 @@ public class VesselPreviousBucketCache { int count = 0; for (VesselBucketPositionDto position : positions) { - if (position.getSigSrcCd() != null && position.getTargetId() != null) { + if 
(position.getMmsi() != null) { put(position); count++; } @@ -291,8 +275,7 @@ public class VesselPreviousBucketCache { @Override public VesselBucketPositionDto mapRow(ResultSet rs, int rowNum) throws SQLException { return VesselBucketPositionDto.builder() - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) + .mmsi(rs.getString("mmsi")) .endLon(rs.getDouble("lon")) .endLat(rs.getDouble("lat")) .endTime(rs.getTimestamp("last_update") != null ? diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMerger.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMerger.java index 99d237e..c80630a 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMerger.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMerger.java @@ -35,7 +35,7 @@ public class VesselTrackMerger { public List mergeTracksByVessel(List tracks) { // 선박별로 그룹화 Map> vesselGroups = tracks.stream() - .collect(Collectors.groupingBy(t -> t.getSigSrcCd() + "_" + t.getTargetId())); + .collect(Collectors.groupingBy(t -> t.getMmsi())); log.info("Merging tracks for {} vessels from {} segments", vesselGroups.size(), tracks.size()); @@ -126,10 +126,9 @@ public class VesselTrackMerger { .collect(Collectors.toList()); return MergedVesselTrack.builder() - .sigSrcCd(firstSegment.getSigSrcCd()) - .targetId(firstSegment.getTargetId()) - .nationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode( - firstSegment.getSigSrcCd(), firstSegment.getTargetId())) + .mmsi(firstSegment.getMmsi()) + .nationalCode(firstSegment.getMmsi() != null && firstSegment.getMmsi().length() >= 3 + ? 
firstSegment.getMmsi().substring(0, 3) : "000") .vesselId(vesselId) .mergedTrackGeom(mergedGeom) .totalDistanceNm(totalDistance) diff --git a/src/main/java/gc/mda/signal_batch/domain/vessel/service/filter/VesselTrackFilter.java b/src/main/java/gc/mda/signal_batch/domain/vessel/service/filter/VesselTrackFilter.java index 6803b1b..9a23c42 100644 --- a/src/main/java/gc/mda/signal_batch/domain/vessel/service/filter/VesselTrackFilter.java +++ b/src/main/java/gc/mda/signal_batch/domain/vessel/service/filter/VesselTrackFilter.java @@ -120,20 +120,20 @@ public class VesselTrackFilter { */ private Map> loadVesselTracks(TrackQueryRequest request, String tableName) { String sql = """ - SELECT sig_src_cd, target_id, time_bucket, + SELECT mmsi, time_bucket, public.ST_AsText(track_geom) as track_geom, -- WKT 형식으로 변환 distance_nm, avg_speed, point_count, - LEAD(time_bucket) OVER (PARTITION BY sig_src_cd, target_id ORDER BY time_bucket) as next_bucket, - LEAD(public.ST_AsText(track_geom)) OVER (PARTITION BY sig_src_cd, target_id ORDER BY time_bucket) as next_geom -- WKT 형식으로 변환 + LEAD(time_bucket) OVER (PARTITION BY mmsi ORDER BY time_bucket) as next_bucket, + LEAD(public.ST_AsText(track_geom)) OVER (PARTITION BY mmsi ORDER BY time_bucket) as next_geom -- WKT 형식으로 변환 FROM %s WHERE time_bucket >= ? AND time_bucket < ? 
- ORDER BY sig_src_cd, target_id, time_bucket + ORDER BY mmsi, time_bucket """.formatted(tableName); Map> vesselTracks = new ConcurrentHashMap<>(); queryJdbcTemplate.query(sql, rs -> { - String vesselId = rs.getString("sig_src_cd") + "_" + rs.getString("target_id"); + String vesselId = rs.getString("mmsi"); VesselTrackSegment segment = VesselTrackSegment.builder() .vesselId(vesselId) diff --git a/src/main/java/gc/mda/signal_batch/global/config/AisApiWebClientConfig.java b/src/main/java/gc/mda/signal_batch/global/config/AisApiWebClientConfig.java new file mode 100644 index 0000000..da47f31 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/global/config/AisApiWebClientConfig.java @@ -0,0 +1,43 @@ +package gc.mda.signal_batch.global.config; + +import lombok.extern.slf4j.Slf4j; +import org.springframework.beans.factory.annotation.Value; +import org.springframework.context.annotation.Bean; +import org.springframework.context.annotation.Configuration; +import org.springframework.context.annotation.Profile; +import org.springframework.web.reactive.function.client.WebClient; + +/** + * S&P Global AIS API WebClient 설정 + * + * API: POST /AisSvc.svc/AIS/GetTargetsEnhanced + * 인증: Basic Authentication + * 버퍼: 50MB (AIS GetTargets 응답 ~20MB+) + */ +@Slf4j +@Configuration +@Profile("!query") +public class AisApiWebClientConfig { + + @Value("${app.ais-api.url}") + private String aisApiUrl; + + @Value("${app.ais-api.username}") + private String aisApiUsername; + + @Value("${app.ais-api.password}") + private String aisApiPassword; + + @Bean(name = "aisApiWebClient") + public WebClient aisApiWebClient() { + log.info("AIS API WebClient 생성 - Base URL: {}", aisApiUrl); + + return WebClient.builder() + .baseUrl(aisApiUrl) + .defaultHeaders(headers -> headers.setBasicAuth(aisApiUsername, aisApiPassword)) + .codecs(configurer -> configurer + .defaultCodecs() + .maxInMemorySize(50 * 1024 * 1024)) + .build(); + } +} diff --git 
a/src/main/java/gc/mda/signal_batch/global/config/DevDataSourceConfig.java b/src/main/java/gc/mda/signal_batch/global/config/DevDataSourceConfig.java index c08afdb..e20d7bd 100644 --- a/src/main/java/gc/mda/signal_batch/global/config/DevDataSourceConfig.java +++ b/src/main/java/gc/mda/signal_batch/global/config/DevDataSourceConfig.java @@ -21,39 +21,42 @@ import javax.sql.DataSource; @Profile("dev") public class DevDataSourceConfig { + private final DataSourceConfigProperties properties; + + public DevDataSourceConfig(DataSourceConfigProperties properties) { + this.properties = properties; + } + @Bean - @ConfigurationProperties(prefix = "spring.datasource.collect") + @ConfigurationProperties(prefix = "spring.datasource.collect.hikari") public HikariConfig collectHikariConfig() { HikariConfig config = new HikariConfig(); - // 여기서 기본값을 설정하면 yml 파일의 설정과 병합됨 + applyConnectionProps(config, properties.getCollect()); config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); return config; } @Bean public DataSource collectDataSource(@Qualifier("collectHikariConfig") HikariConfig hikariConfig) { - // HikariConfig는 이미 @ConfigurationProperties로 설정이 주입되어 있음 HikariDataSource dataSource = new HikariDataSource(hikariConfig); - - log.info("Collect DataSource created:"); - log.info(" - URL: {}", hikariConfig.getJdbcUrl()); - log.info(" - Connection Init SQL: {}", hikariConfig.getConnectionInitSql()); - log.info(" - Pool Name: {}", hikariConfig.getPoolName()); - - // PostGIS 타입 등록 (선택사항) + + log.info("Collect DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); + try { PostGISConfig.registerPostGISTypes(dataSource); } catch (Exception e) { log.warn("PostGIS type registration skipped: {}", e.getMessage()); } - + return dataSource; } @Bean(name = "devQueryHikariConfig") - @ConfigurationProperties(prefix = "spring.datasource.query") + 
@ConfigurationProperties(prefix = "spring.datasource.query.hikari") public HikariConfig devQueryHikariConfig() { HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getQuery()); config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); return config; } @@ -61,26 +64,24 @@ public class DevDataSourceConfig { @Bean public DataSource queryDataSource(@Qualifier("devQueryHikariConfig") HikariConfig hikariConfig) { HikariDataSource dataSource = new HikariDataSource(hikariConfig); - - log.info("Query DataSource created:"); - log.info(" - URL: {}", hikariConfig.getJdbcUrl()); - log.info(" - Connection Init SQL: {}", hikariConfig.getConnectionInitSql()); - log.info(" - Pool Name: {}", hikariConfig.getPoolName()); - - // PostGIS 타입 등록 (선택사항) + + log.info("Query DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); + try { PostGISConfig.registerPostGISTypes(dataSource); } catch (Exception e) { log.warn("PostGIS type registration skipped: {}", e.getMessage()); } - + return dataSource; } @Bean - @ConfigurationProperties(prefix = "spring.datasource.batch") + @ConfigurationProperties(prefix = "spring.datasource.batch.hikari") public HikariConfig batchHikariConfig() { HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getBatch()); config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'"); return config; } @@ -89,19 +90,16 @@ public class DevDataSourceConfig { @Primary public DataSource batchDataSource(@Qualifier("batchHikariConfig") HikariConfig hikariConfig) { HikariDataSource dataSource = new HikariDataSource(hikariConfig); - - log.info("Batch DataSource created:"); - log.info(" - URL: {}", hikariConfig.getJdbcUrl()); - log.info(" - Connection Init SQL: {}", hikariConfig.getConnectionInitSql()); - log.info(" - Pool Name: {}", hikariConfig.getPoolName()); - - // PostGIS 타입 등록 (선택사항) + + 
log.info("Batch DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); + try { PostGISConfig.registerPostGISTypes(dataSource); } catch (Exception e) { log.warn("PostGIS type registration skipped: {}", e.getMessage()); } - + return dataSource; } @@ -114,7 +112,7 @@ public class DevDataSourceConfig { public PlatformTransactionManager transactionManager(@Qualifier("collectDataSource") DataSource dataSource) { return new DataSourceTransactionManager(dataSource); } - + @Bean public PlatformTransactionManager queryTransactionManager(@Qualifier("queryDataSource") DataSource dataSource) { return new DataSourceTransactionManager(dataSource); @@ -150,4 +148,11 @@ public class DevDataSourceConfig { public NamedParameterJdbcTemplate queryNamedJdbcTemplate(@Qualifier("queryDataSource") DataSource dataSource) { return new NamedParameterJdbcTemplate(dataSource); } -} \ No newline at end of file + + private void applyConnectionProps(HikariConfig config, DataSourceConfigProperties.DatabaseProperties props) { + config.setJdbcUrl(props.getJdbcUrl()); + config.setUsername(props.getUsername()); + config.setPassword(props.getPassword()); + config.setDriverClassName(props.getDriverClassName()); + } +} diff --git a/src/main/java/gc/mda/signal_batch/global/config/LocalDataSourceConfig.java b/src/main/java/gc/mda/signal_batch/global/config/LocalDataSourceConfig.java index 6d8ba4f..405108a 100644 --- a/src/main/java/gc/mda/signal_batch/global/config/LocalDataSourceConfig.java +++ b/src/main/java/gc/mda/signal_batch/global/config/LocalDataSourceConfig.java @@ -1,19 +1,23 @@ package gc.mda.signal_batch.global.config; +import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; +import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.jdbc.DataSourceBuilder; +import 
org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Primary; import org.springframework.context.annotation.Profile; import org.springframework.jdbc.core.JdbcTemplate; +import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate; import org.springframework.jdbc.datasource.DataSourceTransactionManager; import org.springframework.transaction.PlatformTransactionManager; import javax.sql.DataSource; +@Slf4j @Configuration @Profile("local") public class LocalDataSourceConfig { @@ -25,55 +29,69 @@ public class LocalDataSourceConfig { } @Bean - public DataSource collectDataSource() { - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getCollect().getJdbcUrl()) - .username(properties.getCollect().getUsername()) - .password(properties.getCollect().getPassword()) - .driverClassName(properties.getCollect().getDriverClassName()) - .build(); + @ConfigurationProperties(prefix = "spring.datasource.collect.hikari") + public HikariConfig localCollectHikariConfig() { + HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getCollect()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); + return config; + } + + @Bean + public DataSource collectDataSource(@Qualifier("localCollectHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Local Collect DataSource created: URL={}, poolSize={}", hikariConfig.getJdbcUrl(), hikariConfig.getMaximumPoolSize()); return dataSource; } + @Bean(name = "localQueryHikariConfig") + @ConfigurationProperties(prefix = "spring.datasource.query.hikari") + public HikariConfig localQueryHikariConfig() { + HikariConfig config = new HikariConfig(); + 
applyConnectionProps(config, properties.getQuery()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); + return config; + } @Bean - public DataSource queryDataSource() { - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getQuery().getJdbcUrl()) - .username(properties.getQuery().getUsername()) - .password(properties.getQuery().getPassword()) - .driverClassName(properties.getQuery().getDriverClassName()) - .build(); + public DataSource queryDataSource(@Qualifier("localQueryHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Local Query DataSource created: URL={}, poolSize={}", hikariConfig.getJdbcUrl(), hikariConfig.getMaximumPoolSize()); return dataSource; } + @Bean + @ConfigurationProperties(prefix = "spring.datasource.batch.hikari") + public HikariConfig localBatchHikariConfig() { + HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getBatch()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'"); + return config; + } + @Bean @Primary - public DataSource batchDataSource() { - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getBatch().getJdbcUrl()) - .username(properties.getBatch().getUsername()) - .password(properties.getBatch().getPassword()) - .driverClassName(properties.getBatch().getDriverClassName()) - .build(); + public DataSource batchDataSource(@Qualifier("localBatchHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Local Batch DataSource created: URL={}, poolSize={}", hikariConfig.getJdbcUrl(), hikariConfig.getMaximumPoolSize()); return dataSource; } - // Spring Batch가 찾는 기본 dataSource 빈 @Bean public DataSource dataSource(@Qualifier("batchDataSource") DataSource batchDataSource) { return batchDataSource; } - // 
나머지 빈들은 동일... @Bean public PlatformTransactionManager transactionManager(@Qualifier("collectDataSource") DataSource dataSource) { return new DataSourceTransactionManager(dataSource); } + @Bean + public PlatformTransactionManager queryTransactionManager(@Qualifier("queryDataSource") DataSource dataSource) { + return new DataSourceTransactionManager(dataSource); + } + @Bean @Primary public PlatformTransactionManager batchTransactionManager(@Qualifier("batchDataSource") DataSource dataSource) { @@ -94,4 +112,21 @@ public class LocalDataSourceConfig { jdbcTemplate.setQueryTimeout(300); return jdbcTemplate; } -} \ No newline at end of file + + @Bean(name = "collectNamedJdbcTemplate") + public NamedParameterJdbcTemplate collectNamedJdbcTemplate(@Qualifier("collectDataSource") DataSource dataSource) { + return new NamedParameterJdbcTemplate(dataSource); + } + + @Bean(name = "queryNamedJdbcTemplate") + public NamedParameterJdbcTemplate queryNamedJdbcTemplate(@Qualifier("queryDataSource") DataSource dataSource) { + return new NamedParameterJdbcTemplate(dataSource); + } + + private void applyConnectionProps(HikariConfig config, DataSourceConfigProperties.DatabaseProperties props) { + config.setJdbcUrl(props.getJdbcUrl()); + config.setUsername(props.getUsername()); + config.setPassword(props.getPassword()); + config.setDriverClassName(props.getDriverClassName()); + } +} diff --git a/src/main/java/gc/mda/signal_batch/global/config/ProdDataSourceConfig.java b/src/main/java/gc/mda/signal_batch/global/config/ProdDataSourceConfig.java index 3173f01..05574b6 100644 --- a/src/main/java/gc/mda/signal_batch/global/config/ProdDataSourceConfig.java +++ b/src/main/java/gc/mda/signal_batch/global/config/ProdDataSourceConfig.java @@ -1,8 +1,10 @@ package gc.mda.signal_batch.global.config; +import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; +import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; -import 
org.springframework.boot.jdbc.DataSourceBuilder; +import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Primary; @@ -14,6 +16,7 @@ import org.springframework.transaction.PlatformTransactionManager; import javax.sql.DataSource; +@Slf4j @Configuration @Profile({"prod", "prod-mpr"}) public class ProdDataSourceConfig { @@ -25,62 +28,62 @@ public class ProdDataSourceConfig { } @Bean - public DataSource collectDataSource() { - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getCollect().getJdbcUrl()) - .username(properties.getCollect().getUsername()) - .password(properties.getCollect().getPassword()) - .driverClassName(properties.getCollect().getDriverClassName()) - .build(); + @ConfigurationProperties(prefix = "spring.datasource.collect.hikari") + public HikariConfig prodCollectHikariConfig() { + HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getCollect()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); + return config; + } + + @Bean + public DataSource collectDataSource(@Qualifier("prodCollectHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Prod Collect DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); return dataSource; } + @Bean(name = "prodQueryHikariConfig") + @ConfigurationProperties(prefix = "spring.datasource.query.hikari") + public HikariConfig prodQueryHikariConfig() { + HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getQuery()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;"); + return 
config; + } @Bean - public DataSource queryDataSource() { - System.out.println("========================================"); - System.out.println("!!! CREATING queryDataSource !!!"); - System.out.println("URL: " + properties.getQuery().getJdbcUrl()); - System.out.println("Username: " + properties.getQuery().getUsername()); - System.out.println("========================================"); - - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getQuery().getJdbcUrl()) - .username(properties.getQuery().getUsername()) - .password(properties.getQuery().getPassword()) - .driverClassName(properties.getQuery().getDriverClassName()) - .build(); + public DataSource queryDataSource(@Qualifier("prodQueryHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Prod Query DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); return dataSource; } + @Bean + @ConfigurationProperties(prefix = "spring.datasource.batch.hikari") + public HikariConfig prodBatchHikariConfig() { + HikariConfig config = new HikariConfig(); + applyConnectionProps(config, properties.getBatch()); + config.setConnectionInitSql("SET TIME ZONE 'Asia/Seoul'"); + return config; + } + @Bean @Primary - public DataSource batchDataSource() { - System.out.println("========================================"); - System.out.println("!!! 
CREATING batchDataSource (PRIMARY) !!!"); - System.out.println("URL: " + properties.getBatch().getJdbcUrl()); - System.out.println("Username: " + properties.getBatch().getUsername()); - System.out.println("========================================"); - - HikariDataSource dataSource = DataSourceBuilder.create() - .type(HikariDataSource.class) - .url(properties.getBatch().getJdbcUrl()) - .username(properties.getBatch().getUsername()) - .password(properties.getBatch().getPassword()) - .driverClassName(properties.getBatch().getDriverClassName()) - .build(); + public DataSource batchDataSource(@Qualifier("prodBatchHikariConfig") HikariConfig hikariConfig) { + HikariDataSource dataSource = new HikariDataSource(hikariConfig); + log.info("Prod Batch DataSource created: URL={}, pool={}, maxSize={}", + hikariConfig.getJdbcUrl(), hikariConfig.getPoolName(), hikariConfig.getMaximumPoolSize()); return dataSource; } - // Spring Batch가 찾는 기본 dataSource 빈 @Bean public DataSource dataSource(@Qualifier("batchDataSource") DataSource batchDataSource) { return batchDataSource; } - // Transaction Manager 설정 @Bean public PlatformTransactionManager transactionManager(@Qualifier("collectDataSource") DataSource dataSource) { return new DataSourceTransactionManager(dataSource); @@ -97,7 +100,6 @@ public class ProdDataSourceConfig { return new DataSourceTransactionManager(dataSource); } - // JdbcTemplate 빈 설정 @Bean(name = "collectJdbcTemplate") public JdbcTemplate collectJdbcTemplate(@Qualifier("collectDataSource") DataSource dataSource) { JdbcTemplate jdbcTemplate = new JdbcTemplate(dataSource); @@ -122,4 +124,11 @@ public class ProdDataSourceConfig { public NamedParameterJdbcTemplate queryNamedJdbcTemplate(@Qualifier("queryDataSource") DataSource dataSource) { return new NamedParameterJdbcTemplate(dataSource); } -} \ No newline at end of file + + private void applyConnectionProps(HikariConfig config, DataSourceConfigProperties.DatabaseProperties props) { + 
config.setJdbcUrl(props.getJdbcUrl()); + config.setUsername(props.getUsername()); + config.setPassword(props.getPassword()); + config.setDriverClassName(props.getDriverClassName()); + } +} diff --git a/src/main/java/gc/mda/signal_batch/global/config/SwaggerConfig.java b/src/main/java/gc/mda/signal_batch/global/config/SwaggerConfig.java index f9de431..43dd01f 100644 --- a/src/main/java/gc/mda/signal_batch/global/config/SwaggerConfig.java +++ b/src/main/java/gc/mda/signal_batch/global/config/SwaggerConfig.java @@ -30,7 +30,7 @@ import org.springframework.context.annotation.Configuration; ### 3. 비정상 항적 검출 및 필터링 - **실시간 검출**: 물리적 불가능 항적 자동 필터링 (속도 100knots, 거리 10nm/5분 초과) - - **항공기 예외처리**: sig_src_cd='000019' 항공기는 300knots/30nm 기준 적용 + - **항공기 예외처리**: 항공기 AIS 신호는 300knots/30nm 기준 적용 - **분리 저장**: 비정상 항적은 별도 테이블(t_abnormal_tracks)에 보관 ### 4. WebSocket 기반 대용량 항적 스트리밍 diff --git a/src/main/java/gc/mda/signal_batch/global/tool/BatchDiagnosticTool.java b/src/main/java/gc/mda/signal_batch/global/tool/BatchDiagnosticTool.java index c751cd5..8e357b2 100644 --- a/src/main/java/gc/mda/signal_batch/global/tool/BatchDiagnosticTool.java +++ b/src/main/java/gc/mda/signal_batch/global/tool/BatchDiagnosticTool.java @@ -1,6 +1,5 @@ package gc.mda.signal_batch.global.tool; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.jdbc.core.JdbcTemplate; @@ -14,15 +13,14 @@ import java.util.*; @Slf4j @Component -@RequiredArgsConstructor public class BatchDiagnosticTool { - @Qualifier("collectJdbcTemplate") - private final JdbcTemplate collectJdbcTemplate; - - @Qualifier("queryJdbcTemplate") private final JdbcTemplate queryJdbcTemplate; + public BatchDiagnosticTool(@Qualifier("queryJdbcTemplate") JdbcTemplate queryJdbcTemplate) { + this.queryJdbcTemplate = queryJdbcTemplate; + } + /** * 종합 진단 실행 */ @@ -61,7 +59,6 @@ public class BatchDiagnosticTool { DatabaseHealth health = new 
DatabaseHealth(); // 연결 상태 - health.setCollectDbConnected(testConnection(collectJdbcTemplate)); health.setQueryDbConnected(testConnection(queryJdbcTemplate)); // 활성 연결 수 @@ -80,31 +77,31 @@ public class BatchDiagnosticTool { } /** - * 파티션 상태 확인 + * 파티션 상태 확인 (집계 파티션) */ private PartitionStatus checkPartitionStatus() { PartitionStatus status = new PartitionStatus(); String sql = """ - SELECT + SELECT tablename, pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size, pg_total_relation_size(schemaname||'.'||tablename) as size_bytes FROM pg_tables - WHERE schemaname = 'signal' - AND tablename LIKE 'sig_test_%' + WHERE schemaname = 'signal' + AND tablename LIKE 't_vessel_tracks_5min_%' ORDER BY tablename """; - List> partitions = collectJdbcTemplate.queryForList(sql); + List> partitions = queryJdbcTemplate.queryForList(sql); status.setTotalPartitions(partitions.size()); status.setPartitionDetails(partitions); // 미래 파티션 확인 LocalDate tomorrow = LocalDate.now().plusDays(1); - String tomorrowPartition = "sig_test_" + - tomorrow.format(DateTimeFormatter.BASIC_ISO_DATE); + String tomorrowPartition = "t_vessel_tracks_5min_" + + tomorrow.format(DateTimeFormatter.ofPattern("yyMMdd")); status.setHasFuturePartitions( partitions.stream().anyMatch(p -> p.get("tablename").equals(tomorrowPartition)) @@ -129,7 +126,7 @@ public class BatchDiagnosticTool { SELECT COUNT(*) as records_last_hour, COUNT(*) / 3600.0 as records_per_second - FROM signal.t_vessel_latest_position + FROM signal.t_ais_position WHERE last_update > NOW() - INTERVAL '1 hour' """); @@ -137,7 +134,7 @@ public class BatchDiagnosticTool { metrics.setRecordsPerSecond(((Number) throughput.get("records_per_second")).doubleValue()); // 인덱스 효율성 - List> indexStats = collectJdbcTemplate.queryForList(""" + List> indexStats = queryJdbcTemplate.queryForList(""" SELECT indexrelname, idx_scan, @@ -154,7 +151,7 @@ public class BatchDiagnosticTool { metrics.setIndexEfficiency(indexStats); // 캐시 히트율 - Map cacheStats 
= collectJdbcTemplate.queryForMap(""" + Map cacheStats = queryJdbcTemplate.queryForMap(""" SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit, @@ -170,28 +167,26 @@ public class BatchDiagnosticTool { } /** - * 데이터 무결성 검사 + * 데이터 무결성 검사 (t_ais_position 기반) */ private DataIntegrity checkDataIntegrity() { DataIntegrity integrity = new DataIntegrity(); - // 중복 데이터 확인 - Long duplicates = collectJdbcTemplate.queryForObject(""" + // 중복 데이터 확인 (t_ais_position은 PK=mmsi이므로 중복 없음, 5분 트랙에서 확인) + Long duplicates = queryJdbcTemplate.queryForObject(""" SELECT COUNT(*) FROM ( - SELECT sig_src_cd, target_id, message_time, COUNT(*) - FROM signal.sig_test - WHERE message_time > NOW() - INTERVAL '1 day' - AND sig_src_cd != '000005' - AND length(target_id) > 5 - GROUP BY sig_src_cd, target_id, message_time + SELECT mmsi, time_bucket, COUNT(*) + FROM signal.t_vessel_tracks_5min + WHERE time_bucket > NOW() - INTERVAL '1 day' + GROUP BY mmsi, time_bucket HAVING COUNT(*) > 1 ) dup """, Long.class); - integrity.setDuplicateRecords(duplicates); + integrity.setDuplicateRecords(duplicates != null ? 
duplicates : 0); - // 누락된 시간대 확인 - List missingHours = collectJdbcTemplate.queryForList(""" + // 누락된 시간대 확인 (5분 집계 기준) + List missingHours = queryJdbcTemplate.queryForList(""" WITH hours AS ( SELECT generate_series( NOW() - INTERVAL '24 hours', @@ -202,19 +197,19 @@ public class BatchDiagnosticTool { SELECT TO_CHAR(h.hour, 'YYYY-MM-DD HH24:00') as missing_hour FROM hours h LEFT JOIN ( - SELECT DATE_TRUNC('hour', message_time) as data_hour - FROM signal.sig_test - WHERE message_time > NOW() - INTERVAL '24 hours' - GROUP BY DATE_TRUNC('hour', message_time) + SELECT DATE_TRUNC('hour', time_bucket) as data_hour + FROM signal.t_vessel_tracks_5min + WHERE time_bucket > NOW() - INTERVAL '24 hours' + GROUP BY DATE_TRUNC('hour', time_bucket) ) d ON h.hour = d.data_hour WHERE d.data_hour IS NULL """, String.class); integrity.setMissingTimeRanges(missingHours); - // 데이터 지연 확인 - LocalDateTime latestData = collectJdbcTemplate.queryForObject( - "SELECT MAX(message_time) FROM signal.sig_test", + // 데이터 지연 확인 (최신 위치 기준) + LocalDateTime latestData = queryJdbcTemplate.queryForObject( + "SELECT MAX(last_update) FROM signal.t_ais_position", LocalDateTime.class ); @@ -247,7 +242,7 @@ public class BatchDiagnosticTool { resources.setActiveThreads(Thread.activeCount()); // 디스크 공간 (데이터베이스) - Map diskSpace = collectJdbcTemplate.queryForMap(""" + Map diskSpace = queryJdbcTemplate.queryForMap(""" SELECT pg_database_size(current_database()) as db_size, pg_size_pretty(pg_database_size(current_database())) as db_size_pretty @@ -322,14 +317,14 @@ public class BatchDiagnosticTool { } private int getActiveConnections() { - return collectJdbcTemplate.queryForObject( + return queryJdbcTemplate.queryForObject( "SELECT COUNT(*) FROM pg_stat_activity WHERE state = 'active'", Integer.class ); } private List> getSlowQueries() { - return collectJdbcTemplate.queryForList(""" + return queryJdbcTemplate.queryForList(""" SELECT query, mean_exec_time, @@ -343,7 +338,7 @@ public class BatchDiagnosticTool { } 
private List> getTableSizes() { - return collectJdbcTemplate.queryForList(""" + return queryJdbcTemplate.queryForList(""" SELECT schemaname, tablename, @@ -356,7 +351,7 @@ public class BatchDiagnosticTool { } private List> getLockInfo() { - return collectJdbcTemplate.queryForList(""" + return queryJdbcTemplate.queryForList(""" SELECT pid, locktype, @@ -382,7 +377,6 @@ public class BatchDiagnosticTool { @lombok.Data public static class DatabaseHealth { - private boolean collectDbConnected; private boolean queryDbConnected; private int activeConnections; private List> slowQueries; diff --git a/src/main/java/gc/mda/signal_batch/global/util/ConcurrentUpdateManager.java b/src/main/java/gc/mda/signal_batch/global/util/ConcurrentUpdateManager.java index f24b277..29cd26c 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/ConcurrentUpdateManager.java +++ b/src/main/java/gc/mda/signal_batch/global/util/ConcurrentUpdateManager.java @@ -1,22 +1,14 @@ package gc.mda.signal_batch.global.util; -import gc.mda.signal_batch.domain.vessel.model.VesselLatestPosition; import gc.mda.signal_batch.domain.gis.model.TileStatistics; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.retry.annotation.Backoff; -import org.springframework.retry.annotation.Retryable; import org.springframework.stereotype.Component; import org.springframework.transaction.annotation.Isolation; -import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; -import java.sql.Timestamp; -import java.time.Duration; import java.time.LocalDateTime; import java.util.*; import java.util.concurrent.ConcurrentHashMap; @@ -33,246 +25,9 @@ public class ConcurrentUpdateManager { this.queryJdbcTemplate = queryJdbcTemplate; } - 
@Value("${vessel.batch.lock.timeout:10}") - private int lockTimeoutSeconds; - - @Value("${vessel.batch.lock.max-retry:3}") - private int maxRetryAttempts; - // 락 통계 관리 private final Map lockStats = new ConcurrentHashMap<>(); - /** - * Advisory Lock을 사용한 최신 위치 업데이트 - */ - @Retryable( - value = {Exception.class}, - maxAttempts = 3, - backoff = @Backoff(delay = 100, maxDelay = 1000, multiplier = 2) - ) - @Transactional(isolation = Isolation.READ_COMMITTED, propagation = Propagation.REQUIRED) - public int updateLatestPositionWithLock(VesselLatestPosition position) { - String lockKey = position.getSigSrcCd() + ":" + position.getTargetId(); - long lockId = generateLockId(lockKey); - - LocalDateTime startTime = LocalDateTime.now(); - LockStatistics stats = lockStats.computeIfAbsent(lockKey, k -> new LockStatistics()); - stats.attempts.incrementAndGet(); - - try { - // Advisory Lock 획득 시도 - Boolean lockAcquired = queryJdbcTemplate.queryForObject( - "SELECT pg_try_advisory_lock(?)", - Boolean.class, - lockId - ); - - if (!lockAcquired) { - stats.failures.incrementAndGet(); - log.debug("Failed to acquire lock for vessel: {}", lockKey); - - // 대기 후 재시도 - Thread.sleep(50); - return updateLatestPositionWithoutLock(position); - } - - // 락 획득 성공 - 업데이트 수행 - // 방법 1: queryForList 사용 (권장) - String sql = """ - INSERT INTO signal.t_vessel_latest_position ( - sig_src_cd, target_id, lat, lon, geom, - sog, cog, heading, ship_nm, ship_ty, - last_update, update_count, created_at - ) VALUES ( - ?, ?, ?, ?, public.ST_SetSRID(public.ST_MakePoint(?, ?), 4326), - ?, ?, ?, ?, ?, - ?, 1, CURRENT_TIMESTAMP - ) - ON CONFLICT (sig_src_cd, target_id) DO UPDATE SET - lat = EXCLUDED.lat, - lon = EXCLUDED.lon, - geom = EXCLUDED.geom, - sog = EXCLUDED.sog, - cog = EXCLUDED.cog, - heading = EXCLUDED.heading, - ship_nm = COALESCE(EXCLUDED.ship_nm, t_vessel_latest_position.ship_nm), - ship_ty = COALESCE(EXCLUDED.ship_ty, t_vessel_latest_position.ship_ty), - last_update = EXCLUDED.last_update, - 
update_count = t_vessel_latest_position.update_count + 1 - WHERE EXCLUDED.last_update > t_vessel_latest_position.last_update - RETURNING update_count - """; - - List results = queryJdbcTemplate.queryForList(sql, - new Object[]{ - position.getSigSrcCd(), - position.getTargetId(), - position.getLat(), - position.getLon(), - position.getLon(), - position.getLat(), - position.getSog(), - position.getCog(), - position.getHeading(), - position.getShipNm(), - position.getShipTy(), - Timestamp.valueOf(position.getLastUpdate()) - }, - Integer.class - ); - - // 결과 확인 - 빈 리스트면 업데이트 안됨 (이미 최신 데이터) - int updateResult = results.isEmpty() ? 0 : 1; - - if (updateResult == 0) { - log.debug("Skipped update for vessel {} - existing data is newer", lockKey); - } - - stats.successes.incrementAndGet(); - Duration duration = Duration.between(startTime, LocalDateTime.now()); - stats.totalDuration.addAndGet((int) duration.toMillis()); - - return updateResult; - - } catch (Exception e) { - stats.errors.incrementAndGet(); - log.error("Error updating vessel position: {}", lockKey, e); - throw new RuntimeException("Failed to update vessel position", e); - - } finally { - // Advisory Lock 해제 - try { - queryJdbcTemplate.update("SELECT pg_advisory_unlock(?)", lockId); - } catch (Exception e) { - log.warn("Failed to release advisory lock: {}", lockId); - } - } - } - - /** - * 락 없이 업데이트 (Fallback) - 수정 버전 - */ - private int updateLatestPositionWithoutLock(VesselLatestPosition position) { - String sql = """ - INSERT INTO signal.t_vessel_latest_position ( - sig_src_cd, target_id, lat, lon, geom, - sog, cog, heading, ship_nm, ship_ty, - last_update, update_count, created_at - ) VALUES ( - ?, ?, ?, ?, public.ST_SetSRID(public.ST_MakePoint(?, ?), 4326), - ?, ?, ?, ?, ?, - ?, 1, CURRENT_TIMESTAMP - ) - ON CONFLICT (sig_src_cd, target_id) DO UPDATE SET - lat = EXCLUDED.lat, - lon = EXCLUDED.lon, - geom = EXCLUDED.geom, - sog = EXCLUDED.sog, - cog = EXCLUDED.cog, - heading = EXCLUDED.heading, - ship_nm = 
COALESCE(EXCLUDED.ship_nm, t_vessel_latest_position.ship_nm), - ship_ty = COALESCE(EXCLUDED.ship_ty, t_vessel_latest_position.ship_ty), - last_update = EXCLUDED.last_update, - update_count = t_vessel_latest_position.update_count + 1 - WHERE EXCLUDED.last_update > t_vessel_latest_position.last_update - """; - - return queryJdbcTemplate.update(sql, - position.getSigSrcCd(), - position.getTargetId(), - position.getLat(), - position.getLon(), - position.getLon(), - position.getLat(), - position.getSog(), - position.getCog(), - position.getHeading(), - position.getShipNm(), - position.getShipTy(), - Timestamp.valueOf(position.getLastUpdate()) - ); - } - /** - * 배치 업데이트 with Row-Level Locking - */ - @Transactional(isolation = Isolation.READ_COMMITTED) - public void batchUpdateWithRowLock(List positions) { - // 선박별로 정렬하여 데드락 방지 - positions.sort(Comparator.comparing(p -> p.getSigSrcCd() + p.getTargetId())); - - String lockSql = """ - SELECT 1 FROM signal.t_vessel_latest_position - WHERE sig_src_cd = ? AND target_id = ? - FOR UPDATE NOWAIT - """; - - String updateSql = """ - UPDATE signal.t_vessel_latest_position SET - lat = ?, lon = ?, geom = public.ST_SetSRID(public.ST_MakePoint(?, ?), 4326), - sog = ?, cog = ?, heading = ?, - ship_nm = COALESCE(?, ship_nm), - ship_ty = COALESCE(?, ship_ty), - last_update = ?, - update_count = update_count + 1 - WHERE sig_src_cd = ? AND target_id = ? - AND ? 
> last_update - """; - - String insertSql = """ - INSERT INTO signal.t_vessel_latest_position ( - sig_src_cd, target_id, lat, lon, geom, - sog, cog, heading, ship_nm, ship_ty, - last_update, update_count, created_at - ) VALUES ( - ?, ?, ?, ?, public.ST_SetSRID(public.ST_MakePoint(?, ?), 4326), - ?, ?, ?, ?, ?, - ?, 1, CURRENT_TIMESTAMP - ) - """; - - for (VesselLatestPosition position : positions) { - try { - // Row lock 시도 - List locked = queryJdbcTemplate.queryForList( - lockSql, Integer.class, - position.getSigSrcCd(), position.getTargetId() - ); - - if (!locked.isEmpty()) { - // 업데이트 - int updated = queryJdbcTemplate.update(updateSql, - position.getLat(), position.getLon(), - position.getLon(), position.getLat(), - position.getSog(), position.getCog(), position.getHeading(), - position.getShipNm(), position.getShipTy(), - Timestamp.valueOf(position.getLastUpdate()), - position.getSigSrcCd(), position.getTargetId(), - Timestamp.valueOf(position.getLastUpdate()) - ); - - if (updated == 0) { - log.debug("Skipped outdated update for vessel: {}:{}", - position.getSigSrcCd(), position.getTargetId()); - } - } else { - // 신규 삽입 - queryJdbcTemplate.update(insertSql, - position.getSigSrcCd(), position.getTargetId(), - position.getLat(), position.getLon(), - position.getLon(), position.getLat(), - position.getSog(), position.getCog(), position.getHeading(), - position.getShipNm(), position.getShipTy(), - Timestamp.valueOf(position.getLastUpdate()) - ); - } - - } catch (Exception e) { - log.warn("Failed to update vessel position: {}:{}", - position.getSigSrcCd(), position.getTargetId(), e); - } - } - } - /** * 타일 통계 병합 업데이트 */ diff --git a/src/main/java/gc/mda/signal_batch/global/util/HaeguGeoUtils.java b/src/main/java/gc/mda/signal_batch/global/util/HaeguGeoUtils.java index 198b101..c8fe673 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/HaeguGeoUtils.java +++ b/src/main/java/gc/mda/signal_batch/global/util/HaeguGeoUtils.java @@ -167,7 +167,7 @@ public class 
HaeguGeoUtils { Integer.class, lat, lat, lon, lon ); } catch (Exception e) { - log.debug("No haegu found for coordinates: {}, {}", lat, lon); + log.trace("No haegu found for coordinates: {}, {}", lat, lon); return null; } } diff --git a/src/main/java/gc/mda/signal_batch/global/util/IntegrationSignalConstants.java b/src/main/java/gc/mda/signal_batch/global/util/IntegrationSignalConstants.java deleted file mode 100644 index 019d5e5..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/IntegrationSignalConstants.java +++ /dev/null @@ -1,133 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import java.util.Arrays; -import java.util.Map; -import java.util.function.Function; -import java.util.stream.Collectors; - -/** - * 통합선박신호 관련 상수 및 유틸리티 - */ -public class IntegrationSignalConstants { - - /** - * 신호 타입 정의 (우선순위 포함) - */ - public enum SignalType { - AIS("000001", "ais", 1), - VTS_AIS("000004", "vts_ais", 2), - VPASS("000003", "vpass", 3), - E_NAVIGATION("000002", "enav", 4), - D_MF_HF("000016", "d_mf_hf", 5); - - private final String sigSrcCd; - private final String columnName; - private final int priority; - - // sig_src_cd로 빠른 조회를 위한 맵 - private static final Map BY_SIG_SRC_CD = Arrays.stream(values()) - .collect(Collectors.toMap(SignalType::getSigSrcCd, Function.identity())); - - SignalType(String sigSrcCd, String columnName, int priority) { - this.sigSrcCd = sigSrcCd; - this.columnName = columnName; - this.priority = priority; - } - - public String getSigSrcCd() { - return sigSrcCd; - } - - public String getColumnName() { - return columnName; - } - - public int getPriority() { - return priority; - } - - /** - * sig_src_cd로 SignalType 조회 - */ - public static SignalType fromSigSrcCd(String sigSrcCd) { - return BY_SIG_SRC_CD.get(sigSrcCd); - } - - /** - * sig_src_cd에 해당하는 컬럼명 조회 - */ - public static String getColumnNameBySigSrcCd(String sigSrcCd) { - SignalType type = fromSigSrcCd(sigSrcCd); - return type != null ? 
type.getColumnName() : null; - } - } - - /** - * 우선순위 순서 (높음 → 낮음) - */ - public static final String[] PRIORITY_ORDER = { - "000001", // AIS - "000004", // VTS-AIS - "000003", // VPASS - "000002", // E-NAVIGATION - "000016" // D-MF/HF - }; - - /** - * integration_target_id 생성 - * 형식: {AIS}_{ENAV}_{VPASS}_{VTS-AIS}_{D-MF/HF} - * - * @param ais AIS target_id ('0'이면 공백) - * @param enav E-NAVIGATION target_id ('0'이면 공백) - * @param vpass VPASS target_id ('0'이면 공백) - * @param vtsAis VTS-AIS target_id ('0'이면 공백) - * @param dMfHf D-MF/HF target_id ('0'이면 공백) - * @return 통합선박 ID - */ - public static String generateIntegrationId(String ais, String enav, String vpass, - String vtsAis, String dMfHf) { - return String.format("%s_%s_%s_%s_%s", - isZeroOrNull(ais) ? "" : ais, - isZeroOrNull(enav) ? "" : enav, - isZeroOrNull(vpass) ? "" : vpass, - isZeroOrNull(vtsAis) ? "" : vtsAis, - isZeroOrNull(dMfHf) ? "" : dMfHf - ); - } - - /** - * 단독 선박의 integration_target_id 생성 - * 해당 신호 타입 위치에만 target_id 배치 - * - * @param sigSrcCd 신호 종류 코드 - * @param targetId 대상 ID - * @return 통합선박 ID - */ - public static String generateSoloIntegrationId(String sigSrcCd, String targetId) { - String ais = "", enav = "", vpass = "", vtsAis = "", dMfHf = ""; - - SignalType type = SignalType.fromSigSrcCd(sigSrcCd); - if (type != null) { - switch (type) { - case AIS -> ais = targetId; - case E_NAVIGATION -> enav = targetId; - case VPASS -> vpass = targetId; - case VTS_AIS -> vtsAis = targetId; - case D_MF_HF -> dMfHf = targetId; - } - } - - return generateIntegrationId(ais, enav, vpass, vtsAis, dMfHf); - } - - /** - * 값이 '0' 또는 null인지 확인 - */ - private static boolean isZeroOrNull(String value) { - return value == null || "0".equals(value) || value.isEmpty(); - } - - private IntegrationSignalConstants() { - // 인스턴스화 방지 - } -} diff --git a/src/main/java/gc/mda/signal_batch/global/util/NationalCodeUtil.java b/src/main/java/gc/mda/signal_batch/global/util/NationalCodeUtil.java deleted file mode 100644 index 
4d5c06b..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/NationalCodeUtil.java +++ /dev/null @@ -1,30 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import lombok.experimental.UtilityClass; - -/** - * National Code 계산 유틸리티 - */ -@UtilityClass -public class NationalCodeUtil { - - /** - * sigSrcCd와 targetId를 기반으로 nationalCode를 계산합니다. - * - * @param sigSrcCd 신호소스코드 - * @param targetId 대상ID - * @return nationalCode - */ - public static String calculateNationalCode(String sigSrcCd, String targetId) { - if (("000001".equals(sigSrcCd) || "000004".equals(sigSrcCd)) && - targetId != null && targetId.length() >= 3) { - - String prefix = targetId.substring(0, 3); - // 앞 3자리가 숫자인지 확인 - if (prefix.matches("\\d{3}")) { - return prefix; - } - } - return "440"; // 기본값 - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/util/PartitionManager.java b/src/main/java/gc/mda/signal_batch/global/util/PartitionManager.java index 40ab556..8898dbf 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/PartitionManager.java +++ b/src/main/java/gc/mda/signal_batch/global/util/PartitionManager.java @@ -1,9 +1,7 @@ package gc.mda.signal_batch.global.util; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Profile; import org.springframework.jdbc.core.JdbcTemplate; @@ -26,27 +24,19 @@ import java.util.stream.IntStream; public class PartitionManager { private final JdbcTemplate queryJdbcTemplate; - private final JdbcTemplate collectJdbcTemplate; private final gc.mda.signal_batch.global.config.PartitionRetentionConfig retentionConfig; public PartitionManager( @Qualifier("queryJdbcTemplate") JdbcTemplate queryJdbcTemplate, - @Qualifier("collectJdbcTemplate") JdbcTemplate 
collectJdbcTemplate, gc.mda.signal_batch.global.config.PartitionRetentionConfig retentionConfig) { this.queryJdbcTemplate = queryJdbcTemplate; - this.collectJdbcTemplate = collectJdbcTemplate; this.retentionConfig = retentionConfig; } private static final DateTimeFormatter PARTITION_DATE_FORMAT = DateTimeFormatter.ofPattern("yyMMdd"); private static final DateTimeFormatter PARTITION_MONTH_FORMAT = DateTimeFormatter.ofPattern("yyyy_MM"); - // CollectDB 일별 파티션 테이블 목록 - private static final List COLLECT_DAILY_PARTITION_TABLES = List.of( - "sig_test" - ); - - // QueryDB 일별 파티션 테이블 목록 + // 일별 파티션 테이블 목록 private static final List QUERY_DAILY_PARTITION_TABLES = List.of( "t_vessel_tracks_5min", "t_grid_vessel_tracks", @@ -76,9 +66,6 @@ public class PartitionManager { log.info("========== PartitionManager Initialization =========="); // DataSource 정보 로깅 - log.info("=== Collect DataSource Info ==="); - DataSourceLogger.logJdbcTemplateInfo("PartitionManager-Collect", collectJdbcTemplate); - log.info("=== Query DataSource Info ==="); DataSourceLogger.logJdbcTemplateInfo("PartitionManager-Query", queryJdbcTemplate); @@ -153,32 +140,27 @@ public class PartitionManager { private void checkExistingTables() { log.info("Checking existing tables..."); - // CollectDB 일별 파티션 테이블 확인 - log.info("--- CollectDB Tables ---"); - for (String table : COLLECT_DAILY_PARTITION_TABLES) { - checkTableExists("signal", table, collectJdbcTemplate, "CollectDB"); - } - - // QueryDB 일별 파티션 테이블 확인 - log.info("--- QueryDB Tables ---"); + // 일별 파티션 테이블 확인 + log.info("--- Daily Partition Tables ---"); for (String table : QUERY_DAILY_PARTITION_TABLES) { - checkTableExists("signal", table, queryJdbcTemplate, "QueryDB"); + checkTableExists("signal", table); } // 월별 파티션 테이블 확인 + log.info("--- Monthly Partition Tables ---"); for (String table : MONTHLY_PARTITION_TABLES) { - checkTableExists("signal", table, queryJdbcTemplate, "QueryDB"); + checkTableExists("signal", table); } } - private void 
checkTableExists(String schema, String table, JdbcTemplate jdbcTemplate, String dbType) { + private void checkTableExists(String schema, String table) { String sql = "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = ? AND tablename = ?)"; - Boolean exists = jdbcTemplate.queryForObject(sql, Boolean.class, schema, table); + Boolean exists = queryJdbcTemplate.queryForObject(sql, Boolean.class, schema, table); if (Boolean.TRUE.equals(exists)) { - log.info("[{}] Table exists: {}.{}", dbType, schema, table); + log.info("Table exists: {}.{}", schema, table); } else { - log.warn("[{}] Table NOT found: {}.{}", dbType, schema, table); + log.warn("Table NOT found: {}.{}", schema, table); } } @@ -193,14 +175,8 @@ public class PartitionManager { IntStream.range(0, days).forEach(offset -> { LocalDate targetDate = startDate.plusDays(offset); - // CollectDB 파티션 - for (String table : COLLECT_DAILY_PARTITION_TABLES) { - tasks.add(new PartitionTask("signal", table, targetDate, collectJdbcTemplate, "daily", "CollectDB")); - } - - // QueryDB 파티션 for (String table : QUERY_DAILY_PARTITION_TABLES) { - tasks.add(new PartitionTask("signal", table, targetDate, queryJdbcTemplate, "daily", "QueryDB")); + tasks.add(new PartitionTask("signal", table, targetDate, queryJdbcTemplate, "daily", "DB")); } }); @@ -217,7 +193,7 @@ public class PartitionManager { List tasks = new ArrayList<>(); for (String table : MONTHLY_PARTITION_TABLES) { - tasks.add(new PartitionTask("signal", table, targetMonth, queryJdbcTemplate, "monthly", "QueryDB")); + tasks.add(new PartitionTask("signal", table, targetMonth, queryJdbcTemplate, "monthly", "DB")); } // 병렬 처리 @@ -281,29 +257,14 @@ public class PartitionManager { String baseTable, JdbcTemplate jdbcTemplate) { List indexSqls = new ArrayList<>(); - // sig_test 테이블 - if (baseTable.contains("sig_test")) { - indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_msg_time_idx ON %s.%s (message_time DESC)", - partitionName, schema, 
partitionName - )); - indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_real_time_idx ON %s.%s (real_time DESC)", - partitionName, schema, partitionName - )); - indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_sig_target_idx ON %s.%s (sig_src_cd, target_id)", - partitionName, schema, partitionName - )); - } // 5분 궤적 테이블 - else if (baseTable.equals("t_vessel_tracks_5min")) { + if (baseTable.equals("t_vessel_tracks_5min")) { indexSqls.add(String.format( "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_idx ON %s.%s (time_bucket DESC)", partitionName, schema, partitionName )); indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (mmsi, time_bucket DESC)", partitionName, schema, partitionName )); indexSqls.add(String.format( @@ -312,7 +273,7 @@ public class PartitionManager { )); // 성능 최적화를 위한 복합 인덱스 (WebSocket API 개선) indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, sig_src_cd, target_id) " + + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, mmsi) " + "INCLUDE (distance_nm, avg_speed, max_speed, point_count) WHERE track_geom IS NOT NULL", partitionName, schema, partitionName )); @@ -331,7 +292,7 @@ public class PartitionManager { // 선박별 진입 이력 조회를 위한 인덱스 (순서 있는 다중 해구 진입 체크) if (baseTable.equals("t_grid_vessel_tracks")) { indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_time_idx ON %s.%s (sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_time_idx ON %s.%s (mmsi, time_bucket DESC)", partitionName, schema, partitionName )); // 진입/퇴출 시간 인덱스 @@ -355,12 +316,12 @@ public class PartitionManager { // 선박별 진입 이력 조회를 위한 인덱스 (순서 있는 다중 구역 진입 체크) if (baseTable.equals("t_area_vessel_tracks")) { 
indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_time_idx ON %s.%s (sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_time_idx ON %s.%s (mmsi, time_bucket DESC)", partitionName, schema, partitionName )); // 다중 구역 순차 진입 체크를 위한 복합 인덱스 indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_area_vessel_time_idx ON %s.%s (area_id, sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_area_vessel_time_idx ON %s.%s (area_id, mmsi, time_bucket DESC)", partitionName, schema, partitionName )); } @@ -391,13 +352,13 @@ public class PartitionManager { )); if (baseTable.contains("vessel_tracks")) { indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (mmsi, time_bucket DESC)", partitionName, schema, partitionName )); // 성능 최적화를 위한 복합 인덱스 (WebSocket API 개선) if (baseTable.equals("t_vessel_tracks_hourly")) { indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, sig_src_cd, target_id) " + + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, mmsi) " + "INCLUDE (distance_nm, avg_speed, max_speed, point_count) WHERE track_geom IS NOT NULL", partitionName, schema, partitionName )); @@ -416,13 +377,13 @@ public class PartitionManager { )); if (baseTable.contains("vessel_tracks")) { indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (sig_src_cd, target_id, time_bucket DESC)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (mmsi, time_bucket DESC)", partitionName, schema, partitionName )); // 성능 최적화를 위한 복합 인덱스 (WebSocket API 개선) if (baseTable.equals("t_vessel_tracks_daily")) { indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT 
EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, sig_src_cd, target_id) " + + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_time_vessel_include_idx ON %s.%s (time_bucket, mmsi) " + "INCLUDE (distance_nm, avg_speed, max_speed, point_count) WHERE track_geom IS NOT NULL", partitionName, schema, partitionName )); @@ -440,7 +401,7 @@ public class PartitionManager { partitionName, schema, partitionName )); indexSqls.add(String.format( - "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (sig_src_cd, target_id)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS %s_vessel_idx ON %s.%s (mmsi)", partitionName, schema, partitionName )); indexSqls.add(String.format( @@ -475,36 +436,20 @@ public class PartitionManager { log.info("Cleanup Date: {}", today); try { - // CollectDB 일별 파티션 테이블 정리 - log.info("--- CollectDB Daily Partition Tables ---"); - for (String tableName : COLLECT_DAILY_PARTITION_TABLES) { - int retentionDays = retentionConfig.getRetentionDays(tableName); - - if (retentionDays <= 0) { - log.info("[CollectDB:{}] Unlimited retention (days={}). Skipping cleanup.", tableName, retentionDays); - continue; - } - - LocalDate cutoffDate = today.minusDays(retentionDays); - log.info("[CollectDB:{}] Retention: {} days, Cutoff: {}", tableName, retentionDays, cutoffDate); - - dropPartitionsForTable(tableName, cutoffDate, collectJdbcTemplate, "CollectDB"); - } - - // QueryDB 일별 파티션 테이블 정리 - log.info("--- QueryDB Daily Partition Tables ---"); + // 일별 파티션 테이블 정리 + log.info("--- Daily Partition Tables ---"); for (String tableName : QUERY_DAILY_PARTITION_TABLES) { int retentionDays = retentionConfig.getRetentionDays(tableName); if (retentionDays <= 0) { - log.info("[QueryDB:{}] Unlimited retention (days={}). Skipping cleanup.", tableName, retentionDays); + log.info("[{}] Unlimited retention (days={}). 
Skipping cleanup.", tableName, retentionDays); continue; } LocalDate cutoffDate = today.minusDays(retentionDays); - log.info("[QueryDB:{}] Retention: {} days, Cutoff: {}", tableName, retentionDays, cutoffDate); + log.info("[{}] Retention: {} days, Cutoff: {}", tableName, retentionDays, cutoffDate); - dropPartitionsForTable(tableName, cutoffDate, queryJdbcTemplate, "QueryDB"); + dropPartitionsForTable(tableName, cutoffDate, queryJdbcTemplate, "DB"); } // QueryDB 월별 파티션 테이블 정리 @@ -514,15 +459,15 @@ public class PartitionManager { int retentionMonths = retentionConfig.getRetentionMonths(tableName); if (retentionMonths <= 0) { - log.info("[QueryDB:{}] Unlimited retention (months={}). Skipping cleanup.", tableName, retentionMonths); + log.info("[{}] Unlimited retention (months={}). Skipping cleanup.", tableName, retentionMonths); continue; } // 월 단위 계산: N개월 전의 1일 LocalDate cutoffDate = today.minusMonths(retentionMonths).withDayOfMonth(1); - log.info("[QueryDB:{}] Retention: {} months, Cutoff: {}", tableName, retentionMonths, cutoffDate); + log.info("[{}] Retention: {} months, Cutoff: {}", tableName, retentionMonths, cutoffDate); - dropPartitionsForTable(tableName, cutoffDate, queryJdbcTemplate, "QueryDB"); + dropPartitionsForTable(tableName, cutoffDate, queryJdbcTemplate, "DB"); } log.info("========== Partition Cleanup Completed =========="); diff --git a/src/main/java/gc/mda/signal_batch/global/util/SharedDataJobListener.java b/src/main/java/gc/mda/signal_batch/global/util/SharedDataJobListener.java deleted file mode 100644 index 487aa40..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/SharedDataJobListener.java +++ /dev/null @@ -1,104 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.JobExecution; -import org.springframework.batch.core.JobExecutionListener; -import 
org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; - -import java.sql.Timestamp; -import java.time.LocalDateTime; -import java.util.List; - - -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -@Slf4j -public class SharedDataJobListener implements JobExecutionListener { - - private final JdbcTemplate collectJdbcTemplate; - private final VesselDataHolder dataHolder; - - @Override - public void beforeJob(JobExecution jobExecution) { - try { - LocalDateTime startTime = LocalDateTime.parse( - jobExecution.getJobParameters().getString("startTime") - ); - LocalDateTime endTime = LocalDateTime.parse( - jobExecution.getJobParameters().getString("endTime") - ); - - // 최신 위치 데이터 한 번만 로드 - String sql = """ - WITH latest_positions AS ( - SELECT DISTINCT ON (sig_src_cd, target_id) - sig_src_cd, target_id, lat, lon, - sog, cog, heading, ship_nm, ship_ty, - message_time, real_time, mmsi, vpass_id, ship_no - FROM signal.sig_test - WHERE message_time >= ? AND message_time < ? 
- AND sig_src_cd != '000005' - AND length(target_id) > 5 - ORDER BY sig_src_cd, target_id, message_time DESC - ) - SELECT * FROM latest_positions - """; - - List vesselData = collectJdbcTemplate.query(sql, - ps -> { - ps.setTimestamp(1, Timestamp.valueOf(startTime)); - ps.setTimestamp(2, Timestamp.valueOf(endTime)); - }, - (rs, rowNum) -> { - VesselData data = VesselData.builder() - .sigSrcCd(rs.getString("sig_src_cd")) - .targetId(rs.getString("target_id")) - .lat(rs.getDouble("lat")) - .lon(rs.getDouble("lon")) - .sog(rs.getBigDecimal("sog")) - .cog(rs.getBigDecimal("cog")) - .shipNm(rs.getString("ship_nm")) - .shipTy(rs.getString("ship_ty")) - .messageTime(rs.getTimestamp("message_time").toLocalDateTime()) - .realTime(rs.getTimestamp("real_time").toLocalDateTime()) - .mmsi(rs.getString("mmsi")) - .vpassId(rs.getString("vpass_id")) - .shipNo(rs.getString("ship_no")) - .build(); - - // heading 처리 - numeric 타입을 Integer로 변환 - Object headingValue = rs.getObject("heading"); - if (headingValue != null && !rs.wasNull()) { - if (headingValue instanceof java.math.BigDecimal) { - data.setHeading(((java.math.BigDecimal) headingValue).intValue()); - } else if (headingValue instanceof Number) { - data.setHeading(((Number) headingValue).intValue()); - } - } - - return data; - } - ); - - dataHolder.setData(vesselData); - jobExecution.getExecutionContext().putInt("totalCount", vesselData.size()); - - log.info("Loaded {} vessel positions for job execution", vesselData.size()); - - } catch (Exception e) { - log.error("Failed to load vessel data", e); - throw new RuntimeException("Failed to load vessel data", e); - } - } - - @Override - public void afterJob(JobExecution jobExecution) { - dataHolder.clear(); - log.info("Cleared vessel data from memory"); - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/util/ShipKindCodeConverter.java b/src/main/java/gc/mda/signal_batch/global/util/ShipKindCodeConverter.java deleted file mode 100644 index 
44b0685..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/ShipKindCodeConverter.java +++ /dev/null @@ -1,222 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import java.util.HashMap; -import java.util.Map; - - -/** - * sig_src_cd와 shipType을 조합하여 shipKindCode로 변환하는 유틸리티 - */ -public class ShipKindCodeConverter { - - private static final Map SHIP_KIND_MAP = new HashMap<>(); - - static { - // 어선 (000020) - SHIP_KIND_MAP.put("000001_30", "000020"); // AIS - 어선 - SHIP_KIND_MAP.put("000004_30", "000020"); // VTS_AIS - 어선 - SHIP_KIND_MAP.put("000002_B005", "000020"); // ENVI - 어선(채낚기) - SHIP_KIND_MAP.put("000002_B009", "000020"); // ENVI - 어선(복합어업) - SHIP_KIND_MAP.put("000002_B001", "000020"); // ENVI - 어선(일반) - SHIP_KIND_MAP.put("000002_B008", "000020"); // ENVI - 어선(통발) - SHIP_KIND_MAP.put("000002_B002", "000020"); // ENVI - 어선(유자망) - SHIP_KIND_MAP.put("000002_B004", "000020"); // ENVI - 어선(안강망) - SHIP_KIND_MAP.put("000002_B007", "000020"); // ENVI - 어선(트롤) - SHIP_KIND_MAP.put("000002_B006", "000020"); // ENVI - 어선(연승) - SHIP_KIND_MAP.put("000002_B003", "000020"); // ENVI - 어선(선망) - SHIP_KIND_MAP.put("000002_B016", "000020"); // ENVI - 어선(원양트롤어업) - SHIP_KIND_MAP.put("000002_B014", "000020"); // ENVI - 어선(원양참치연승어업) - SHIP_KIND_MAP.put("000002_B019", "000020"); // ENVI - 어선(원양통발어업) - SHIP_KIND_MAP.put("000002_B012", "000020"); // ENVI - 어선(권현망) - SHIP_KIND_MAP.put("000002_B013", "000020"); // ENVI - 어선(원양어선) - SHIP_KIND_MAP.put("000002_B021", "000020"); // ENVI - 어선(원양봉수망어업) - SHIP_KIND_MAP.put("000002_B017", "000020"); // ENVI - 어선(원양저인망어업) - SHIP_KIND_MAP.put("000002_B020", "000020"); // ENVI - 어선(원양저연승어업) - SHIP_KIND_MAP.put("000002_B023", "000020"); // ENVI - 어선(원양어업운반선) - SHIP_KIND_MAP.put("000002_B015", "000020"); // ENVI - 어선(원양선망어업) - SHIP_KIND_MAP.put("000002_B010", "000020"); // ENVI - 어선(저인망) - SHIP_KIND_MAP.put("000002_B011", "000020"); // ENVI - 어선(자망) - SHIP_KIND_MAP.put("000002_B018", "000020"); // ENVI - 어선(원양채낚기어업) - 
SHIP_KIND_MAP.put("000002_B022", "000020"); // ENVI - 어선(원양모선식어업) - SHIP_KIND_MAP.put("000003_00", "000020"); // VPASS - - // 함정 (000021) - SHIP_KIND_MAP.put("000001_51", "000021"); // AIS - SHIP_KIND_MAP.put("000001_35", "000021"); // AIS - SHIP_KIND_MAP.put("000004_51", "000021"); // VTS_AIS - SHIP_KIND_MAP.put("000004_35", "000021"); // VTS_AIS - - // 여객선 (000022) - for (int i = 60; i <= 69; i++) { - SHIP_KIND_MAP.put("000001_" + i, "000022"); // AIS - SHIP_KIND_MAP.put("000004_" + i, "000022"); // VTS_AIS - } - SHIP_KIND_MAP.put("000002_A006", "000022"); // ENVI - 여객선(차도선) - SHIP_KIND_MAP.put("000002_A007", "000022"); // ENVI - 여객선(화객선) - SHIP_KIND_MAP.put("000002_A001", "000022"); // ENVI - 여객선(일반) - SHIP_KIND_MAP.put("000002_A002", "000022"); // ENVI - 여객선(고속선) - SHIP_KIND_MAP.put("000002_A005", "000022"); // ENVI - 여객선(카훼리) - SHIP_KIND_MAP.put("000002_A003", "000022"); // ENVI - 여객선(쾌속선) - SHIP_KIND_MAP.put("000002_A004", "000022"); // ENVI - 여객선(초쾌속선) - // KSU는 SignalSourceCode에 없으므로 생략 - - // 화물선 (000023) - for (int i = 70; i <= 79; i++) { - SHIP_KIND_MAP.put("000001_" + i, "000023"); // AIS - SHIP_KIND_MAP.put("000004_" + i, "000023"); // VTS_AIS - } - SHIP_KIND_MAP.put("000002_C018", "000023"); // ENVI - 화물선(기타 유조선) - SHIP_KIND_MAP.put("000002_C007", "000023"); // ENVI - 화물선(시멘트운반선) - SHIP_KIND_MAP.put("000002_C021", "000023"); // ENVI - 화물선(LPG 운반선) - SHIP_KIND_MAP.put("000002_C013", "000023"); // ENVI - 화물선(코일운반선-RORO선) - SHIP_KIND_MAP.put("000002_C005", "000023"); // ENVI - 화물선(광목운반선) - SHIP_KIND_MAP.put("000002_C015", "000023"); // ENVI - 화물선(컨테이너선) - SHIP_KIND_MAP.put("000002_C008", "000023"); // ENVI - 화물선(자동차운반선) - SHIP_KIND_MAP.put("000002_C010", "000023"); // ENVI - 화물선(철강재운반선) - SHIP_KIND_MAP.put("000002_C003", "000023"); // ENVI - 화물선(양곡운반선) - SHIP_KIND_MAP.put("000002_C012", "000023"); // ENVI - 화물선(폐기물운반선) - SHIP_KIND_MAP.put("000002_C016", "000023"); // ENVI - 화물선(원유운반선) - SHIP_KIND_MAP.put("000002_C001", "000023"); // ENVI - 화물선(일반) - 
SHIP_KIND_MAP.put("000002_C023", "000023"); // ENVI - 화물선(일반탱커) - SHIP_KIND_MAP.put("000002_C022", "000023"); // ENVI - 화물선(LNG 운반선) - SHIP_KIND_MAP.put("000002_C009", "000023"); // ENVI - 화물선(핫코일운반선) - SHIP_KIND_MAP.put("000002_C011", "000023"); // ENVI - 화물선(모래운반선) - SHIP_KIND_MAP.put("000002_C004", "000023"); // ENVI - 화물선(원목운반선) - SHIP_KIND_MAP.put("000002_C002", "000023"); // ENVI - 화물선(벌크선) - SHIP_KIND_MAP.put("000002_C014", "000023"); // ENVI - 화물선(냉동, 냉장선) - SHIP_KIND_MAP.put("000002_C017", "000023"); // ENVI - 화물선(석유제품 운반선) - SHIP_KIND_MAP.put("000002_C006", "000023"); // ENVI - 화물선(석탄운반선) - SHIP_KIND_MAP.put("000002_C019", "000023"); // ENVI - 화물선(케미칼 운반선) - SHIP_KIND_MAP.put("000002_C024", "000023"); // ENVI - 화물선(세미 컨테이너선) - - // 유조선 (000024) - for (int i = 80; i <= 89; i++) { - SHIP_KIND_MAP.put("000001_" + i, "000024"); // AIS - SHIP_KIND_MAP.put("000004_" + i, "000024"); // VTS_AIS - } - - // 관공선 (000025) - SHIP_KIND_MAP.put("000001_59", "000025"); // AIS - SHIP_KIND_MAP.put("000002_D008", "000025"); // ENVI - 관공선(방제선) - SHIP_KIND_MAP.put("000002_D006", "000025"); // ENVI - 관공선(군선) - SHIP_KIND_MAP.put("000002_D002", "000025"); // ENVI - 관공선(해경정) - SHIP_KIND_MAP.put("000002_D004", "000025"); // ENVI - 관공선(지도선) - SHIP_KIND_MAP.put("000002_D003", "000025"); // ENVI - 관공선(시험조사선) - SHIP_KIND_MAP.put("000002_D009", "000025"); // ENVI - 관공선(의료선) - SHIP_KIND_MAP.put("000002_D007", "000025"); // ENVI - 관공선(해경항공기) - SHIP_KIND_MAP.put("000002_D001", "000025"); // ENVI - 관공선(일반) - SHIP_KIND_MAP.put("000002_D005", "000025"); // ENVI - 관공선(시험선) - } - - /** - * sig_src_cd와 shipType을 조합하여 shipKindCode를 반환 - * - * @param sigSrcCd 신호 소스 코드 (ex: 000001, 000002, ...) - * @param shipType 선박 타입 (ex: 30, B005, ...) - * @return shipKindCode (ex: 000020, 000021, ...) 
매칭되지 않으면 000027(기타) - */ - public static String getShipKindCode(String sigSrcCd, String shipType) { - if (sigSrcCd == null || shipType == null) { - return "000027"; // 기타 - } - - String key = sigSrcCd + "_" + shipType; - return SHIP_KIND_MAP.getOrDefault(key, "000027"); // 기본값: 기타 - } - - /** - * sig_src_cd, shipType, shipName, targetId를 조합하여 shipKindCode를 반환 - * 선박명 패턴 매칭을 통해 어망/부이(000028) 우선 판별 - * - * @param sigSrcCd 신호 소스 코드 (ex: 000001, 000004, ...) - * @param shipType 선박 타입 (ex: 30, B005, ...) - * @param shipName 선박명 (ex: "부이-123", "어망.설치선", ...) - * @param targetId 타겟 ID (MMSI 등, ex: "440123456", "123456789") - * @return shipKindCode (ex: 000020, 000028, ...) 매칭되지 않으면 000027(기타) - */ - public static String getShipKindCodeWithNamePattern(String sigSrcCd, String shipType, String shipName, String targetId) { - // 1. 어망/부이 패턴 체크 조건: - // - AIS(000001) 또는 VTS-AIS(000004) - // - 한국 국적 선박 제외 (target_id가 440 또는 441로 시작하지 않음) - // - 선박명에 어망/부이 패턴 포함 - if (isAisOrVtsAis(sigSrcCd) && !isKoreanVessel(targetId) && containsBuoyPattern(shipName)) { - return "000028"; // 어망/부이 - } - - // 2. 
기존 로직 수행 - return getShipKindCode(sigSrcCd, shipType); - } - - /** - * 하위 호환성을 위한 오버로드 메서드 (targetId 없이 호출 시) - * @deprecated targetId를 포함한 메서드 사용 권장 - */ - @Deprecated - public static String getShipKindCodeWithNamePattern(String sigSrcCd, String shipType, String shipName) { - return getShipKindCodeWithNamePattern(sigSrcCd, shipType, shipName, null); - } - - /** - * AIS(000001) 또는 VTS-AIS(000004) 신호원인지 확인 - */ - private static boolean isAisOrVtsAis(String sigSrcCd) { - return "000001".equals(sigSrcCd) || "000004".equals(sigSrcCd); - } - - /** - * 한국 국적 선박인지 확인 (MMSI가 440 또는 441로 시작) - * 한국 국적 선박은 선박명에 특수문자가 포함되어도 어망/부이가 아님 - */ - private static boolean isKoreanVessel(String targetId) { - if (targetId == null || targetId.length() < 3) { - return false; - } - String prefix = targetId.substring(0, 3); - return "440".equals(prefix) || "441".equals(prefix); - } - - /** - * 선박명에 어망/부이 패턴이 포함되어 있는지 확인 - * - * 패턴 규칙: - * - '%' 포함 시 → 어망/부이 - * - '-'와 '.'가 동시에 포함 시 → 어망/부이 (예: "ABC-5.5", "ABC.5-5") - * - '-'만 또는 '.'만 포함 시 → 일반 선박 (예: "ABC NO.5", "S-92") - */ - private static boolean containsBuoyPattern(String shipName) { - if (shipName == null || shipName.isEmpty()) { - return false; - } - - // '%' 포함 시 어망/부이 - if (shipName.contains("%")) { - return true; - } - - // '-'와 '.'가 동시에 포함될 때만 어망/부이 - boolean hasDash = shipName.contains("-"); - boolean hasDot = shipName.contains("."); - - return hasDash && hasDot; - } - - /** - * 선박 종류 명칭 반환 - * - * @param shipKindCode 선박 종류 코드 - * @return 선박 종류 명칭 - */ - public static String getShipKindName(String shipKindCode) { - switch (shipKindCode) { - case "000020": return "어선"; - case "000021": return "함정"; - case "000022": return "여객선"; - case "000023": return "화물선"; - case "000024": return "유조선"; - case "000025": return "관공선"; - case "000027": return "기타"; - case "000028": return "어망/부이"; - default: return "기타"; - } - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/util/SignalKindCode.java 
b/src/main/java/gc/mda/signal_batch/global/util/SignalKindCode.java new file mode 100644 index 0000000..cb4b6d6 --- /dev/null +++ b/src/main/java/gc/mda/signal_batch/global/util/SignalKindCode.java @@ -0,0 +1,118 @@ +package gc.mda.signal_batch.global.util; + +import lombok.Getter; +import lombok.RequiredArgsConstructor; + +/** + * MDA 선종 범례코드 + * + * S&P Global AIS API의 vesselType + extraInfo를 기반으로 + * MDA 범례코드(signalKindCode)로 치환한다. + * + * ShipKindCodeConverter를 대체하며, SNP-Batch-1의 치환 로직을 이식. + */ +@Getter +@RequiredArgsConstructor +public enum SignalKindCode { + + FISHING("000020", "어선"), + KCGV("000021", "함정"), + FERRY("000022", "여객선"), + CARGO("000023", "카고"), + TANKER("000024", "탱커"), + GOV("000025", "관공선"), + DEFAULT("000027", "일반/기타선박"), + BUOY("000028", "부이/항로표지"); + + private final String code; + private final String koreanName; + + /** + * vesselType + extraInfo → MDA 범례코드 치환 + * + * 치환 우선순위: + * 1. vesselType 단독 매칭 (Cargo, Tanker, Passenger, AtoN 등) + * 2. vesselType + extraInfo 조합 매칭 (Vessel + Fishing 등) + * 3. fallback → DEFAULT (000027) + */ + public static SignalKindCode resolve(String vesselType, String extraInfo) { + String vt = normalizeOrEmpty(vesselType); + String ei = normalizeOrEmpty(extraInfo); + + // 1. vesselType 단독 매칭 + switch (vt) { + case "cargo": + return CARGO; + case "tanker": + return TANKER; + case "passenger": + return FERRY; + case "aton": + return BUOY; + case "law enforcement": + return GOV; + case "search and rescue": + return KCGV; + case "local vessel": + return FISHING; + default: + break; + } + + // vesselType 그룹 매칭 + if (matchesAny(vt, "tug", "pilot boat", "tender", "anti pollution", "medical transport")) { + return GOV; + } + if (matchesAny(vt, "high speed craft", "wing in ground-effect")) { + return FERRY; + } + + // 2. "Vessel" + extraInfo 조합 + if ("vessel".equals(vt)) { + return resolveVesselExtraInfo(ei); + } + + // 3. 
"N/A" + extraInfo 조합 + if ("n/a".equals(vt)) { + if (ei.startsWith("hazardous cat")) { + return CARGO; + } + return DEFAULT; + } + + // 4. fallback + return DEFAULT; + } + + private static SignalKindCode resolveVesselExtraInfo(String extraInfo) { + if ("fishing".equals(extraInfo)) { + return FISHING; + } + if ("military operations".equals(extraInfo)) { + return GOV; + } + if (matchesAny(extraInfo, "towing", "towing (large)", "dredging/underwater ops", "diving operations")) { + return GOV; + } + if (matchesAny(extraInfo, "pleasure craft", "sailing", "n/a")) { + return FISHING; + } + if (extraInfo.startsWith("hazardous cat")) { + return CARGO; + } + return DEFAULT; + } + + private static boolean matchesAny(String value, String... candidates) { + for (String candidate : candidates) { + if (candidate.equals(value)) { + return true; + } + } + return false; + } + + private static String normalizeOrEmpty(String value) { + return (value == null || value.isBlank()) ? "" : value.strip().toLowerCase(); + } +} diff --git a/src/main/java/gc/mda/signal_batch/global/util/TrackClippingUtils.java b/src/main/java/gc/mda/signal_batch/global/util/TrackClippingUtils.java index d69abbc..3fd6585 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/TrackClippingUtils.java +++ b/src/main/java/gc/mda/signal_batch/global/util/TrackClippingUtils.java @@ -137,8 +137,7 @@ public class TrackClippingUtils { String areaId) { // 새로운 track 생성 VesselTrack clippedTrack = VesselTrack.builder() - .sigSrcCd(originalTrack.getSigSrcCd()) - .targetId(originalTrack.getTargetId()) + .mmsi(originalTrack.getMmsi()) .timeBucket(originalTrack.getTimeBucket()) .trackPoints(filteredPoints) .pointCount(filteredPoints.size()) diff --git a/src/main/java/gc/mda/signal_batch/global/util/TrackConverter.java b/src/main/java/gc/mda/signal_batch/global/util/TrackConverter.java index be80c1b..403719d 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/TrackConverter.java +++ 
b/src/main/java/gc/mda/signal_batch/global/util/TrackConverter.java @@ -26,7 +26,7 @@ public class TrackConverter { * - LineStringM을 geometry/timestamps/speeds 배열로 파싱 * * @param trackResponses TrackResponse 리스트 (세그먼트별 분리) - * @param vesselInfoProvider 선박 정보 조회 함수 (sigSrcCd, targetId → VesselInfo) + * @param vesselInfoProvider 선박 정보 조회 함수 (mmsi → VesselInfo) * @return CompactVesselTrack 리스트 (선박별 병합) */ public static List convert( @@ -37,10 +37,10 @@ public class TrackConverter { return Collections.emptyList(); } - // 1. 선박별로 그룹핑 + // 1. 선박별로 그룹핑 (mmsi 기반) Map> byVessel = trackResponses.stream() .collect(Collectors.groupingBy( - t -> t.getSigSrcCd() + "_" + t.getTargetId(), + TrackResponse::getMmsi, LinkedHashMap::new, Collectors.toList() )); @@ -49,10 +49,10 @@ public class TrackConverter { List result = new ArrayList<>(); for (Map.Entry> entry : byVessel.entrySet()) { - String vesselId = entry.getKey(); + String mmsi = entry.getKey(); List segments = entry.getValue(); - CompactVesselTrack compactTrack = mergeSegments(vesselId, segments, vesselInfoProvider); + CompactVesselTrack compactTrack = mergeSegments(mmsi, segments, vesselInfoProvider); if (compactTrack != null && compactTrack.getPointCount() > 0) { result.add(compactTrack); } @@ -65,7 +65,7 @@ public class TrackConverter { * 단일 선박의 여러 세그먼트를 하나의 CompactVesselTrack으로 병합 */ private static CompactVesselTrack mergeSegments( - String vesselId, + String mmsi, List segments, VesselInfoProvider vesselInfoProvider) { @@ -76,9 +76,6 @@ public class TrackConverter { // 시간순 정렬 segments.sort(Comparator.comparing(TrackResponse::getTimeBucket)); - String sigSrcCd = segments.get(0).getSigSrcCd(); - String targetId = segments.get(0).getTargetId(); - // 누적 데이터 List allGeometry = new ArrayList<>(); List allTimestamps = new ArrayList<>(); @@ -136,21 +133,17 @@ public class TrackConverter { } } catch (ParseException e) { - log.warn("Failed to parse LineStringM for vessel {}: {}", vesselId, e.getMessage()); + log.warn("Failed to 
parse LineStringM for vessel {}: {}", mmsi, e.getMessage()); } } - // 선박 정보 조회 (geometry가 비어있어도 선박 객체는 생성) + // 선박 정보 조회 VesselInfo vesselInfo = vesselInfoProvider != null - ? vesselInfoProvider.getVesselInfo(sigSrcCd, targetId) + ? vesselInfoProvider.getVesselInfo(mmsi) : new VesselInfo("-", "-"); - // 국적 코드 계산 - String nationalCode = NationalCodeUtil.calculateNationalCode(sigSrcCd, targetId); - - // shipKindCode 계산 - String shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - sigSrcCd, vesselInfo.getShipType(), vesselInfo.getShipName(), targetId); + // 국적 코드: MMSI 앞 3자리 (MID) + String nationalCode = mmsi != null && mmsi.length() >= 3 ? mmsi.substring(0, 3) : "-"; // 평균 속도 계산 double avgSpeed = allSpeeds.stream() @@ -160,9 +153,7 @@ public class TrackConverter { .orElse(0.0); return CompactVesselTrack.builder() - .vesselId(vesselId) - .sigSrcCd(sigSrcCd) - .targetId(targetId) + .vesselId(mmsi) .nationalCode(nationalCode) .geometry(allGeometry) .timestamps(allTimestamps) @@ -173,7 +164,6 @@ public class TrackConverter { .pointCount(allGeometry.size()) .shipName(vesselInfo.getShipName()) .shipType(vesselInfo.getShipType()) - .shipKindCode(shipKindCode) .build(); } @@ -201,7 +191,7 @@ public class TrackConverter { */ @FunctionalInterface public interface VesselInfoProvider { - VesselInfo getVesselInfo(String sigSrcCd, String targetId); + VesselInfo getVesselInfo(String mmsi); } /** diff --git a/src/main/java/gc/mda/signal_batch/global/util/VesselDataHolder.java b/src/main/java/gc/mda/signal_batch/global/util/VesselDataHolder.java deleted file mode 100644 index 4a1a442..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/VesselDataHolder.java +++ /dev/null @@ -1,41 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.extern.slf4j.Slf4j; -import org.springframework.stereotype.Component; - -import java.time.LocalDateTime; -import java.util.ArrayList; -import 
java.util.List; - -@Component -@Slf4j -public class VesselDataHolder { - private List latestPositions; - private LocalDateTime loadTime; - - public synchronized void setData(List data) { - this.latestPositions = new ArrayList<>(data); - this.loadTime = LocalDateTime.now(); - log.info("Loaded {} vessel positions into memory", data.size()); - } - - public synchronized List getData() { - return latestPositions != null ? new ArrayList<>(latestPositions) : new ArrayList<>(); - } - - public boolean isDataStale(int maxAgeMinutes) { - return loadTime == null || - loadTime.isBefore(LocalDateTime.now().minusMinutes(maxAgeMinutes)); - } - - public void clear() { - latestPositions = null; - loadTime = null; - } - - public synchronized int size() { - return latestPositions != null ? latestPositions.size() : 0; - } - -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackConverter.java b/src/main/java/gc/mda/signal_batch/global/util/VesselTrackConverter.java index 11077d6..ff1a16e 100644 --- a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackConverter.java +++ b/src/main/java/gc/mda/signal_batch/global/util/VesselTrackConverter.java @@ -58,8 +58,6 @@ public class VesselTrackConverter { return CompactVesselTrack.builder() .vesselId(merged.getVesselId()) - .sigSrcCd(merged.getSigSrcCd()) - .targetId(merged.getTargetId()) .geometry(geometry) .timestamps(timestamps) .speeds(speeds) diff --git a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataHolder.java b/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataHolder.java deleted file mode 100644 index 6814fff..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataHolder.java +++ /dev/null @@ -1,28 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import lombok.Getter; -import lombok.Setter; -import org.springframework.stereotype.Component; - -import java.util.ArrayList; -import 
java.util.List; - -@Component -@Getter -@Setter -public class VesselTrackDataHolder { - private List allVesselData = new ArrayList<>(); - - public void clear() { - allVesselData.clear(); - } - - public void setData(List data) { - this.allVesselData = new ArrayList<>(data); - } - - public int size() { - return allVesselData.size(); - } -} diff --git a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataJobListener.java b/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataJobListener.java deleted file mode 100644 index 0764353..0000000 --- a/src/main/java/gc/mda/signal_batch/global/util/VesselTrackDataJobListener.java +++ /dev/null @@ -1,150 +0,0 @@ -package gc.mda.signal_batch.global.util; - -import gc.mda.signal_batch.domain.vessel.model.VesselData; -import gc.mda.signal_batch.domain.gis.cache.AreaBoundaryCache; -import gc.mda.signal_batch.domain.vessel.service.VesselPreviousBucketCache; - -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.JobExecution; -import org.springframework.batch.core.JobExecutionListener; -import org.springframework.batch.core.annotation.BeforeJob; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.time.LocalDateTime; -import java.util.List; - - -@Slf4j -@Component -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class VesselTrackDataJobListener implements JobExecutionListener { - - private final JdbcTemplate collectJdbcTemplate; - private final VesselTrackDataHolder vesselTrackDataHolder; - private final AreaBoundaryCache areaBoundaryCache; - private final 
VesselPreviousBucketCache previousBucketCache; - - @Value("${vessel.batch.fetch-size:50000}") - private int fetchSize; - - @BeforeJob - public void beforeJob(JobExecution jobExecution) { - // Area/Haegu 경계 캐시 갱신 - areaBoundaryCache.refresh(); - log.info("Refreshed area boundary cache"); - - // 이전 버킷 캐시 Fallback 플래그 리셋 (새 Job 실행 시 1회만 DB 조회) - previousBucketCache.resetFallbackFlag(); - log.info("Reset previous bucket cache fallback flag"); - - LocalDateTime startTime = LocalDateTime.parse( - jobExecution.getJobParameters().getString("startTime")); - LocalDateTime endTime = LocalDateTime.parse( - jobExecution.getJobParameters().getString("endTime")); - - log.info("Loading all vessel data for track generation from {} to {}", startTime, endTime); - - // 5분간 전체 데이터 조회 (최신 위치가 아닌 모든 포인트) - String sql = """ - SELECT message_time, real_time, sig_src_cd, target_id, - lat, lon, sog, cog, heading, ship_nm, ship_ty, - rot, posacc, sensor_id, base_st_id, mode, - gps_sttus, battery_sttus, vts_cd, mmsi, vpass_id, ship_no - FROM signal.sig_test - WHERE message_time >= ? AND message_time < ? 
- AND lat IS NOT NULL AND lon IS NOT NULL - AND lat BETWEEN -90 AND 90 AND lon BETWEEN -180 AND 180 - AND sig_src_cd != '000005' - AND length(target_id) > 5 - ORDER BY sig_src_cd, target_id, message_time - """; - - collectJdbcTemplate.setFetchSize(fetchSize); - - List vesselDataList = collectJdbcTemplate.query( - sql, - new Object[]{Timestamp.valueOf(startTime), Timestamp.valueOf(endTime)}, - (rs, rowNum) -> { - VesselData data = new VesselData(); - - Timestamp messageTime = rs.getTimestamp("message_time"); - if (messageTime != null) { - data.setMessageTime(messageTime.toLocalDateTime()); - } - - Timestamp realTime = rs.getTimestamp("real_time"); - if (realTime != null) { - data.setRealTime(realTime.toLocalDateTime()); - } - - data.setSigSrcCd(rs.getString("sig_src_cd")); - data.setTargetId(rs.getString("target_id")); - data.setLat(rs.getDouble("lat")); - data.setLon(rs.getDouble("lon")); - data.setSog(rs.getBigDecimal("sog")); - data.setCog(rs.getBigDecimal("cog")); - data.setHeading(getIntegerFromNumeric(rs, "heading")); - data.setShipNm(rs.getString("ship_nm")); - data.setShipTy(rs.getString("ship_ty")); - data.setRot(getIntegerFromNumeric(rs, "rot")); - data.setPosacc(getIntegerFromNumeric(rs, "posacc")); - data.setSensorId(rs.getString("sensor_id")); - data.setBaseStId(rs.getString("base_st_id")); - data.setMode(getIntegerFromNumeric(rs, "mode")); - data.setGpsSttus(getIntegerFromNumeric(rs, "gps_sttus")); - data.setBatterySttus(getIntegerFromNumeric(rs, "battery_sttus")); - data.setVtsCd(rs.getString("vts_cd")); - data.setMmsi(rs.getString("mmsi")); - data.setVpassId(rs.getString("vpass_id")); - data.setShipNo(rs.getString("ship_no")); - - return data; - } - ); - - vesselTrackDataHolder.setData(vesselDataList); - log.info("Loaded {} vessel track points for {} to {}", - vesselDataList.size(), startTime, endTime); - } - - @Override - public void afterJob(JobExecution jobExecution) { - // DB 조회 통계 출력 - previousBucketCache.logJobStatistics(); - - // 데이터 정리 - 
vesselTrackDataHolder.clear(); - log.debug("Cleared vessel track data after job completion"); - } - - private Integer getIntegerFromNumeric(ResultSet rs, String columnName) throws SQLException { - Object value = rs.getObject(columnName); - if (value == null || rs.wasNull()) { - return null; - } - - if (value instanceof java.math.BigDecimal) { - return ((java.math.BigDecimal) value).intValue(); - } else if (value instanceof Integer) { - return (Integer) value; - } else if (value instanceof Number) { - return ((Number) value).intValue(); - } else if (value instanceof String) { - try { - return Integer.parseInt((String) value); - } catch (NumberFormatException e) { - return null; - } - } - - return null; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/dto/MergedVesselTrack.java b/src/main/java/gc/mda/signal_batch/global/websocket/dto/MergedVesselTrack.java index 44389f4..e429f57 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/dto/MergedVesselTrack.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/dto/MergedVesselTrack.java @@ -16,10 +16,9 @@ import java.util.List; @NoArgsConstructor @AllArgsConstructor public class MergedVesselTrack { - private String sigSrcCd; - private String targetId; - private String nationalCode; // National code based on sigSrcCd and targetId - private String vesselId; // sigSrcCd_targetId + private String mmsi; + private String nationalCode; + private String vesselId; // mmsi private String mergedTrackGeom; // 병합된 전체 궤적 (LineStringM) private Double totalDistanceNm; private Double avgSpeed; diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/dto/ProcessedTrackData.java b/src/main/java/gc/mda/signal_batch/global/websocket/dto/ProcessedTrackData.java index 78fd058..9631561 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/dto/ProcessedTrackData.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/dto/ProcessedTrackData.java @@ -9,9 
+9,8 @@ import java.time.LocalDateTime; */ @Data public class ProcessedTrackData { - private String sigSrcCd; - private String targetId; - private String nationalCode; // National code based on sigSrcCd and targetId + private String mmsi; + private String nationalCode; private LocalDateTime timeBucket; private String trackGeom; // M값 보정된 LineStringM private Double distanceNm; diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/dto/VesselTrackData.java b/src/main/java/gc/mda/signal_batch/global/websocket/dto/VesselTrackData.java index 6b31dc2..9e5ec66 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/dto/VesselTrackData.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/dto/VesselTrackData.java @@ -5,9 +5,8 @@ import java.time.LocalDateTime; @Data public class VesselTrackData { - private String sigSrcCd; - private String targetId; - private String nationalCode; // National code based on sigSrcCd and targetId + private String mmsi; + private String nationalCode; private String trackGeom; // LineStringM as WKT private Double distanceNm; private Double avgSpeed; diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/service/ChunkedTrackStreamingService.java b/src/main/java/gc/mda/signal_batch/global/websocket/service/ChunkedTrackStreamingService.java index c8924d6..3c99095 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/service/ChunkedTrackStreamingService.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/service/ChunkedTrackStreamingService.java @@ -1,12 +1,9 @@ package gc.mda.signal_batch.global.websocket.service; -import gc.mda.signal_batch.global.util.ShipKindCodeConverter; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; +import gc.mda.signal_batch.global.util.SignalKindCode; import gc.mda.signal_batch.global.websocket.dto.TrackChunkResponse; import gc.mda.signal_batch.global.websocket.interceptor.TrackQueryInterceptor; import 
gc.mda.signal_batch.domain.vessel.dto.CompactVesselTrack; -import gc.mda.signal_batch.domain.vessel.dto.IntegrationVessel; -import gc.mda.signal_batch.domain.vessel.service.IntegrationVesselService; import gc.mda.signal_batch.global.websocket.dto.TrackQueryRequest; import gc.mda.signal_batch.global.websocket.dto.ChunkStats; import gc.mda.signal_batch.domain.vessel.service.simplification.TrackSimplificationStrategy; @@ -56,7 +53,6 @@ public class ChunkedTrackStreamingService { @SuppressWarnings("unused") private final TrackSimplificationStrategy simplificationStrategy; private final ActiveQueryManager activeQueryManager; - private final IntegrationVesselService integrationVesselService; private final TrackQueryInterceptor trackQueryInterceptor; private final DailyTrackCacheManager dailyTrackCacheManager; private final CacheTrackSimplifier cacheTrackSimplifier; @@ -98,7 +94,6 @@ public class ChunkedTrackStreamingService { @Qualifier("queryDataSource") DataSource queryDataSource, TrackSimplificationStrategy simplificationStrategy, ActiveQueryManager activeQueryManager, - IntegrationVesselService integrationVesselService, TrackQueryInterceptor trackQueryInterceptor, DailyTrackCacheManager dailyTrackCacheManager, CacheTrackSimplifier cacheTrackSimplifier) { @@ -106,7 +101,6 @@ public class ChunkedTrackStreamingService { this.queryDataSource = queryDataSource; this.simplificationStrategy = simplificationStrategy; this.activeQueryManager = activeQueryManager; - this.integrationVesselService = integrationVesselService; this.trackQueryInterceptor = trackQueryInterceptor; this.dailyTrackCacheManager = dailyTrackCacheManager; this.cacheTrackSimplifier = cacheTrackSimplifier; @@ -203,11 +197,10 @@ public class ChunkedTrackStreamingService { */ // 선박 데이터 누적용 내부 클래스 private static class VesselAccumulator { - String sigSrcCd; - String targetId; - String shipName; // 선명 추가 - String shipType; // 선종 추가 - String shipKindCode; // 선박 종류 코드 추가 + String mmsi; + String shipName; + String 
shipType; + String shipKindCode; List geometry = new ArrayList<>(500); List timestamps = new ArrayList<>(500); List speeds = new ArrayList<>(500); @@ -219,44 +212,40 @@ public class ChunkedTrackStreamingService { /** * 선박 정보 조회 (캐시 우선) */ - private VesselInfo getVesselInfo(String sigSrcCd, String targetId) { - String vesselKey = sigSrcCd + "_" + targetId; - + private VesselInfo getVesselInfo(String mmsi) { // 캐시 청소 (10분마다) if (System.currentTimeMillis() - lastCacheCleanup > 600_000) { cleanupVesselCache(); } // 캐시에서 조회 - VesselInfo cached = vesselInfoCache.get(vesselKey); + VesselInfo cached = vesselInfoCache.get(mmsi); if (cached != null && !cached.isExpired()) { return cached; } // DB에서 조회 try { - String sql = "SELECT ship_nm, ship_ty FROM signal.t_vessel_latest_position " + - "WHERE sig_src_cd = ? AND target_id = ?"; + String sql = "SELECT ship_nm, vessel_type FROM signal.t_ais_position " + + "WHERE mmsi = ? LIMIT 1"; VesselInfo info = queryJdbcTemplate.queryForObject(sql, (rs, rowNum) -> new VesselInfo( rs.getString("ship_nm"), - rs.getString("ship_ty") + rs.getString("vessel_type") ), - sigSrcCd, targetId + mmsi ); - // 캐시에 저장 - vesselInfoCache.put(vesselKey, info); + vesselInfoCache.put(mmsi, info); log.debug("Vessel info loaded from DB and cached: {} - {} ({})", - vesselKey, info.shipName, info.shipType); + mmsi, info.shipName, info.shipType); return info; } catch (Exception e) { - log.debug("No vessel info found for {}, using defaults", vesselKey); + log.debug("No vessel info found for {}, using defaults", mmsi); VesselInfo defaultInfo = new VesselInfo(null, null); - // 기본값도 캐시에 저장 (DB 부하 감소) - vesselInfoCache.put(vesselKey, defaultInfo); + vesselInfoCache.put(mmsi, defaultInfo); return defaultInfo; } } @@ -293,16 +282,16 @@ public class ChunkedTrackStreamingService { // 캐시에 없는 것들은 DB에서 배치 조회 if (!uncachedIds.isEmpty()) { try { - String sql = "SELECT sig_src_cd, target_id, ship_nm, ship_ty " + - "FROM signal.t_vessel_latest_position " + - "WHERE sig_src_cd 
|| '_' || target_id IN (" + + String sql = "SELECT mmsi, ship_nm, vessel_type " + + "FROM signal.t_ais_position " + + "WHERE mmsi IN (" + String.join(",", Collections.nCopies(uncachedIds.size(), "?")) + ")"; queryJdbcTemplate.query(sql, rs -> { - String vesselId = rs.getString("sig_src_cd") + "_" + rs.getString("target_id"); + String vesselId = rs.getString("mmsi"); VesselInfo info = new VesselInfo( rs.getString("ship_nm"), - rs.getString("ship_ty") + rs.getString("vessel_type") ); result.put(vesselId, info); vesselInfoCache.put(vesselId, info); @@ -371,17 +360,17 @@ public class ChunkedTrackStreamingService { .map(id -> "?") .collect(Collectors.joining(",")); - String sql = "SELECT sig_src_cd, target_id, ship_nm, ship_ty " + - "FROM signal.t_vessel_latest_position " + - "WHERE sig_src_cd || '_' || target_id IN (" + placeholders + ")"; + String sql = "SELECT mmsi, ship_nm, vessel_type " + + "FROM signal.t_ais_position " + + "WHERE mmsi IN (" + placeholders + ")"; Set foundIds = new HashSet<>(); queryJdbcTemplate.query(sql, rs -> { - String visselId = rs.getString("sig_src_cd") + "_" + rs.getString("target_id"); + String visselId = rs.getString("mmsi"); VesselInfo info = new VesselInfo( rs.getString("ship_nm"), - rs.getString("ship_ty") + rs.getString("vessel_type") ); // 세션 캐시와 전역 캐시 모두에 저장 sessionCache.put(visselId, info); @@ -439,17 +428,17 @@ public class ChunkedTrackStreamingService { .map(id -> "?") .collect(Collectors.joining(",")); - String sql = "SELECT sig_src_cd, target_id, ship_nm, ship_ty " + - "FROM signal.t_vessel_latest_position " + - "WHERE sig_src_cd || '_' || target_id IN (" + placeholders + ")"; + String sql = "SELECT mmsi, ship_nm, vessel_type " + + "FROM signal.t_ais_position " + + "WHERE mmsi IN (" + placeholders + ")"; Set foundIds = new HashSet<>(); queryJdbcTemplate.query(sql, rs -> { - String vesselId = rs.getString("sig_src_cd") + "_" + rs.getString("target_id"); + String vesselId = rs.getString("mmsi"); VesselInfo info = new VesselInfo( 
rs.getString("ship_nm"), - rs.getString("ship_ty") + rs.getString("vessel_type") ); vesselInfoCache.put(vesselId, info); foundIds.add(vesselId); @@ -517,7 +506,7 @@ public class ChunkedTrackStreamingService { // DB 쿼리로 뷰포트 교차 선박 수집 dbQueryDays++; StringBuilder sql = new StringBuilder(); - sql.append("SELECT DISTINCT sig_src_cd, target_id FROM ").append(tableName); + sql.append("SELECT DISTINCT mmsi FROM ").append(tableName); sql.append(" WHERE time_bucket >= ? AND time_bucket < ?"); sql.append(" AND public.ST_Intersects(track_geom, public.ST_MakeEnvelope(?, ?, ?, ?, 4326))"); @@ -539,7 +528,7 @@ public class ChunkedTrackStreamingService { try (ResultSet rs = ps.executeQuery()) { while (rs.next()) { - vesselIds.add(rs.getString("sig_src_cd") + "_" + rs.getString("target_id")); + vesselIds.add(rs.getString("mmsi")); } } if (benchmark != null) benchmark.connViewportPass1++; // [BENCHMARK] @@ -614,9 +603,8 @@ public class ChunkedTrackStreamingService { return Collections.emptyList(); } } - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); - String vesselId = sigSrcCd + "_" + targetId; + String mmsi = rs.getString("mmsi"); + String vesselId = mmsi; // LineStringM 파싱 String trackGeomWkt = rs.getString("track_geom"); @@ -633,22 +621,16 @@ public class ChunkedTrackStreamingService { } // 선박 객체는 geometry가 비어있어도 생성 (선박 누락 방지) - // 먼저 선박 객체 확보 VesselAccumulator accumulator = vesselMap.get(vesselId); if (accumulator == null) { - vesselCount++; // 새 선박 추가 시 카운트 + vesselCount++; accumulator = new VesselAccumulator(); - accumulator.sigSrcCd = sigSrcCd; - accumulator.targetId = targetId; + accumulator.mmsi = mmsi; - // 선박 정보 조회 (캐시 우선) - VesselInfo vesselInfo = getVesselInfo(sigSrcCd, targetId); + VesselInfo vesselInfo = getVesselInfo(mmsi); accumulator.shipName = vesselInfo.shipName; accumulator.shipType = vesselInfo.shipType; - - // shipKindCode 계산 (선박명 패턴 매칭 포함 - 어망/부이 판별) - accumulator.shipKindCode = 
ShipKindCodeConverter.getShipKindCodeWithNamePattern( - sigSrcCd, vesselInfo.shipType, vesselInfo.shipName, targetId); + accumulator.shipKindCode = SignalKindCode.resolve(vesselInfo.shipType, null).getCode(); vesselMap.put(vesselId, accumulator); } @@ -776,13 +758,10 @@ public class ChunkedTrackStreamingService { return CompactVesselTrack.builder() .vesselId(vesselId) - .sigSrcCd(acc.sigSrcCd) - .targetId(acc.targetId) - .nationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode( - acc.sigSrcCd, acc.targetId)) - .shipName(acc.shipName) // 선명 추가 - .shipType(acc.shipType) // 선종 추가 - .shipKindCode(acc.shipKindCode) // 선박 종류 코드 추가 + .nationalCode(acc.mmsi != null && acc.mmsi.length() >= 3 ? acc.mmsi.substring(0, 3) : null) + .shipName(acc.shipName) + .shipType(acc.shipType) + .shipKindCode(acc.shipKindCode) .geometry(acc.geometry) .timestamps(acc.timestamps) .speeds(acc.speeds) @@ -1098,17 +1077,12 @@ public class ChunkedTrackStreamingService { if (accumulator == null) { accumulator = new VesselAccumulator(); - accumulator.sigSrcCd = track.getSigSrcCd(); - accumulator.targetId = track.getTargetId(); + accumulator.mmsi = track.getVesselId(); - // 선박 정보 조회 (캐시 우선) - 추가 - VesselInfo vesselInfo = getVesselInfo(track.getSigSrcCd(), track.getTargetId()); + VesselInfo vesselInfo = getVesselInfo(track.getVesselId()); accumulator.shipName = vesselInfo.shipName; accumulator.shipType = vesselInfo.shipType; - - // shipKindCode 계산 (선박명 패턴 매칭 포함 - 어망/부이 판별) - accumulator.shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - track.getSigSrcCd(), vesselInfo.shipType, vesselInfo.shipName, track.getTargetId()); + accumulator.shipKindCode = SignalKindCode.resolve(vesselInfo.shipType, null).getCode(); mergedMap.put(vesselId, accumulator); } @@ -1162,13 +1136,10 @@ public class ChunkedTrackStreamingService { return CompactVesselTrack.builder() .vesselId(vesselId) - .sigSrcCd(acc.sigSrcCd) - .targetId(acc.targetId) - 
.nationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode( - acc.sigSrcCd, acc.targetId)) - .shipName(acc.shipName) // 선명 추가 - .shipType(acc.shipType) // 선종 추가 - .shipKindCode(acc.shipKindCode) // 선박 종류 코드 추가 + .nationalCode(acc.mmsi != null && acc.mmsi.length() >= 3 ? acc.mmsi.substring(0, 3) : null) + .shipName(acc.shipName) + .shipType(acc.shipType) + .shipKindCode(acc.shipKindCode) .geometry(acc.geometry) .timestamps(acc.timestamps) .speeds(acc.speeds) @@ -1180,11 +1151,6 @@ public class ChunkedTrackStreamingService { }) .collect(Collectors.toList()); - // 통합선박 필터링 적용 (isIntegration = "1" 이고 기능이 활성화된 경우) - if ("1".equals(request.getIsIntegration()) && integrationVesselService.isEnabled()) { - mergedTracks = filterByIntegration(mergedTracks); - } - // 전체 포인트 통계 계산 int totalOriginalPoints = mergedTracks.stream() .mapToInt(t -> t.getPointCount()) @@ -1724,8 +1690,6 @@ public class ChunkedTrackStreamingService { if (builder == null) { builder = CompactVesselTrack.builder() .vesselId(track.getVesselId()) - .sigSrcCd(track.getSigSrcCd()) - .targetId(track.getTargetId()) .nationalCode(track.getNationalCode()) .geometry(new ArrayList<>()) .timestamps(new ArrayList<>()) @@ -1829,9 +1793,7 @@ public class ChunkedTrackStreamingService { private String buildRangeQuery(String tableName, TrackQueryRequest request, TimeRange range, SimplificationLevel simplificationLevel, Set viewportVesselIds) { StringBuilder sql = new StringBuilder(); - sql.append("SELECT sig_src_cd, target_id, time_bucket, "); - - // track_geom 고정 사용 + sql.append("SELECT mmsi, time_bucket, "); // 간소화 적용 if (simplificationLevel != SimplificationLevel.NONE && simplificationLevel.getTolerance() > 0) { @@ -1843,9 +1805,7 @@ public class ChunkedTrackStreamingService { sql.append("distance_nm, avg_speed, max_speed, point_count"); - // start_position, end_position에서 시간 정보 추출 (가능한 경우) if (!tableName.contains("5min")) { - // hourly, daily 테이블에는 start_position, end_position이 있음 
sql.append(", start_position->>'time' as start_time"); sql.append(", end_position->>'time' as end_time"); } @@ -1854,14 +1814,12 @@ public class ChunkedTrackStreamingService { sql.append("WHERE time_bucket >= ? "); sql.append("AND time_bucket < ? "); - // 2-pass 뷰포트 필터: vessel ID 기반 필터 (Pass 2) 또는 기존 viewport 필터 if (viewportVesselIds != null && !viewportVesselIds.isEmpty()) { - sql.append("AND sig_src_cd || '_' || target_id = ANY(?) "); + sql.append("AND mmsi = ANY(?) "); } else if (request.getViewport() != null) { sql.append("AND public.ST_Intersects(track_geom, public.ST_MakeEnvelope(?, ?, ?, ?, 4326)) "); } - // 거리/속도 필터 if (request.getMinAvgSpeed() != null) { sql.append("AND avg_speed >= ").append(request.getMinAvgSpeed()).append(" "); } @@ -1869,8 +1827,7 @@ public class ChunkedTrackStreamingService { sql.append("AND avg_speed <= ").append(request.getMaxAvgSpeed()).append(" "); } - // 정렬 및 제한 - sql.append("ORDER BY sig_src_cd, target_id, time_bucket "); + sql.append("ORDER BY mmsi, time_bucket "); sql.append("LIMIT ").append(MAX_TRACKS_PER_CHUNK); return sql.toString(); @@ -1880,11 +1837,10 @@ public class ChunkedTrackStreamingService { * Daily 테이블용 페이지네이션 쿼리 생성 */ private String buildDailyPaginationQuery(String tableName, TrackQueryRequest request, TimeRange range, - double tolerance, String lastSigSrcCd, String lastTargetId, Set viewportVesselIds) { + double tolerance, String lastMmsi, String lastTargetId, Set viewportVesselIds) { StringBuilder sql = new StringBuilder(); - sql.append("SELECT sig_src_cd, target_id, time_bucket, "); + sql.append("SELECT mmsi, time_bucket, "); - // 강화된 간소화 적용 if (tolerance > 0) { sql.append("public.ST_AsText(public.ST_Simplify(track_geom, ").append(tolerance) .append(")) as track_geom, "); @@ -1900,19 +1856,16 @@ public class ChunkedTrackStreamingService { sql.append("WHERE time_bucket >= ? "); sql.append("AND time_bucket < ? 
"); - // 선박 기준 페이지네이션 조건 - if (lastSigSrcCd != null && lastTargetId != null) { - sql.append("AND (sig_src_cd, target_id) > (?, ?) "); + if (lastMmsi != null) { + sql.append("AND mmsi > ? "); } - // 2-pass 뷰포트 필터: vessel ID 기반 필터 (Pass 2) 또는 기존 viewport 필터 if (viewportVesselIds != null && !viewportVesselIds.isEmpty()) { - sql.append("AND sig_src_cd || '_' || target_id = ANY(?) "); + sql.append("AND mmsi = ANY(?) "); } else if (request.getViewport() != null) { sql.append("AND public.ST_Intersects(track_geom, public.ST_MakeEnvelope(?, ?, ?, ?, 4326)) "); } - // 거리/속도 필터 if (request.getMinAvgSpeed() != null) { sql.append("AND avg_speed >= ").append(request.getMinAvgSpeed()).append(" "); } @@ -1920,7 +1873,7 @@ public class ChunkedTrackStreamingService { sql.append("AND avg_speed <= ").append(request.getMaxAvgSpeed()).append(" "); } - sql.append("ORDER BY sig_src_cd, target_id, time_bucket "); + sql.append("ORDER BY mmsi, time_bucket "); sql.append("LIMIT ").append(DAILY_PAGE_SIZE); return sql.toString(); @@ -1942,14 +1895,13 @@ public class ChunkedTrackStreamingService { log.info("Daily pagination started for range [{} - {}] with tolerance {} (zoom: {})", range.getStart(), range.getEnd(), tolerance, request.getZoomLevel()); - String lastSigSrcCd = null; - String lastTargetId = null; + String lastMmsi = null; int pageNum = 0; int totalTrackCount = 0; // 페이지네이션 루프 while (true) { - String sql = buildDailyPaginationQuery(tableName, request, range, tolerance, lastSigSrcCd, lastTargetId, viewportVesselIds); + String sql = buildDailyPaginationQuery(tableName, request, range, tolerance, lastMmsi, null, viewportVesselIds); try (Connection conn = queryDataSource.getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { @@ -1959,9 +1911,8 @@ public class ChunkedTrackStreamingService { ps.setTimestamp(paramIndex++, Timestamp.valueOf(range.getEnd())); // 페이지네이션 파라미터 - if (lastSigSrcCd != null && lastTargetId != null) { - ps.setString(paramIndex++, lastSigSrcCd); - 
ps.setString(paramIndex++, lastTargetId); + if (lastMmsi != null) { + ps.setString(paramIndex++, lastMmsi); } // 2-pass 뷰포트 필터: vessel ID 배열 또는 기존 viewport 좌표 바인딩 @@ -1980,8 +1931,7 @@ public class ChunkedTrackStreamingService { try (ResultSet rs = ps.executeQuery()) { int pageTrackCount = 0; - String currentSigSrcCd = null; - String currentTargetId = null; + String currentMmsi = null; while (rs.next()) { // 세션 연결 끊김 체크 (1000개마다) @@ -1993,9 +1943,8 @@ public class ChunkedTrackStreamingService { } } - currentSigSrcCd = rs.getString("sig_src_cd"); - currentTargetId = rs.getString("target_id"); - String vesselId = currentSigSrcCd + "_" + currentTargetId; + currentMmsi = rs.getString("mmsi"); + String vesselId = currentMmsi; // LineStringM 파싱 String trackGeomWkt = rs.getString("track_geom"); @@ -2009,18 +1958,14 @@ public class ChunkedTrackStreamingService { } catch (SQLException ignored) {} // 선박 객체는 geometry가 비어있어도 생성 (선박 누락 방지) - final String finalSigSrcCd = currentSigSrcCd; - final String finalTargetId = currentTargetId; + final String finalMmsi = currentMmsi; VesselAccumulator accumulator = vesselMap.computeIfAbsent(vesselId, k -> { - VesselInfo info = getVesselInfo(finalSigSrcCd, finalTargetId); + VesselInfo info = getVesselInfo(finalMmsi); VesselAccumulator acc = new VesselAccumulator(); - acc.sigSrcCd = finalSigSrcCd; - acc.targetId = finalTargetId; + acc.mmsi = finalMmsi; acc.shipName = info.shipName; acc.shipType = info.shipType; - // shipKindCode 계산 (선박명 패턴 매칭 포함 - 어망/부이 판별) - acc.shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - finalSigSrcCd, info.shipType, info.shipName, finalTargetId); + acc.shipKindCode = SignalKindCode.resolve(info.shipType, null).getCode(); return acc; }); @@ -2097,9 +2042,8 @@ public class ChunkedTrackStreamingService { break; } - // 다음 페이지를 위한 마지막 선박 키 저장 - lastSigSrcCd = currentSigSrcCd; - lastTargetId = currentTargetId; + // 다음 페이지를 위한 마지막 MMSI 저장 + lastMmsi = currentMmsi; pageNum++; log.debug("Daily 
pagination page {} completed: {} tracks (total: {}, vessels: {})", @@ -2145,13 +2089,10 @@ public class ChunkedTrackStreamingService { return CompactVesselTrack.builder() .vesselId(vesselId) - .sigSrcCd(acc.sigSrcCd) - .targetId(acc.targetId) - .nationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode( - acc.sigSrcCd, acc.targetId)) + .nationalCode(acc.mmsi != null && acc.mmsi.length() >= 3 ? acc.mmsi.substring(0, 3) : null) .shipName(acc.shipName) .shipType(acc.shipType) - .shipKindCode(acc.shipKindCode) // 선박 종류 코드 추가 + .shipKindCode(acc.shipKindCode) .geometry(acc.geometry) .timestamps(acc.timestamps) .speeds(acc.speeds) @@ -2665,8 +2606,7 @@ public class ChunkedTrackStreamingService { // 세션 ID 추출 (쿼리 취소 확인용) String sessionId = queryId != null ? queryId.split("_")[0] : null; - String lastSigSrcCd = null; - String lastTargetId = null; + String lastMmsi = null; int pageNum = 0; int totalTrackCount = 0; int totalVesselsSent = 0; @@ -2681,7 +2621,7 @@ public class ChunkedTrackStreamingService { } Map pageVesselMap = new HashMap<>(20000); - String sql = buildDailyPaginationQuery(tableName, request, range, tolerance, lastSigSrcCd, lastTargetId, viewportVesselIds); + String sql = buildDailyPaginationQuery(tableName, request, range, tolerance, lastMmsi, null, viewportVesselIds); try (Connection conn = queryDataSource.getConnection(); PreparedStatement ps = conn.prepareStatement(sql)) { @@ -2691,9 +2631,8 @@ public class ChunkedTrackStreamingService { ps.setTimestamp(paramIndex++, Timestamp.valueOf(range.getEnd())); // 페이지네이션 파라미터 - if (lastSigSrcCd != null && lastTargetId != null) { - ps.setString(paramIndex++, lastSigSrcCd); - ps.setString(paramIndex++, lastTargetId); + if (lastMmsi != null) { + ps.setString(paramIndex++, lastMmsi); } // 2-pass 뷰포트 필터: vessel ID 배열 또는 기존 viewport 좌표 바인딩 @@ -2723,8 +2662,7 @@ public class ChunkedTrackStreamingService { } int pageTrackCount = 0; - String currentSigSrcCd = null; - String currentTargetId = null; 
+ String currentMmsi3 = null; // 1단계: 먼저 모든 선박 ID를 수집 List trackDataList = new ArrayList<>(DAILY_PAGE_SIZE); @@ -2732,14 +2670,12 @@ public class ChunkedTrackStreamingService { long collectStartTime = System.currentTimeMillis(); while (rs.next()) { - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); - vesselIdsInPage.add(sigSrcCd + "_" + targetId); + String mmsi = rs.getString("mmsi"); + vesselIdsInPage.add(mmsi); // ResultSet 데이터를 임시 저장 trackDataList.add(new String[]{ - sigSrcCd, - targetId, + mmsi, rs.getString("track_geom"), rs.getString("time_bucket"), rs.getString("start_time"), @@ -2771,34 +2707,28 @@ public class ChunkedTrackStreamingService { } } - currentSigSrcCd = trackData[0]; - currentTargetId = trackData[1]; - String vesselId = currentSigSrcCd + "_" + currentTargetId; - String trackGeomWkt = trackData[2]; - String timeBucketStr = trackData[3]; - String startTimeStr = trackData[4]; - String endTimeStr = trackData[5]; - double distanceNm = Double.parseDouble(trackData[6]); - double maxSpeed = Double.parseDouble(trackData[7]); + currentMmsi3 = trackData[0]; + String vesselId = currentMmsi3; + String trackGeomWkt = trackData[1]; + String timeBucketStr = trackData[2]; + String startTimeStr = trackData[3]; + String endTimeStr = trackData[4]; + double distanceNm = Double.parseDouble(trackData[5]); + double maxSpeed = Double.parseDouble(trackData[6]); // 선박 객체는 geometry가 비어있어도 생성 (선박 누락 방지) - final String finalSigSrcCd = currentSigSrcCd; - final String finalTargetId = currentTargetId; - final String finalVesselId = vesselId; + final String finalMmsi3 = currentMmsi3; VesselAccumulator accumulator = pageVesselMap.computeIfAbsent(vesselId, k -> { // 세션 캐시에서 조회 (이미 preload됨) - VesselInfo info = sessionVesselCache.get(finalVesselId); + VesselInfo info = sessionVesselCache.get(finalMmsi3); if (info == null) { info = new VesselInfo(null, null); } VesselAccumulator acc = new VesselAccumulator(); - acc.sigSrcCd = 
finalSigSrcCd; - acc.targetId = finalTargetId; + acc.mmsi = finalMmsi3; acc.shipName = info.shipName; acc.shipType = info.shipType; - // shipKindCode 계산 (선박명 패턴 매칭 포함 - 어망/부이 판별) - acc.shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - finalSigSrcCd, info.shipType, info.shipName, finalTargetId); + acc.shipKindCode = SignalKindCode.resolve(info.shipType, null).getCode(); return acc; }); @@ -2868,10 +2798,7 @@ public class ChunkedTrackStreamingService { if (!pageVesselMap.isEmpty()) { List pageTracks = convertAccumulatorsToTracks(pageVesselMap); - // 통합선박 필터링 적용 (isIntegration = "1" 이고 기능이 활성화된 경우) - if ("1".equals(request.getIsIntegration()) && integrationVesselService.isEnabled()) { - pageTracks = filterByIntegration(pageTracks); - } + // 통합선박 필터링 제거됨 (SNP API 전환: MMSI 단일 식별자) totalVesselsSent += pageTracks.size(); @@ -2945,9 +2872,8 @@ public class ChunkedTrackStreamingService { break; } - // 다음 페이지를 위한 마지막 선박 키 저장 - lastSigSrcCd = currentSigSrcCd; - lastTargetId = currentTargetId; + // 다음 페이지를 위한 마지막 MMSI 저장 + lastMmsi = currentMmsi3; pageNum++; pageVesselMap.clear(); // 메모리 즉시 해제: 페이지 선박 누적 맵 } @@ -2995,10 +2921,7 @@ public class ChunkedTrackStreamingService { return CompactVesselTrack.builder() .vesselId(visselId) - .sigSrcCd(acc.sigSrcCd) - .targetId(acc.targetId) - .nationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode( - acc.sigSrcCd, acc.targetId)) + .nationalCode(acc.mmsi != null && acc.mmsi.length() >= 3 ? 
acc.mmsi.substring(0, 3) : null) .shipName(acc.shipName) .shipType(acc.shipType) .shipKindCode(acc.shipKindCode) @@ -3034,102 +2957,4 @@ public class ChunkedTrackStreamingService { return query != null && query.isCancelled(); } - /** - * 통합선박 기준 필터링 - * 동일 통합선박의 여러 신호 중 실제 데이터가 있는 최고 우선순위 신호만 반환 - * - * @param tracks 원본 트랙 목록 - * @return 필터링된 트랙 목록 - */ - private List filterByIntegration(List tracks) { - if (tracks == null || tracks.isEmpty()) { - return tracks; - } - - long startTime = System.currentTimeMillis(); - int originalCount = tracks.size(); - - // 1. 모든 트랙의 통합선박 정보 조회 (캐시에서) - Map vesselIntegrations = new HashMap<>(); - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - if (!vesselIntegrations.containsKey(key)) { - IntegrationVessel integration = integrationVesselService.findByVessel( - track.getSigSrcCd(), track.getTargetId() - ); - vesselIntegrations.put(key, integration); // null 가능 - } - } - - // 2. 통합선박별 그룹핑 - // Key: intgrSeq (통합정보 없으면 임시 키 사용) - Map> groupedByIntegration = new HashMap<>(); - Map integrationMap = new HashMap<>(); - Map soloVesselKeys = new HashMap<>(); // 단독 선박용 - - long tempSeq = -1; - for (CompactVesselTrack track : tracks) { - String key = track.getSigSrcCd() + "_" + track.getTargetId(); - IntegrationVessel integration = vesselIntegrations.get(key); - - Long seq; - if (integration != null) { - seq = integration.getIntgrSeq(); - integrationMap.putIfAbsent(seq, integration); - } else { - // 통합정보 없음 → 단독 선박 (고유 임시 키) - seq = tempSeq--; - soloVesselKeys.put(seq, key); - } - - groupedByIntegration.computeIfAbsent(seq, k -> new ArrayList<>()).add(track); - } - - // 3. 
각 그룹에서 최고 우선순위 신호만 선택 - List result = new ArrayList<>(); - - for (Map.Entry> entry : groupedByIntegration.entrySet()) { - Long seq = entry.getKey(); - List groupTracks = entry.getValue(); - - if (seq < 0) { - // 통합정보 없는 단독 선박 → 그대로 추가 + integration_target_id 설정 - CompactVesselTrack firstTrack = groupTracks.get(0); - String soloIntegrationId = IntegrationSignalConstants.generateSoloIntegrationId( - firstTrack.getSigSrcCd(), - firstTrack.getTargetId() - ); - groupTracks.forEach(t -> t.setIntegrationTargetId(soloIntegrationId)); - result.addAll(groupTracks); - } else { - // 통합선박 → 존재하는 신호 중 최고 우선순위 선택 - IntegrationVessel integration = integrationMap.get(seq); - - // 그룹 내 존재하는 신호 타입들 - Set existingSigSrcCds = groupTracks.stream() - .map(CompactVesselTrack::getSigSrcCd) - .collect(Collectors.toSet()); - - // 존재하는 것 중 최고 우선순위 - String selectedSigSrcCd = integrationVesselService.selectHighestPriorityFromExisting(existingSigSrcCds); - - // 해당 신호의 항적만 필터링 - List selectedTracks = groupTracks.stream() - .filter(t -> t.getSigSrcCd().equals(selectedSigSrcCd)) - .collect(Collectors.toList()); - - // integration_target_id 설정 - String integrationId = integration.generateIntegrationId(); - selectedTracks.forEach(t -> t.setIntegrationTargetId(integrationId)); - - result.addAll(selectedTracks); - } - } - - long elapsed = System.currentTimeMillis() - startTime; - log.info("[INTEGRATION_FILTER] Filtered {} tracks to {} tracks ({} integration groups) in {}ms", - originalCount, result.size(), groupedByIntegration.size(), elapsed); - - return result; - } } \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/service/DailyTrackCacheManager.java b/src/main/java/gc/mda/signal_batch/global/websocket/service/DailyTrackCacheManager.java index 72caae2..49a124e 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/service/DailyTrackCacheManager.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/service/DailyTrackCacheManager.java @@ 
-2,11 +2,7 @@ package gc.mda.signal_batch.global.websocket.service; import gc.mda.signal_batch.domain.vessel.dto.CompactVesselTrack; import gc.mda.signal_batch.global.config.DailyTrackCacheProperties; -import gc.mda.signal_batch.global.util.NationalCodeUtil; -import gc.mda.signal_batch.global.util.ShipKindCodeConverter; -import gc.mda.signal_batch.domain.vessel.service.IntegrationVesselService; -import gc.mda.signal_batch.domain.vessel.dto.IntegrationVessel; -import gc.mda.signal_batch.global.util.IntegrationSignalConstants; +import gc.mda.signal_batch.global.util.SignalKindCode; import lombok.extern.slf4j.Slf4j; import org.locationtech.jts.geom.Coordinate; import org.locationtech.jts.geom.Envelope; @@ -49,7 +45,6 @@ public class DailyTrackCacheManager { private final DataSource queryDataSource; private final DailyTrackCacheProperties cacheProperties; - private final IntegrationVesselService integrationVesselService; // 날짜별 캐시 (D-1 ~ D-N) private final ConcurrentHashMap cache = new ConcurrentHashMap<>(); @@ -62,11 +57,9 @@ public class DailyTrackCacheManager { public DailyTrackCacheManager( @Qualifier("queryDataSource") DataSource queryDataSource, - DailyTrackCacheProperties cacheProperties, - IntegrationVesselService integrationVesselService) { + DailyTrackCacheProperties cacheProperties) { this.queryDataSource = queryDataSource; this.cacheProperties = cacheProperties; - this.integrationVesselService = integrationVesselService; } /** @@ -74,7 +67,7 @@ public class DailyTrackCacheManager { */ public static class DailyTrackData { private final LocalDate date; - private final Map tracks; // key: "sigSrcCd_targetId" + private final Map tracks; // key: mmsi private final long loadedAtMillis; private final int vesselCount; private final long memorySizeBytes; @@ -225,14 +218,14 @@ public class DailyTrackCacheManager { LocalDateTime dayStart = date.atStartOfDay(); LocalDateTime dayEnd = date.plusDays(1).atStartOfDay(); - String sql = "SELECT sig_src_cd, target_id, 
time_bucket, " + + String sql = "SELECT mmsi, time_bucket, " + "public.ST_AsText(track_geom) as track_geom, " + "distance_nm, avg_speed, max_speed, point_count, " + "start_position->>'time' as start_time, " + "end_position->>'time' as end_time " + "FROM signal.t_vessel_tracks_daily " + "WHERE time_bucket >= ? AND time_bucket < ? " + - "ORDER BY sig_src_cd, target_id"; + "ORDER BY mmsi"; Map vesselMap = new HashMap<>(50000); long estimatedMemory = 0; @@ -247,14 +240,12 @@ public class DailyTrackCacheManager { try (ResultSet rs = ps.executeQuery()) { while (rs.next()) { - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); - String vesselId = sigSrcCd + "_" + targetId; + String mmsi = rs.getString("mmsi"); + String vesselId = mmsi; VesselAccumulator acc = vesselMap.computeIfAbsent(vesselId, k -> { VesselAccumulator a = new VesselAccumulator(); - a.sigSrcCd = sigSrcCd; - a.targetId = targetId; + a.mmsi = mmsi; return a; }); @@ -311,7 +302,7 @@ public class DailyTrackCacheManager { return null; } - // 선박 정보 일괄 보강 (t_vessel_latest_position에서 shipName, shipType 조회) + // 선박 정보 일괄 보강 (t_ais_position에서 shipName, shipType 조회) enrichVesselInfo(vesselMap); // VesselAccumulator → CompactVesselTrack 변환 @@ -322,32 +313,18 @@ public class DailyTrackCacheManager { double avgSpeed = acc.pointCount > 0 ? 
acc.totalDistance / Math.max(1, acc.pointCount) * 60 : 0; - // shipKindCode 계산 (선박명 패턴 매칭 포함) - String shipKindCode = ShipKindCodeConverter.getShipKindCodeWithNamePattern( - acc.sigSrcCd, acc.shipType, acc.shipName, acc.targetId); + // shipKindCode 계산 + String shipKindCode = SignalKindCode.resolve(acc.shipType, null).getCode(); - // nationalCode 계산 - String nationalCode = NationalCodeUtil.calculateNationalCode( - acc.sigSrcCd, acc.targetId); - - // 통합선박 ID 조회 - String integrationTargetId = null; - try { - IntegrationVessel iv = integrationVesselService.findByVessel(acc.sigSrcCd, acc.targetId); - if (iv != null) { - integrationTargetId = iv.generateIntegrationId(); - } - } catch (Exception ignored) {} + // nationalCode 계산 (MMSI 앞 3자리 = MID) + String nationalCode = acc.mmsi.length() >= 3 ? acc.mmsi.substring(0, 3) : acc.mmsi; CompactVesselTrack track = CompactVesselTrack.builder() .vesselId(entry.getKey()) - .sigSrcCd(acc.sigSrcCd) - .targetId(acc.targetId) .nationalCode(nationalCode) .shipName(acc.shipName) .shipType(acc.shipType) .shipKindCode(shipKindCode) - .integrationTargetId(integrationTargetId) .geometry(acc.geometry) .timestamps(acc.timestamps) .speeds(acc.speeds) @@ -418,13 +395,10 @@ public class DailyTrackCacheManager { // 첫 번째 날짜: 빌더 생성 builder = CompactVesselTrack.builder() .vesselId(vesselId) - .sigSrcCd(track.getSigSrcCd()) - .targetId(track.getTargetId()) .nationalCode(track.getNationalCode()) .shipName(track.getShipName()) .shipType(track.getShipType()) .shipKindCode(track.getShipKindCode()) - .integrationTargetId(track.getIntegrationTargetId()) .geometry(new ArrayList<>(track.getGeometry())) .timestamps(new ArrayList<>(track.getTimestamps())) .speeds(new ArrayList<>(track.getSpeeds())) @@ -644,7 +618,7 @@ public class DailyTrackCacheManager { } /** - * 선박 정보 일괄 보강 (t_vessel_latest_position에서 ship_nm, ship_ty 조회) + * 선박 정보 일괄 보강 (t_ais_position에서 ship_nm, ship_ty 조회) * IN 절 1000건 배치로 처리 */ private void enrichVesselInfo(Map vesselMap) { @@ -657,9 
+631,9 @@ public class DailyTrackCacheManager { try (Connection conn = queryDataSource.getConnection()) { String placeholders = batch.stream().map(id -> "?").collect(Collectors.joining(",")); - String sql = "SELECT sig_src_cd, target_id, ship_nm, ship_ty " + - "FROM signal.t_vessel_latest_position " + - "WHERE sig_src_cd || '_' || target_id IN (" + placeholders + ")"; + String sql = "SELECT mmsi, name as ship_nm, vessel_type as ship_ty " + + "FROM signal.t_ais_position " + + "WHERE mmsi IN (" + placeholders + ")"; try (PreparedStatement ps = conn.prepareStatement(sql)) { for (int j = 0; j < batch.size(); j++) { @@ -668,7 +642,7 @@ public class DailyTrackCacheManager { try (ResultSet rs = ps.executeQuery()) { while (rs.next()) { - String vesselId = rs.getString("sig_src_cd") + "_" + rs.getString("target_id"); + String vesselId = rs.getString("mmsi"); VesselAccumulator acc = vesselMap.get(vesselId); if (acc != null) { acc.shipName = rs.getString("ship_nm"); @@ -713,8 +687,7 @@ public class DailyTrackCacheManager { * 선박 데이터 누적용 내부 클래스 */ private static class VesselAccumulator { - String sigSrcCd; - String targetId; + String mmsi; String shipName; String shipType; List geometry = new ArrayList<>(500); diff --git a/src/main/java/gc/mda/signal_batch/global/websocket/service/StompTrackStreamingService.java b/src/main/java/gc/mda/signal_batch/global/websocket/service/StompTrackStreamingService.java index 5ffb9c6..ea33e83 100644 --- a/src/main/java/gc/mda/signal_batch/global/websocket/service/StompTrackStreamingService.java +++ b/src/main/java/gc/mda/signal_batch/global/websocket/service/StompTrackStreamingService.java @@ -723,14 +723,12 @@ public class StompTrackStreamingService { private VesselTrackData mapResultSetToTrack(ResultSet rs) throws SQLException { VesselTrackData track = new VesselTrackData(); - String sigSrcCd = rs.getString("sig_src_cd"); - String targetId = rs.getString("target_id"); + String mmsi = rs.getString("mmsi"); - track.setSigSrcCd(sigSrcCd); - 
track.setTargetId(targetId); + track.setMmsi(mmsi); - // National Code 계산 - track.setNationalCode(gc.mda.signal_batch.global.util.NationalCodeUtil.calculateNationalCode(sigSrcCd, targetId)); + // National Code 계산 (MMSI 앞 3자리 = MID) + track.setNationalCode(mmsi != null && mmsi.length() >= 3 ? mmsi.substring(0, 3) : "000"); // LineStringM을 WKT로 변환 track.setTrackGeom(rs.getString("track_geom_wkt")); @@ -817,11 +815,11 @@ public class StompTrackStreamingService { // 단순화 옵션 적용 if (simplificationLevel != SimplificationLevel.NONE && simplificationLevel.getTolerance() > 0) { - sql.append("SELECT sig_src_cd, target_id, "); + sql.append("SELECT mmsi, "); sql.append("public.ST_AsText(public.ST_Simplify(track_geom, ").append(simplificationLevel.getTolerance()) .append(")) as track_geom_wkt, "); } else { - sql.append("SELECT sig_src_cd, target_id, "); + sql.append("SELECT mmsi, "); sql.append("public.ST_AsText(track_geom) as track_geom_wkt, "); } @@ -838,8 +836,8 @@ public class StompTrackStreamingService { // 해구 필터 (JOIN 대신 IN 사용으로 성능 개선) if (request.getHaeguNumbers() != null && !request.getHaeguNumbers().isEmpty()) { - sql.append("AND (sig_src_cd, target_id, time_bucket) IN ("); - sql.append("SELECT sig_src_cd, target_id, time_bucket "); + sql.append("AND (mmsi, time_bucket) IN ("); + sql.append("SELECT mmsi, time_bucket "); sql.append("FROM t_grid_vessel_tracks "); sql.append("WHERE haegu_no = ANY(ARRAY[").append( request.getHaeguNumbers().stream() @@ -850,28 +848,28 @@ public class StompTrackStreamingService { // 영역 필터 if (request.getAreaIds() != null && !request.getAreaIds().isEmpty()) { - sql.append("AND (sig_src_cd, target_id, time_bucket) IN ("); - sql.append("SELECT sig_src_cd, target_id, time_bucket "); + sql.append("AND (mmsi, time_bucket) IN ("); + sql.append("SELECT mmsi, time_bucket "); sql.append("FROM t_area_vessel_tracks "); sql.append("WHERE area_id = ANY(ARRAY['").append( String.join("','", request.getAreaIds())).append("']) "); sql.append("AND time_bucket >= 
? AND time_bucket < ?) "); } - // 선박 ID 필터 + // 선박 ID 필터 (vesselIds are now mmsi values) if (request.getVesselIds() != null && !request.getVesselIds().isEmpty()) { - sql.append("AND target_id = ANY(ARRAY['").append( + sql.append("AND mmsi = ANY(ARRAY['").append( String.join("','", request.getVesselIds())).append("']) "); } - // 거리/속도 필터링된 선박 목록 + // 거리/속도 필터링된 선박 목록 (filteredVessels are now mmsi values) if (filteredVessels != null && !filteredVessels.isEmpty()) { - sql.append("AND (sig_src_cd || '_' || target_id) = ANY(ARRAY['").append( + sql.append("AND mmsi = ANY(ARRAY['").append( String.join("','", filteredVessels)).append("']) "); } // 인덱스 활용을 위한 정렬 - sql.append("ORDER BY time_bucket, target_id"); + sql.append("ORDER BY time_bucket, mmsi"); return sql.toString(); } @@ -888,8 +886,8 @@ public class StompTrackStreamingService { } if (request.getHaeguNumbers() != null && !request.getHaeguNumbers().isEmpty()) { - sql.append("AND (sig_src_cd, target_id, time_bucket) IN ("); - sql.append("SELECT sig_src_cd, target_id, time_bucket "); + sql.append("AND (mmsi, time_bucket) IN ("); + sql.append("SELECT mmsi, time_bucket "); sql.append("FROM t_grid_vessel_tracks "); sql.append("WHERE haegu_no = ANY(ARRAY[").append( request.getHaeguNumbers().stream() @@ -899,8 +897,8 @@ public class StompTrackStreamingService { } if (request.getAreaIds() != null && !request.getAreaIds().isEmpty()) { - sql.append("AND (sig_src_cd, target_id, time_bucket) IN ("); - sql.append("SELECT sig_src_cd, target_id, time_bucket "); + sql.append("AND (mmsi, time_bucket) IN ("); + sql.append("SELECT mmsi, time_bucket "); sql.append("FROM t_area_vessel_tracks "); sql.append("WHERE area_id = ANY(ARRAY['").append( String.join("','", request.getAreaIds())).append("']) "); @@ -908,13 +906,13 @@ public class StompTrackStreamingService { } if (request.getVesselIds() != null && !request.getVesselIds().isEmpty()) { - sql.append("AND target_id = ANY(ARRAY['").append( + sql.append("AND mmsi = 
ANY(ARRAY['").append( String.join("','", request.getVesselIds())).append("']) "); } // 거리/속도 필터링된 선박 목록 if (filteredVessels != null && !filteredVessels.isEmpty()) { - sql.append("AND (sig_src_cd || '_' || target_id) = ANY(ARRAY['").append( + sql.append("AND mmsi = ANY(ARRAY['").append( String.join("','", filteredVessels)).append("']) "); } @@ -1264,8 +1262,7 @@ public class StompTrackStreamingService { List vesselTracks = mergedTracks.stream() .map(merged -> { VesselTrackData track = new VesselTrackData(); - track.setSigSrcCd(merged.getSigSrcCd()); - track.setTargetId(merged.getTargetId()); + track.setMmsi(merged.getMmsi()); track.setNationalCode(merged.getNationalCode()); track.setTrackGeom(merged.getMergedTrackGeom()); track.setDistanceNm(merged.getTotalDistanceNm()); diff --git a/src/main/java/gc/mda/signal_batch/monitoring/controller/BatchAdminController.java b/src/main/java/gc/mda/signal_batch/monitoring/controller/BatchAdminController.java index 093a605..730cf0b 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/controller/BatchAdminController.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/controller/BatchAdminController.java @@ -38,10 +38,6 @@ public class BatchAdminController { @Qualifier("asyncJobLauncher") private JobLauncher jobLauncher; - @Autowired - @Qualifier("vesselAggregationJob") - private Job vesselAggregationJob; - @Autowired @Qualifier("vesselTrackAggregationJob") private Job vesselTrackAggregationJob; @@ -61,9 +57,9 @@ public class BatchAdminController { * Job 실행 */ @PostMapping("/job/run") - @Operation(summary = "배치 Job 실행", description = "지정된 배치 Job을 실행합니다. vesselAggregationJob, vesselTrackAggregationJob, dailyAggregationJob 지원") + @Operation(summary = "배치 Job 실행", description = "지정된 배치 Job을 실행합니다. 
vesselTrackAggregationJob, dailyAggregationJob 지원") public ResponseEntity> runJob( - @Parameter(description = "Job 이름 (기본: vesselAggregationJob)") @RequestParam(required = false) String jobName, + @Parameter(description = "Job 이름 (기본: vesselTrackAggregationJob)") @RequestParam(required = false) String jobName, @Parameter(description = "시작 시간 (형식: yyyy-MM-ddTHH:mm:ss)") @RequestParam(required = false) String startTime, @Parameter(description = "종료 시간 (형식: yyyy-MM-ddTHH:mm:ss)") @RequestParam(required = false) String endTime) { @@ -71,10 +67,8 @@ public class BatchAdminController { Job job; if ("dailyAggregationJob".equals(jobName)) { job = dailyAggregationJob; - } else if ("vesselTrackAggregationJob".equals(jobName)) { - job = vesselTrackAggregationJob; } else { - job = vesselAggregationJob; + job = vesselTrackAggregationJob; } LocalDateTime start = startTime != null ? diff --git a/src/main/java/gc/mda/signal_batch/monitoring/controller/DataSourceDebugController.java b/src/main/java/gc/mda/signal_batch/monitoring/controller/DataSourceDebugController.java index c2db053..daab424 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/controller/DataSourceDebugController.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/controller/DataSourceDebugController.java @@ -114,15 +114,16 @@ public class DataSourceDebugController { // signal 스키마의 테이블 목록 List> tables = jdbcTemplate.queryForList( """ - SELECT + SELECT tablename, pg_size_pretty(pg_total_relation_size('signal.'||tablename)) as size, - CASE - WHEN tablename LIKE 'sig_test%' THEN 'Source Data' - WHEN tablename = 't_vessel_latest_position' THEN 'Latest Position' + CASE + WHEN tablename = 't_ais_position' THEN 'Latest Position' + WHEN tablename LIKE 't_vessel_tracks%' THEN 'Vessel Tracks' WHEN tablename = 't_areas' THEN 'Area Definition' WHEN tablename LIKE 't_tile_summary%' THEN 'Tile Summary' WHEN tablename LIKE 't_area_statistics%' THEN 'Area Statistics' + WHEN tablename LIKE 't_abnormal%' THEN 'Abnormal 
Tracks' ELSE 'Other' END as table_type FROM pg_tables @@ -130,27 +131,17 @@ public class DataSourceDebugController { ORDER BY table_type, tablename """ ); - + result.put("tables", tables); result.put("tableCount", tables.size()); - - // 특정 테이블 존재 확인 + + // 주요 테이블 존재 확인 Map criticalTables = new HashMap<>(); - - if ("collect".equals(dbType)) { - // 수집 DB에 있어야 할 테이블 - criticalTables.put("sig_test", checkTableExists(jdbcTemplate, "signal", "sig_test")); - String todayPartition = "sig_test_" + java.time.LocalDate.now().format( - java.time.format.DateTimeFormatter.ofPattern("yyMMdd") - ); - criticalTables.put(todayPartition, checkTableExists(jdbcTemplate, "signal", todayPartition)); - } else { - // 조회 DB에 있어야 할 테이블 - criticalTables.put("t_areas", checkTableExists(jdbcTemplate, "signal", "t_areas")); - criticalTables.put("t_vessel_latest_position", checkTableExists(jdbcTemplate, "signal", "t_vessel_latest_position")); - criticalTables.put("t_tile_summary", checkTableExists(jdbcTemplate, "signal", "t_tile_summary")); - criticalTables.put("t_area_statistics", checkTableExists(jdbcTemplate, "signal", "t_area_statistics")); - } + criticalTables.put("t_ais_position", checkTableExists(jdbcTemplate, "signal", "t_ais_position")); + criticalTables.put("t_vessel_tracks_5min", checkTableExists(jdbcTemplate, "signal", "t_vessel_tracks_5min")); + criticalTables.put("t_areas", checkTableExists(jdbcTemplate, "signal", "t_areas")); + criticalTables.put("t_tile_summary", checkTableExists(jdbcTemplate, "signal", "t_tile_summary")); + criticalTables.put("t_area_statistics", checkTableExists(jdbcTemplate, "signal", "t_area_statistics")); result.put("criticalTables", criticalTables); diff --git a/src/main/java/gc/mda/signal_batch/monitoring/controller/MetricsController.java b/src/main/java/gc/mda/signal_batch/monitoring/controller/MetricsController.java index 2feaeb0..fcc5b48 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/controller/MetricsController.java +++ 
b/src/main/java/gc/mda/signal_batch/monitoring/controller/MetricsController.java @@ -36,7 +36,7 @@ public class MetricsController { try { // Job 통계 - Set allExecutions = jobExplorer.findRunningJobExecutions("vesselAggregationJob"); + Set allExecutions = jobExplorer.findRunningJobExecutions("vesselTrackAggregationJob"); summary.put("totalJobsCompleted", allExecutions.size()); summary.put("totalRecordsProcessed", 0); @@ -85,14 +85,14 @@ public class MetricsController { } @GetMapping("/jobs/recent") - @Operation(summary = "최근 Job 실행 이력", description = "vesselAggregationJob의 최근 실행 이력을 조회합니다. 상태, 시작/종료 시간, 처리 건수를 포함합니다") + @Operation(summary = "최근 Job 실행 이력", description = "vesselTrackAggregationJob의 최근 실행 이력을 조회합니다. 상태, 시작/종료 시간, 처리 건수를 포함합니다") public List> getRecentJobs( @Parameter(description = "조회할 Job 수 (기본: 10)") @RequestParam(defaultValue = "10") int count) { List> jobs = new ArrayList<>(); try { // 최근 JobInstance 조회 - List instances = jobExplorer.getJobInstances("vesselAggregationJob", 0, count); + List instances = jobExplorer.getJobInstances("vesselTrackAggregationJob", 0, count); for (JobInstance instance : instances) { List executions = jobExplorer.getJobExecutions(instance); diff --git a/src/main/java/gc/mda/signal_batch/monitoring/controller/MonitoringController.java b/src/main/java/gc/mda/signal_batch/monitoring/controller/MonitoringController.java index 713ae18..6da0b89 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/controller/MonitoringController.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/controller/MonitoringController.java @@ -19,7 +19,6 @@ import java.util.*; @Tag(name = "시스템 모니터링 API", description = "데이터 처리 지연, 해구별 현황, 처리량 및 데이터 품질 모니터링 API") public class MonitoringController { - private final JdbcTemplate collectJdbcTemplate; private final JdbcTemplate queryJdbcTemplate; /** @@ -31,40 +30,40 @@ public class MonitoringController { Map result = new HashMap<>(); try { - // 수집 DB의 최신 데이터 - Map collectLatest = 
collectJdbcTemplate.queryForMap( + // AIS 최신 위치 데이터 (캐시 스냅샷) + Map aisLatest = queryJdbcTemplate.queryForMap( """ - SELECT - MAX(message_time) as latest_message_time, + SELECT + MAX(last_update) as latest_update_time, COUNT(*) as recent_count - FROM signal.sig_test - WHERE message_time > NOW() - INTERVAL '10 minutes' + FROM signal.t_ais_position + WHERE last_update > NOW() - INTERVAL '10 minutes' """ ); - - // 조회 DB의 최신 처리 데이터 + + // 집계 데이터의 최신 처리 시간 Map queryLatest = queryJdbcTemplate.queryForMap( """ - SELECT + SELECT MAX(time_bucket) as latest_processed_time, COUNT(DISTINCT tile_id) as processed_tiles FROM signal.t_tile_summary WHERE time_bucket > NOW() - INTERVAL '10 minutes' """ ); - - LocalDateTime collectTime = (LocalDateTime) collectLatest.get("latest_message_time"); + + LocalDateTime aisTime = (LocalDateTime) aisLatest.get("latest_update_time"); LocalDateTime queryTime = (LocalDateTime) queryLatest.get("latest_processed_time"); - - if (collectTime != null && queryTime != null) { - long delayMinutes = java.time.Duration.between(queryTime, collectTime).toMinutes(); + + if (aisTime != null && queryTime != null) { + long delayMinutes = java.time.Duration.between(queryTime, aisTime).toMinutes(); result.put("delayMinutes", delayMinutes); result.put("status", delayMinutes < 10 ? "NORMAL" : delayMinutes < 30 ? 
"WARNING" : "CRITICAL"); } - - result.put("collectLatestTime", collectTime); + + result.put("aisLatestTime", aisTime); result.put("queryLatestTime", queryTime); - result.put("recentCollectCount", collectLatest.get("recent_count")); + result.put("recentAisCount", aisLatest.get("recent_count")); result.put("processedTiles", queryLatest.get("processed_tiles")); } catch (Exception e) { diff --git a/src/main/java/gc/mda/signal_batch/monitoring/controller/PerformanceOptimizationController.java b/src/main/java/gc/mda/signal_batch/monitoring/controller/PerformanceOptimizationController.java index 4db420f..a086743 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/controller/PerformanceOptimizationController.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/controller/PerformanceOptimizationController.java @@ -240,7 +240,7 @@ public class PerformanceOptimizationController { "t_vessel_tracks_daily", "t_grid_tracks_summary_daily", "t_area_tracks_summary_daily", - "t_vessel_latest_position", + "t_ais_position", "t_tile_summary", "t_area_statistics" ); diff --git a/src/main/java/gc/mda/signal_batch/monitoring/health/BatchHealthIndicator.java b/src/main/java/gc/mda/signal_batch/monitoring/health/BatchHealthIndicator.java index 0837440..adcb736 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/health/BatchHealthIndicator.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/health/BatchHealthIndicator.java @@ -181,55 +181,55 @@ public class BatchHealthIndicator implements HealthIndicator { PartitionHealthStatus status = new PartitionHealthStatus(); try { - // collectDB의 원본 파티션 수 확인 - YYMMDD 형식 (6자리) - Integer collectPartitionCount = collectJdbcTemplate.queryForObject( + // 5분 집계 파티션 수 확인 (YYMMDD 형식) + Integer trackPartitionCount = queryJdbcTemplate.queryForObject( """ - SELECT COUNT(*) - FROM pg_tables - WHERE schemaname = 'signal' - AND tablename LIKE 'sig_test_%' + SELECT COUNT(*) + FROM pg_tables + WHERE schemaname = 'signal' + AND tablename LIKE 
't_vessel_tracks_5min_%' AND tablename ~ '\\\\d{6}$' """, Integer.class ); - - // queryDB의 집계 파티션 수 확인 + + // 타일/영역 집계 파티션 수 확인 Integer queryPartitionCount = queryJdbcTemplate.queryForObject( """ - SELECT COUNT(*) - FROM pg_tables - WHERE schemaname = 'signal' + SELECT COUNT(*) + FROM pg_tables + WHERE schemaname = 'signal' AND (tablename LIKE 't_tile_summary_%' OR tablename LIKE 't_area_statistics_%') AND tablename ~ '\\\\d{6}$' """, Integer.class ); - - status.setCurrentPartitions(collectPartitionCount != null ? collectPartitionCount : 0); + + status.setCurrentPartitions(trackPartitionCount != null ? trackPartitionCount : 0); status.setQueryPartitions(queryPartitionCount != null ? queryPartitionCount : 0); - // collectDB의 미래 파티션 확인 - YYMMDD 형식 + // 미래 파티션 확인 (5분 트랙 기준) LocalDate tomorrow = LocalDate.now().plusDays(1); - String tomorrowPartition = "sig_test_" + + String tomorrowPartition = "t_vessel_tracks_5min_" + tomorrow.format(java.time.format.DateTimeFormatter.ofPattern("yyMMdd")); - Boolean hasFuturePartition = collectJdbcTemplate.queryForObject( + Boolean hasFuturePartition = queryJdbcTemplate.queryForObject( "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = ?)", Boolean.class, tomorrowPartition ); status.setHasFuturePartitions(Boolean.TRUE.equals(hasFuturePartition)); - // collectDB의 가장 큰 파티션 + // 가장 큰 5분 트랙 파티션 try { - Map largestPartition = collectJdbcTemplate.queryForMap( + Map largestPartition = queryJdbcTemplate.queryForMap( """ - SELECT + SELECT tablename, pg_size_pretty(pg_total_relation_size(schemaname||'.'||tablename)) as size FROM pg_tables - WHERE schemaname = 'signal' - AND tablename LIKE 'sig_test_%' + WHERE schemaname = 'signal' + AND tablename LIKE 't_vessel_tracks_5min_%' ORDER BY pg_total_relation_size(schemaname||'.'||tablename) DESC LIMIT 1 """ @@ -239,7 +239,6 @@ public class BatchHealthIndicator implements HealthIndicator { status.setLargestPartition(new HashMap<>()); } - // 파티션이 하나도 없어도 healthy로 처리 
(초기 상태) status.setHealthy(true); } catch (Exception e) { @@ -282,14 +281,14 @@ public class BatchHealthIndicator implements HealthIndicator { try { // 테이블 존재 여부 확인 Boolean tableExists = queryJdbcTemplate.queryForObject( - "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = 't_vessel_latest_position')", + "SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'signal' AND tablename = 't_ais_position')", Boolean.class ); if (Boolean.TRUE.equals(tableExists)) { // 테이블에 데이터가 있는지 확인 Integer rowCount = queryJdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM signal.t_vessel_latest_position LIMIT 1", + "SELECT COUNT(*) FROM signal.t_ais_position LIMIT 1", Integer.class ); @@ -299,9 +298,9 @@ public class BatchHealthIndicator implements HealthIndicator { """ SELECT COALESCE(COUNT(*), 0) as total_records, - COALESCE(COUNT(DISTINCT sig_src_cd || ':' || target_id), 0) as unique_vessels, + COALESCE(COUNT(DISTINCT mmsi), 0) as unique_vessels, COALESCE(AVG(update_count), 0.0) as avg_updates - FROM signal.t_vessel_latest_position + FROM signal.t_ais_position WHERE last_update > NOW() - INTERVAL '1 hour' """ ); @@ -318,7 +317,7 @@ public class BatchHealthIndicator implements HealthIndicator { // 처리 지연 LocalDateTime latestProcessed = queryJdbcTemplate.queryForObject( - "SELECT MAX(last_update) FROM signal.t_vessel_latest_position", + "SELECT MAX(last_update) FROM signal.t_ais_position", LocalDateTime.class ); diff --git a/src/main/java/gc/mda/signal_batch/monitoring/performance/DatabaseIndexOptimizer.java b/src/main/java/gc/mda/signal_batch/monitoring/performance/DatabaseIndexOptimizer.java index 93951de..027ac28 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/performance/DatabaseIndexOptimizer.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/performance/DatabaseIndexOptimizer.java @@ -1,7 +1,7 @@ package gc.mda.signal_batch.monitoring.performance; -import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; +import 
org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.jdbc.core.JdbcTemplate; import org.springframework.stereotype.Component; @@ -14,14 +14,15 @@ import java.util.*; */ @Slf4j @Component -@RequiredArgsConstructor @ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) public class DatabaseIndexOptimizer { - @SuppressWarnings("unused") - private final JdbcTemplate collectJdbcTemplate; private final JdbcTemplate queryJdbcTemplate; + public DatabaseIndexOptimizer(@Qualifier("queryJdbcTemplate") JdbcTemplate queryJdbcTemplate) { + this.queryJdbcTemplate = queryJdbcTemplate; + } + /** * 인덱스 분석 실행 */ @@ -88,15 +89,14 @@ public class DatabaseIndexOptimizer { // 주요 테이블별 권장 인덱스 확인 Map> recommendedIndexes = Map.of( - "sig_test", Arrays.asList( - "(target_id, message_time DESC)", - "(message_time)", - "(sig_src_cd, message_time)", - "(lat, lon)" + "t_ais_position", Arrays.asList( + "(mmsi)", + "(last_update DESC)", + "USING GIST (geom)" ), "t_vessel_tracks_5min", Arrays.asList( - "(time_bucket, sig_src_cd, target_id)", - "(sig_src_cd, target_id, time_bucket DESC)", + "(time_bucket, mmsi)", + "(mmsi, time_bucket DESC)", "USING GIST (track_geom)" ), "t_grid_vessel_tracks", Arrays.asList( @@ -159,11 +159,11 @@ public class DatabaseIndexOptimizer { * 인덱스 필요 이유 판단 */ private String determineIndexReason(String tableName, String columns) { - if (columns.contains("time_bucket")) { + if (columns.contains("time_bucket") || columns.contains("last_update")) { return "Time-based queries optimization"; } else if (columns.contains("GIST")) { return "Spatial queries optimization"; - } else if (columns.contains("target_id")) { + } else if (columns.contains("mmsi")) { return "Vessel lookup optimization"; } else if (columns.contains("haegu_no") || columns.contains("area_id")) { return "Area-based filtering optimization"; @@ -176,7 
+176,7 @@ public class DatabaseIndexOptimizer { */ private int calculateIndexPriority(String tableName, String columns) { // 시간 기반 인덱스가 가장 중요 - if (columns.contains("time_bucket") || columns.contains("message_time")) { + if (columns.contains("time_bucket") || columns.contains("last_update")) { return 5; } // 공간 인덱스 @@ -184,7 +184,7 @@ public class DatabaseIndexOptimizer { return 4; } // 주요 조회 키 - else if (columns.contains("target_id")) { + else if (columns.contains("mmsi")) { return 3; } // 필터링용 인덱스 diff --git a/src/main/java/gc/mda/signal_batch/monitoring/performance/IndexCreator.java b/src/main/java/gc/mda/signal_batch/monitoring/performance/IndexCreator.java index b7b56e8..e7b30b7 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/performance/IndexCreator.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/performance/IndexCreator.java @@ -52,7 +52,7 @@ public class IndexCreator implements CommandLineRunner { List indexCreateStatements = List.of( // 5분 단위 궤적 테이블 "CREATE INDEX IF NOT EXISTS idx_vessel_tracks_5min_vessel_time " + - "ON signal.t_vessel_tracks_5min (sig_src_cd, target_id, time_bucket DESC)", + "ON signal.t_vessel_tracks_5min (mmsi, time_bucket DESC)", // 해구별 궤적 "CREATE INDEX IF NOT EXISTS idx_grid_vessel_tracks_haegu_time_desc " + diff --git a/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceOptimizationManager.java b/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceOptimizationManager.java index 7bf7220..c9ab992 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceOptimizationManager.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceOptimizationManager.java @@ -1,8 +1,7 @@ package gc.mda.signal_batch.monitoring.performance; +import gc.mda.signal_batch.batch.reader.AisTargetCacheManager; import gc.mda.signal_batch.domain.gis.cache.AreaBoundaryCache; -import gc.mda.signal_batch.global.util.VesselDataHolder; -import 
gc.mda.signal_batch.global.util.VesselTrackDataHolder; import gc.mda.signal_batch.monitoring.health.BatchMetricsCollector; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -30,8 +29,7 @@ import java.util.concurrent.atomic.AtomicLong; public class PerformanceOptimizationManager { private final AreaBoundaryCache areaCache; - private final VesselDataHolder vesselDataHolder; - private final VesselTrackDataHolder vesselTrackDataHolder; + private final AisTargetCacheManager aisTargetCacheManager; private final BatchMetricsCollector metricsCollector; // 성능 카운터 @@ -55,7 +53,7 @@ public class PerformanceOptimizationManager { // 메모리 상태 status.setMemoryUsage(getMemoryUsage()); - status.setDataHolderSize(vesselDataHolder.size() + vesselTrackDataHolder.size()); + status.setDataHolderSize((int) aisTargetCacheManager.size()); // 스레드풀 상태 threadPools.forEach((name, pool) -> { diff --git a/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceTestRunner.java b/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceTestRunner.java deleted file mode 100644 index 551f98a..0000000 --- a/src/main/java/gc/mda/signal_batch/monitoring/performance/PerformanceTestRunner.java +++ /dev/null @@ -1,603 +0,0 @@ -package gc.mda.signal_batch.monitoring.performance; - -import gc.mda.signal_batch.monitoring.health.BatchMetricsCollector; -import lombok.RequiredArgsConstructor; -import lombok.extern.slf4j.Slf4j; -import org.springframework.batch.core.*; -import org.springframework.batch.core.launch.JobLauncher; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.CommandLineRunner; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Profile; -import org.springframework.jdbc.core.JdbcTemplate; -import org.springframework.stereotype.Component; - -import java.time.Duration; 
-import java.time.LocalDateTime; -import java.util.*; -import java.util.concurrent.*; -import java.util.stream.IntStream; - - -@Slf4j -@Component -@Profile("performance-test") -@ConditionalOnProperty(name = "vessel.batch.scheduler.enabled", havingValue = "true", matchIfMissing = true) -@RequiredArgsConstructor -public class PerformanceTestRunner implements CommandLineRunner { - - @Autowired - @Qualifier("asyncJobLauncher") - private JobLauncher jobLauncher; - - @Autowired - @Qualifier("vesselAggregationJob") - private Job vesselAggregationJob; - - @Qualifier("collectJdbcTemplate") - private final JdbcTemplate collectJdbcTemplate; - - @Qualifier("queryJdbcTemplate") - private final JdbcTemplate queryJdbcTemplate; - - @SuppressWarnings("unused") - private final BatchMetricsCollector metricsCollector; - - @Override - public void run(String... args) throws Exception { - log.info("=== Starting Performance Test Suite ==="); - - // 테스트 시나리오 선택 - String scenario = args.length > 0 ? args[0] : "all"; - - switch (scenario) { - case "throughput": - runThroughputTest(); - break; - case "concurrent": - runConcurrentExecutionTest(); - break; - case "stress": - runStressTest(); - break; - case "endurance": - runEnduranceTest(); - break; - case "all": - runAllTests(); - break; - default: - log.error("Unknown scenario: {}", scenario); - } - - log.info("=== Performance Test Completed ==="); - } - - /** - * 처리량 테스트 - 다양한 데이터 크기로 처리 속도 측정 - */ - private void runThroughputTest() throws Exception { - log.info("Starting Throughput Test"); - - List dataSizes = Arrays.asList(10000, 100000, 500000, 1000000, 5000000); - Map results = new LinkedHashMap<>(); - - for (int size : dataSizes) { - log.info("Testing with {} records", size); - - // 테스트 데이터 생성 - generateTestData(size); - - // 실행 및 측정 - ThroughputResult result = measureThroughput(size); - results.put(size, result); - - // 클린업 - cleanupTestData(); - - // 결과 출력 - log.info("Result - Size: {}, Duration: {}s, Throughput: {} records/sec", - 
size, result.duration.getSeconds(), result.throughput); - } - - // 최종 리포트 - generateThroughputReport(results); - } - - /** - * 동시 실행 테스트 - 여러 Job을 동시에 실행 - */ - private void runConcurrentExecutionTest() throws Exception { - log.info("Starting Concurrent Execution Test"); - - int concurrentJobs = 5; - int recordsPerJob = 100000; - - // 각 Job에 대한 테스트 데이터 생성 - IntStream.range(0, concurrentJobs).forEach(i -> { - generatePartitionedTestData(recordsPerJob, i); - }); - - // 동시 실행 - List> futures = new ArrayList<>(); - ExecutorService executor = Executors.newFixedThreadPool(concurrentJobs); - - LocalDateTime baseTime = LocalDateTime.now(); - - for (int i = 0; i < concurrentJobs; i++) { - final int jobIndex = i; - CompletableFuture future = CompletableFuture.supplyAsync(() -> { - try { - LocalDateTime startTime = baseTime.minusHours(jobIndex + 1); - LocalDateTime endTime = startTime.plusHours(1); - - JobParameters params = new JobParametersBuilder() - .addLocalDateTime("startTime", startTime) - .addLocalDateTime("endTime", endTime) - .addLong("executionTime", System.currentTimeMillis() + jobIndex) - .toJobParameters(); - - return jobLauncher.run(vesselAggregationJob, params); - } catch (Exception e) { - throw new CompletionException(e); - } - }, executor); - - futures.add(future); - } - - // 모든 Job 완료 대기 - List executions = futures.stream() - .map(CompletableFuture::join) - .toList(); - - // 결과 분석 - analyzeConcurrentResults(executions); - - executor.shutdown(); - } - - /** - * 스트레스 테스트 - 시스템 한계 테스트 - */ - private void runStressTest() throws Exception { - log.info("Starting Stress Test"); - - StressTestConfig config = new StressTestConfig(); - config.initialLoad = 100000; - config.incrementFactor = 2; - config.maxIterations = 5; - config.targetErrorRate = 0.01; // 1% - - int currentLoad = config.initialLoad; - List results = new ArrayList<>(); - - for (int i = 0; i < config.maxIterations; i++) { - log.info("Stress test iteration {} with load {}", i + 1, currentLoad); - - // 
데이터 생성 - generateTestData(currentLoad); - - // 시스템 메트릭 수집 시작 - SystemMetrics beforeMetrics = collectSystemMetrics(); - - // Job 실행 - JobExecution execution = runJob(); - - // 시스템 메트릭 수집 종료 - SystemMetrics afterMetrics = collectSystemMetrics(); - - // 결과 분석 - StressTestResult result = analyzeStressTestResult( - execution, beforeMetrics, afterMetrics, currentLoad - ); - results.add(result); - - // 에러율 체크 - if (result.errorRate > config.targetErrorRate) { - log.warn("Error rate {} exceeded target {}", result.errorRate, config.targetErrorRate); - break; - } - - // 다음 반복을 위한 부하 증가 - currentLoad *= config.incrementFactor; - - // 클린업 - cleanupTestData(); - } - - // 스트레스 테스트 리포트 - generateStressTestReport(results); - } - - /** - * 지속성 테스트 - 장시간 실행 안정성 테스트 - */ - private void runEnduranceTest() throws Exception { - log.info("Starting Endurance Test"); - - int durationHours = 4; // 4시간 테스트 - int recordsPerHour = 1000000; - LocalDateTime testStartTime = LocalDateTime.now(); - LocalDateTime testEndTime = testStartTime.plusHours(durationHours); - - List results = new ArrayList<>(); - - while (LocalDateTime.now().isBefore(testEndTime)) { - LocalDateTime iterationStart = LocalDateTime.now(); - - // 매 시간마다 데이터 생성 - generateTestData(recordsPerHour); - - // Job 실행 - JobExecution execution = runJob(); - - // 메모리 및 시스템 상태 체크 - EnduranceTestResult result = new EnduranceTestResult(); - result.iterationTime = iterationStart; - result.execution = execution; - result.memoryUsage = getMemoryUsage(); - result.activeThreads = Thread.activeCount(); - result.cpuUsage = getCpuUsage(); - - results.add(result); - - // 결과 로깅 - log.info("Endurance test iteration completed - Memory: {}MB, Threads: {}, CPU: {}%", - result.memoryUsage / 1024 / 1024, - result.activeThreads, - result.cpuUsage - ); - - // 다음 반복까지 대기 - Thread.sleep(TimeUnit.MINUTES.toMillis(10)); - } - - // 지속성 테스트 리포트 - generateEnduranceTestReport(results); - } - - /** - * 모든 테스트 실행 - */ - private void runAllTests() throws Exception { - 
runThroughputTest(); - Thread.sleep(5000); - - runConcurrentExecutionTest(); - Thread.sleep(5000); - - runStressTest(); - Thread.sleep(5000); - - runEnduranceTest(); - } - - /** - * 테스트 데이터 생성 - */ - private void generateTestData(int recordCount) { - log.info("Generating {} test records", recordCount); - - String sql = """ - INSERT INTO signal.sig_test ( - message_time, real_time, sig_src_cd, target_id, - lat, lon, sog, cog, heading, ship_nm, ship_ty, - vts_cd, mmsi, vpass_id, ship_no - ) - SELECT - NOW() - INTERVAL '1 hour' * (random() * 24), - NOW() - INTERVAL '1 hour' * (random() * 24), - CASE WHEN random() < 0.7 THEN 'AIS' ELSE 'VPASS' END, - 'TEST_VESSEL_' || seq, - 33.0 + random() * 6, - 124.0 + random() * 8, - random() * 30, - random() * 360, - floor(random() * 360)::numeric, - 'Test Ship ' || seq, - CASE floor(random() * 5)::int - WHEN 0 THEN 'CARGO' - WHEN 1 THEN 'TANKER' - WHEN 2 THEN 'PASSENGER' - WHEN 3 THEN 'FISHING' - ELSE 'OTHER' - END, - 'TEST', - '999' || lpad(seq::text, 6, '0'), - 'TEST_VP' || seq, - 'TEST_SN' || seq - FROM generate_series(1, ?) seq - """; - - collectJdbcTemplate.update(sql, recordCount); - } - - /** - * 파티션별 테스트 데이터 생성 - */ - private void generatePartitionedTestData(int recordCount, int partitionIndex) { - LocalDateTime baseTime = LocalDateTime.now().minusHours(partitionIndex + 1); - - String sql = """ - INSERT INTO signal.sig_test ( - message_time, real_time, sig_src_cd, target_id, - lat, lon, sog, cog, heading, ship_nm, ship_ty, - vts_cd, mmsi, vpass_id, ship_no - ) - SELECT - ? + INTERVAL '1 minute' * (seq % 60), - ? + INTERVAL '1 minute' * (seq % 60), - 'AIS', - 'PART_' || ? || '_VESSEL_' || seq, - 33.0 + random() * 6, - 124.0 + random() * 8, - random() * 30, - random() * 360, - floor(random() * 360)::numeric, - 'Partition ' || ? || ' Ship ' || seq, - 'CARGO', - 'TEST', - '888' || ? || lpad(seq::text, 5, '0'), - 'PART_VP' || ? || '_' || seq, - 'PART_SN' || ? || '_' || seq - FROM generate_series(1, ?) 
seq - """; - - collectJdbcTemplate.update(sql, - baseTime, baseTime, partitionIndex, partitionIndex, - partitionIndex, partitionIndex, partitionIndex, recordCount - ); - } - - /** - * 처리량 측정 - */ - private ThroughputResult measureThroughput(int dataSize) throws Exception { - LocalDateTime startTime = LocalDateTime.now(); - - JobExecution execution = runJob(); - - LocalDateTime endTime = LocalDateTime.now(); - Duration duration = Duration.between(startTime, endTime); - - long totalRead = execution.getStepExecutions().stream() - .mapToLong(StepExecution::getReadCount) - .sum(); - - double throughput = totalRead > 0 ? (double) totalRead / duration.getSeconds() : 0; - - ThroughputResult result = new ThroughputResult(); - result.dataSize = dataSize; - result.duration = duration; - result.throughput = throughput; - result.status = execution.getStatus(); - - return result; - } - - /** - * Job 실행 - */ - private JobExecution runJob() throws Exception { - LocalDateTime endTime = LocalDateTime.now(); - LocalDateTime startTime = endTime.minusHours(24); - - JobParameters params = new JobParametersBuilder() - .addLocalDateTime("startTime", startTime) - .addLocalDateTime("endTime", endTime) - .addLong("executionTime", System.currentTimeMillis()) - .toJobParameters(); - - return jobLauncher.run(vesselAggregationJob, params); - } - - /** - * 시스템 메트릭 수집 - */ - private SystemMetrics collectSystemMetrics() { - SystemMetrics metrics = new SystemMetrics(); - - Runtime runtime = Runtime.getRuntime(); - metrics.totalMemory = runtime.totalMemory(); - metrics.freeMemory = runtime.freeMemory(); - metrics.maxMemory = runtime.maxMemory(); - - metrics.activeThreads = Thread.activeCount(); - metrics.cpuCount = runtime.availableProcessors(); - - // DB 연결 상태 - metrics.activeDbConnections = collectJdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM pg_stat_activity WHERE state = 'active'", - Integer.class - ); - - return metrics; - } - - /** - * 메모리 사용량 조회 - */ - private long getMemoryUsage() { - 
Runtime runtime = Runtime.getRuntime(); - return runtime.totalMemory() - runtime.freeMemory(); - } - - /** - * CPU 사용률 조회 (근사치) - */ - private double getCpuUsage() { - // 실제 구현은 JMX나 시스템 명령어 사용 - return Math.random() * 100; // 임시 - } - - /** - * 테스트 데이터 정리 - */ - private void cleanupTestData() { - collectJdbcTemplate.update("DELETE FROM signal.sig_test WHERE vts_cd = 'TEST'"); - queryJdbcTemplate.update("DELETE FROM signal.t_vessel_latest_position WHERE sig_src_cd LIKE 'TEST%'"); - queryJdbcTemplate.update("DELETE FROM signal.t_tile_summary WHERE created_at > NOW() - INTERVAL '1 day'"); - } - - /** - * 동시 실행 결과 분석 - */ - private void analyzeConcurrentResults(List executions) { - log.info("=== Concurrent Execution Results ==="); - - int successful = 0; - int failed = 0; - long totalDuration = 0; - long totalRecords = 0; - - for (JobExecution execution : executions) { - if (execution.getStatus() == BatchStatus.COMPLETED) { - successful++; - } else { - failed++; - } - - if (execution.getStartTime() != null && execution.getEndTime() != null) { - totalDuration += Duration.between( - execution.getStartTime(), - execution.getEndTime() - ).toMillis(); - } - - totalRecords += execution.getStepExecutions().stream() - .mapToLong(StepExecution::getReadCount) - .sum(); - } - - log.info("Successful: {}, Failed: {}", successful, failed); - log.info("Average duration: {} ms", totalDuration / executions.size()); - log.info("Total records processed: {}", totalRecords); - } - - /** - * 스트레스 테스트 결과 분석 - */ - private StressTestResult analyzeStressTestResult(JobExecution execution, - SystemMetrics before, - SystemMetrics after, - int load) { - StressTestResult result = new StressTestResult(); - result.load = load; - result.status = execution.getStatus(); - - // 에러율 계산 - long totalRead = execution.getStepExecutions().stream() - .mapToLong(StepExecution::getReadCount) - .sum(); - long totalSkip = execution.getStepExecutions().stream() - .mapToLong(StepExecution::getSkipCount) - .sum(); - 
- result.errorRate = totalRead > 0 ? (double) totalSkip / totalRead : 0; - - // 메모리 증가량 - result.memoryIncrease = (after.totalMemory - after.freeMemory) - - (before.totalMemory - before.freeMemory); - - // 스레드 증가량 - result.threadIncrease = after.activeThreads - before.activeThreads; - - return result; - } - - /** - * 리포트 생성 메소드들 - */ - private void generateThroughputReport(Map results) { - log.info("\n=== Throughput Test Report ==="); - log.info("Data Size | Duration (s) | Throughput (rec/s) | Status"); - log.info("----------|--------------|-------------------|--------"); - - results.forEach((size, result) -> { - log.info(String.format("%-9d | %-12d | %-17.2f | %s", - size, - result.duration.getSeconds(), - result.throughput, - result.status - )); - }); - } - - private void generateStressTestReport(List results) { - log.info("\n=== Stress Test Report ==="); - log.info("Load | Error Rate | Memory Inc (MB) | Thread Inc | Status"); - log.info("---------|------------|-----------------|------------|--------"); - - results.forEach(result -> { - log.info(String.format("%-8d | %-10.2f | %-15d | %-10d | %s", - result.load, - result.errorRate * 100, - result.memoryIncrease / 1024 / 1024, - result.threadIncrease, - result.status - )); - }); - } - - private void generateEnduranceTestReport(List results) { - log.info("\n=== Endurance Test Report ==="); - log.info("Time | Memory (MB) | Threads | CPU (%) | Status"); - log.info("---------|-------------|---------|---------|--------"); - - results.forEach(result -> { - log.info(String.format("%-8s | %-11d | %-7d | %-7.2f | %s", - result.iterationTime.toLocalTime(), - result.memoryUsage / 1024 / 1024, - result.activeThreads, - result.cpuUsage, - result.execution.getStatus() - )); - }); - } - - // 내부 클래스들 - private static class ThroughputResult { - @SuppressWarnings("unused") - int dataSize; - Duration duration; - double throughput; - BatchStatus status; - } - - private static class StressTestConfig { - int initialLoad; - int 
incrementFactor; - int maxIterations; - double targetErrorRate; - } - - private static class StressTestResult { - int load; - double errorRate; - long memoryIncrease; - int threadIncrease; - BatchStatus status; - } - - private static class EnduranceTestResult { - LocalDateTime iterationTime; - JobExecution execution; - long memoryUsage; - int activeThreads; - double cpuUsage; - } - - private static class SystemMetrics { - long totalMemory; - long freeMemory; - @SuppressWarnings("unused") - long maxMemory; - int activeThreads; - @SuppressWarnings("unused") - int cpuCount; - @SuppressWarnings("unused") - int activeDbConnections; - } -} \ No newline at end of file diff --git a/src/main/java/gc/mda/signal_batch/monitoring/performance/QueryPerformanceOptimizer.java b/src/main/java/gc/mda/signal_batch/monitoring/performance/QueryPerformanceOptimizer.java index 2a0692f..5a19cec 100644 --- a/src/main/java/gc/mda/signal_batch/monitoring/performance/QueryPerformanceOptimizer.java +++ b/src/main/java/gc/mda/signal_batch/monitoring/performance/QueryPerformanceOptimizer.java @@ -101,7 +101,7 @@ public class QueryPerformanceOptimizer { */ private String getQueryByIdPattern(String queryId) { Map queryTemplates = Map.of( - "vessel_latest_position", "SELECT DISTINCT ON (target_id) * FROM signal.sig_test WHERE message_time > NOW() - INTERVAL '5 minutes'", + "vessel_latest_position", "SELECT * FROM signal.t_ais_position WHERE last_update > NOW() - INTERVAL '5 minutes'", "area_statistics", "SELECT * FROM signal.t_area_statistics WHERE time_bucket > NOW() - INTERVAL '1 hour'", "grid_tracks", "SELECT * FROM signal.t_grid_vessel_tracks WHERE time_bucket > NOW() - INTERVAL '1 hour'" ); @@ -268,8 +268,8 @@ public class QueryPerformanceOptimizer { // 쿼리별 특화 제안 switch (stats.getQueryId()) { case "vessel_latest_position": - suggestions.add("Consider partitioning sig_test table by time"); - suggestions.add("Add index on (target_id, message_time DESC)"); + suggestions.add("Add index on 
t_ais_position(last_update DESC)"); + suggestions.add("Add spatial index on t_ais_position USING GIST (geom)"); break; case "area_statistics": suggestions.add("Consider materialized view for area statistics"); diff --git a/src/main/resources/application-dev.yml b/src/main/resources/application-dev.yml index 74382be..b3f9177 100644 --- a/src/main/resources/application-dev.yml +++ b/src/main/resources/application-dev.yml @@ -243,6 +243,12 @@ vessel: # spring 하위가 아닌 최상위 레벨 exclude-stationary-vessels: true # 정박 선박 제외 여부 lenient-mode: true # 관대한 모드 활성화 +# S&P AIS API 캐시 TTL (개발: 60분) +app: + cache: + ais-target: + ttl-minutes: 60 + # 액추에이터 설정 management: endpoints: diff --git a/src/main/resources/application-local.yml b/src/main/resources/application-local.yml index e2630fa..62cbc62 100644 --- a/src/main/resources/application-local.yml +++ b/src/main/resources/application-local.yml @@ -1,83 +1,81 @@ -# 로컬 개발 환경 설정 -# 단일 PostgreSQL 인스턴스를 모든 DataSource가 공유 -server: - port: 8090 +# ============================================================================= +# Local Profile — snpdb (211.208.115.83) 직접 연결 테스트용 +# ============================================================================= +# 실행: java -jar target/*.jar --spring.profiles.active=local +# 또는: mvn spring-boot:run -Dspring-boot.run.profiles=local +# ============================================================================= spring: datasource: - # 로컬 수집 DB (동일 DB 사용) + # ── CollectDataSource (레거시 호환 — snpdb로 대체) ── collect: - jdbc-url: jdbc:postgresql://localhost:5432/mdadb2?stringtype=unspecified¤tSchema=signal&TimeZone=Asia/Seoul - username: mda - password: mda#8932 + jdbc-url: jdbc:postgresql://211.208.115.83:5432/snpdb?currentSchema=signal&options=-csearch_path=signal,public&assumeMinServerVersion=12&reWriteBatchedInserts=true + username: snp + password: snp#8932 driver-class-name: org.postgresql.Driver - hikari: - pool-name: LocalCollectPool - maximum-pool-size: 10 - minimum-idle: 2 - # 로컬 조회 DB (동일 DB 
사용) + # ── QueryDataSource (집계 데이터 읽기/쓰기) ── query: - jdbc-url: jdbc:postgresql://localhost:5432/mdadb2?stringtype=unspecified¤tSchema=signal&TimeZone=Asia/Seoul - username: mda - password: mda#8932 + jdbc-url: jdbc:postgresql://211.208.115.83:5432/snpdb?currentSchema=signal&options=-csearch_path=signal,public&assumeMinServerVersion=12&reWriteBatchedInserts=true + username: snp + password: snp#8932 driver-class-name: org.postgresql.Driver hikari: - pool-name: LocalQueryPool - maximum-pool-size: 10 - minimum-idle: 2 + connection-init-sql: "SET TIME ZONE 'Asia/Seoul'; SET search_path TO signal, public;" - # 로컬 배치 메타 DB (동일 DB 사용) + + # ── BatchDataSource (Spring Batch 메타 테이블) ── batch: - jdbc-url: jdbc:postgresql://localhost:5432/mdadb2?stringtype=unspecified¤tSchema=signal&TimeZone=Asia/Seoul - username: mda - password: mda#8932 + jdbc-url: jdbc:postgresql://211.208.115.83:5432/snpdb?currentSchema=public&assumeMinServerVersion=12&reWriteBatchedInserts=true + username: snp + password: snp#8932 driver-class-name: org.postgresql.Driver hikari: - pool-name: LocalBatchPool - maximum-pool-size: 5 - minimum-idle: 1 + connection-init-sql: "SET TIME ZONE 'Asia/Seoul'; SET search_path TO public, signal;" batch: - job: - enabled: false # 자동 실행 방지 jdbc: - initialize-schema: always # 배치 테이블 자동 생성 - table-prefix: BATCH_ + # 최초 실행 시 Spring Batch 메타 테이블 자동 생성 + initialize-schema: always + # HikariCP — 로컬 테스트용 최소 풀 + hikari: + maximum-pool-size: 5 + minimum-idle: 2 + connection-timeout: 10000 + +# ── 배치 설정 (로컬 테스트용 축소) ── +vessel: + batch: + scheduler: + enabled: true + chunk-size: 1000 + partition-size: 4 + fetch-size: 50000 + bulk-insert: + batch-size: 1000 + parallel-threads: 2 + +# ── AIS API 수집 설정 ── +app: + cache: + ais-target: + ttl-minutes: 5 + + ais-api: + username: 7cc0517d-5ed6-452e-a06f-5bbfd6ab6ade + password: 2LLzSJNqtxWVD8zC + +# ── 서버 포트 ── +server: + port: 8090 + +# ── 로깅 (로컬 디버깅용) ── logging: level: - root: INFO gc.mda.signal_batch: DEBUG - 
gc.mda.signal_batch.batch: DEBUG - org.springframework.batch: DEBUG - org.springframework.jdbc: DEBUG - org.springframework.transaction: DEBUG - -# 로컬 환경 배치 설정 -vessel: - # 통합선박 기능 비활성화 (로컬에서는 테이블 없을 수 있음) - integration: - enabled: false - - batch: - # 스케줄러 설정 - 로컬에서는 비활성화 - scheduler: - enabled: false - incremental: - delay-minutes: 3 - - # 비정상 궤적 검출 비활성화 - abnormal-detection: - enabled: false - - # 로컬 최적화 설정 - chunk-size: 1000 - page-size: 1000 - partition-size: 4 - fetch-size: 10000 - - # 캐시 비활성화 - cache: - latest-position: - enabled: false + # PartitionManager sig_test 테이블 경고 억제 + gc.mda.signal_batch.global.util.PartitionManager: WARN + org.springframework.batch: INFO + # SQL 디버깅 필요시 아래 주석 해제 + # org.springframework.jdbc.core: DEBUG diff --git a/src/main/resources/application-prod-mpr.yml b/src/main/resources/application-prod-mpr.yml index 52d2931..a476aea 100644 --- a/src/main/resources/application-prod-mpr.yml +++ b/src/main/resources/application-prod-mpr.yml @@ -262,6 +262,12 @@ vessel: # spring 하위가 아닌 최상위 레벨 t_abnormal_tracks: retention-months: 0 # 비정상 항적: 무한 보관 +# S&P AIS API 캐시 TTL (운영 MPR: 120분) +app: + cache: + ais-target: + ttl-minutes: 120 + # 일일 항적 데이터 인메모리 캐시 cache: daily-track: diff --git a/src/main/resources/application-prod.yml b/src/main/resources/application-prod.yml index c81dc1e..f46b6ba 100644 --- a/src/main/resources/application-prod.yml +++ b/src/main/resources/application-prod.yml @@ -267,6 +267,12 @@ vessel: # spring 하위가 아닌 최상위 레벨 t_abnormal_tracks: retention-months: 0 # 비정상 항적: 무한 보관 +# S&P AIS API 캐시 TTL (운영: 120분) +app: + cache: + ais-target: + ttl-minutes: 120 + # 일일 항적 데이터 인메모리 캐시 cache: daily-track: diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index 7421bd3..32a3aae 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -170,11 +170,6 @@ vessel: # 테이블별 보관 기간 (기본값과 다를 경우만 설정) tables: - # CollectDB 일별 파티션 테이블 (단위: 일) - sig_test: - retention-days: 14 # 14일 보관 
- - # QueryDB 일별 파티션 테이블 (단위: 일) t_vessel_tracks_5min: retention-days: 7 # 7일 보관 t_area_vessel_tracks: @@ -263,6 +258,27 @@ vessel: ttl-minutes: 120 # 캐시 TTL: 120분 (위성 AIS 30~60분 간격 고려) max-size: 100000 # 최대 선박 수: 100,000척 (2시간 누적 고려) +# ==================== S&P Global AIS API 설정 ==================== +app: + ais-api: + url: ${AIS_API_URL:https://aisapi.maritime.spglobal.com} + username: ${AIS_API_USERNAME:} + password: ${AIS_API_PASSWORD:} + since-seconds: 60 # API 조회 범위 (초) + chunk-size: 50000 # 배치 청크 크기 (API 1회 호출 ~33K건) + + cache: + ais-target: + ttl-minutes: 120 # 기본 TTL (프로파일별 오버라이드) + max-size: 300000 # 최대 캐시 크기 (30만 건) + + chnprmship: + mmsi-resource-path: classpath:chnprmship-mmsi.txt + ttl-days: 2 + max-size: 2000 + warmup-enabled: true + warmup-days: 2 + # Swagger/OpenAPI 설정 springdoc: api-docs: diff --git a/src/main/resources/chnprmship-mmsi.txt b/src/main/resources/chnprmship-mmsi.txt new file mode 100644 index 0000000..5086ddc --- /dev/null +++ b/src/main/resources/chnprmship-mmsi.txt @@ -0,0 +1,1402 @@ +100895843 +100915113 +150201583 +186544332 +200005740 +200026355 +210105014 +210800202 +214100000 +261088888 +313443397 +314425141 +320709591 +332154938 +333545559 +365226688 +379824585 +400108800 +400123354 +400702597 +410210118 +411225585 +411256658 +412000996 +412001266 +412002674 +412005279 +412005557 +412005999 +412014688 +412015316 +412020019 +412026089 +412026099 +412026399 +412036999 +412053898 +412054958 +412055125 +412056987 +412085668 +412113500 +412121483 +412135789 +412167777 +412200193 +412200194 +412200217 +412200377 +412200384 +412200394 +412200404 +412200414 +412200432 +412200437 +412200527 +412200528 +412200561 +412200776 +412200805 +412200812 +412200813 +412200849 +412200853 +412200877 +412200879 +412201174 +412201239 +412202172 +412202321 +412202322 +412202326 +412202327 +412202356 +412202374 +412202375 +412202377 +412202384 +412202385 +412202388 +412202413 +412202414 +412202499 +412202736 +412202741 +412202782 +412202783 +412202796 
+412202797 +412202802 +412202803 +412202888 +412202969 +412202974 +412203032 +412203062 +412203388 +412203608 +412204051 +412204069 +412204155 +412204201 +412205349 +412205351 +412205422 +412205461 +412205462 +412205602 +412205603 +412205629 +412205631 +412205632 +412205647 +412205648 +412205651 +412205697 +412205699 +412205742 +412205743 +412207019 +412207076 +412207077 +412207078 +412207079 +412207463 +412207465 +412208071 +412208072 +412208081 +412208082 +412208116 +412208162 +412208166 +412208213 +412208281 +412208282 +412209061 +412210017 +412210018 +412210019 +412210021 +412210022 +412210023 +412210024 +412210025 +412210026 +412210043 +412210044 +412210048 +412210049 +412210051 +412210054 +412210056 +412210109 +412210111 +412210112 +412210113 +412210115 +412210117 +412210118 +412210121 +412210123 +412210124 +412210126 +412210127 +412210131 +412210132 +412210134 +412210135 +412210136 +412210138 +412210139 +412210142 +412210154 +412210156 +412210158 +412210161 +412210162 +412210163 +412210165 +412210246 +412210258 +412210259 +412210261 +412210273 +412210297 +412210312 +412210313 +412210314 +412210315 +412210316 +412210329 +412210331 +412210332 +412210442 +412210463 +412210466 +412210467 +412210469 +412210471 +412210472 +412210473 +412210474 +412210475 +412210477 +412210478 +412210479 +412210484 +412210487 +412210489 +412210491 +412210517 +412210518 +412210519 +412210527 +412210822 +412210871 +412210938 +412211121 +412211161 +412212504 +412212655 +412212934 +412213298 +412213299 +412213351 +412213369 +412213373 +412213374 +412213375 +412213381 +412213382 +412213383 +412213384 +412213386 +412213401 +412213403 +412213405 +412213454 +412213455 +412213457 +412213478 +412213486 +412213487 +412213488 +412213495 +412213514 +412213520 +412213521 +412213522 +412213576 +412213624 +412213626 +412213663 +412213692 +412213702 +412213708 +412213769 +412213772 +412213773 +412213774 +412213775 +412213777 +412213778 +412213779 +412214808 +412214872 +412214873 +412215031 
+412215139 +412217300 +412217304 +412217305 +412217678 +412218936 +412218937 +412219066 +412219067 +412219955 +412219956 +412219986 +412221489 +412221493 +412223022 +412223024 +412223032 +412223033 +412223050 +412225088 +412225282 +412225388 +412225502 +412225509 +412225512 +412225518 +412225525 +412225585 +412225591 +412225616 +412225734 +412225738 +412225743 +412225754 +412225766 +412225773 +412225788 +412225793 +412225795 +412225797 +412225802 +412225809 +412225814 +412225835 +412225841 +412225844 +412225854 +412225863 +412225925 +412225927 +412225936 +412225938 +412225948 +412225951 +412225952 +412225954 +412225959 +412225962 +412226004 +412226023 +412226057 +412226059 +412226087 +412226088 +412226089 +412226092 +412226094 +412226095 +412226107 +412226108 +412226109 +412226114 +412226115 +412226129 +412226151 +412226153 +412226205 +412226209 +412226318 +412226319 +412226321 +412226324 +412226388 +412229246 +412231777 +412251119 +412255855 +412256658 +412256789 +412258598 +412258777 +412265777 +412265888 +412280063 +412280237 +412280376 +412280377 +412280739 +412280741 +412280841 +412280842 +412284608 +412285646 +412286361 +412286362 +412286368 +412286369 +412286529 +412286540 +412286655 +412286661 +412286662 +412286666 +412286668 +412286669 +412286671 +412286672 +412286673 +412286674 +412286675 +412286677 +412286682 +412286684 +412286685 +412286686 +412286687 +412286688 +412286715 +412287545 +412287668 +412287669 +412287708 +412287709 +412287711 +412287712 +412287713 +412287748 +412287752 +412287753 +412287756 +412287757 +412287766 +412287767 +412287771 +412287772 +412287773 +412287774 +412287775 +412287776 +412287782 +412287783 +412287784 +412287804 +412287805 +412287812 +412287822 +412287824 +412287844 +412287861 +412287874 +412287877 +412287878 +412287918 +412289281 +412296865 +412300005 +412300006 +412300011 +412300012 +412300013 +412300026 +412300028 +412300029 +412300031 +412300032 +412300033 +412300034 +412300035 +412300036 +412300037 +412300038 
+412300042 +412300043 +412300044 +412300046 +412300053 +412300054 +412300055 +412300056 +412300062 +412300064 +412300065 +412300066 +412300068 +412300069 +412300071 +412300084 +412300087 +412300146 +412300189 +412300233 +412300249 +412300292 +412300307 +412300332 +412300346 +412300504 +412300517 +412300817 +412301005 +412301006 +412301041 +412301063 +412301088 +412304086 +412304899 +412305328 +412305988 +412306396 +412306399 +412306663 +412306788 +412306887 +412308689 +412309679 +412311132 +412313345 +412314158 +412317827 +412319975 +412320009 +412320018 +412320035 +412320043 +412320045 +412320069 +412320091 +412320092 +412320093 +412320094 +412320122 +412320123 +412320151 +412320162 +412320163 +412320166 +412320167 +412320168 +412320257 +412320258 +412320274 +412320279 +412320315 +412320358 +412320393 +412320394 +412320404 +412320413 +412320414 +412320475 +412320476 +412320491 +412320492 +412320501 +412320511 +412320529 +412320599 +412320601 +412320625 +412320626 +412320646 +412320647 +412320706 +412320745 +412320746 +412320783 +412320784 +412320789 +412320805 +412320836 +412320837 +412320959 +412320961 +412320962 +412320963 +412321053 +412321054 +412321115 +412321116 +412321312 +412321339 +412321341 +412321346 +412321372 +412321373 +412321387 +412321516 +412321517 +412321624 +412321686 +412321718 +412321719 +412321797 +412321802 +412321865 +412322075 +412322114 +412322145 +412322148 +412322149 +412322174 +412322175 +412324015 +412324761 +412324808 +412325033 +412325034 +412325055 +412325056 +412325218 +412325219 +412325222 +412325223 +412325249 +412325251 +412325257 +412325279 +412325304 +412325386 +412325443 +412325533 +412325813 +412325936 +412326016 +412326017 +412326817 +412326835 +412326836 +412327066 +412327646 +412327647 +412327672 +412327673 +412327735 +412327736 +412327749 +412327751 +412327752 +412327753 +412327771 +412327772 +412327819 +412327821 +412327824 +412327825 +412327844 +412327845 +412327846 +412327847 +412327865 +412327866 +412327867 
+412327868 +412327890 +412327897 +412327898 +412327908 +412327922 +412327923 +412327926 +412327927 +412327928 +412327929 +412327933 +412327934 +412327944 +412327945 +412327974 +412328111 +412328112 +412328113 +412328114 +412328115 +412328116 +412328285 +412328286 +412328287 +412328288 +412328294 +412328295 +412328301 +412328302 +412328304 +412328345 +412328346 +412328366 +412328372 +412328373 +412328384 +412328385 +412328386 +412328409 +412328411 +412328443 +412328444 +412328466 +412328467 +412328501 +412328502 +412328657 +412328658 +412328814 +412328815 +412328835 +412328836 +412328847 +412328848 +412328878 +412328894 +412328895 +412328897 +412328898 +412328905 +412328906 +412328907 +412328908 +412328923 +412328924 +412328934 +412328935 +412328936 +412328937 +412328942 +412328943 +412328944 +412328945 +412328965 +412328966 +412328989 +412328991 +412328996 +412328997 +412329001 +412329002 +412329006 +412329007 +412329078 +412329089 +412329091 +412329095 +412329096 +412329117 +412329134 +412329135 +412329148 +412329149 +412329173 +412329174 +412329176 +412329177 +412329183 +412329184 +412329211 +412329212 +412329215 +412329216 +412329245 +412329246 +412329289 +412329291 +412329316 +412329317 +412329321 +412329322 +412329323 +412329324 +412329374 +412329375 +412329396 +412329397 +412329398 +412329399 +412329489 +412329491 +412329492 +412329493 +412329551 +412329552 +412329614 +412329615 +412329759 +412329761 +412329782 +412329786 +412329788 +412329789 +412329803 +412329804 +412329808 +412329809 +412329817 +412329831 +412329832 +412329833 +412329847 +412329848 +412329892 +412329893 +412329901 +412329902 +412329916 +412329917 +412329919 +412329921 +412329924 +412329925 +412329926 +412329927 +412329934 +412329935 +412329941 +412329977 +412329982 +412329983 +412329986 +412329987 +412329988 +412329995 +412329996 +412330022 +412330023 +412330024 +412330027 +412330028 +412330476 +412330477 +412330503 +412330504 +412330505 +412330506 +412330522 +412330523 +412330524 
+412330525 +412330545 +412330546 +412330554 +412330555 +412330558 +412330559 +412330569 +412330572 +412330573 +412330574 +412330575 +412330576 +412330577 +412330578 +412330579 +412330588 +412330589 +412330594 +412330595 +412330635 +412330636 +412330657 +412330862 +412330886 +412330887 +412330888 +412330889 +412330911 +412330912 +412331194 +412331195 +412331196 +412331197 +412331198 +412331199 +412331206 +412331207 +412331396 +412331397 +412331528 +412331529 +412331535 +412331847 +412332398 +412332808 +412333324 +412333325 +412333326 +412333327 +412333342 +412333343 +412333531 +412333532 +412333541 +412333550 +412333945 +412333946 +412334006 +412334007 +412334014 +412334015 +412334019 +412334027 +412334058 +412336074 +412336093 +412336094 +412336095 +412336102 +412336111 +412336116 +412336117 +412336118 +412336123 +412336129 +412336131 +412336132 +412336196 +412336606 +412336607 +412336612 +412336613 +412336623 +412336624 +412336637 +412336638 +412337325 +412337348 +412337349 +412337424 +412337644 +412337645 +412345621 +412350017 +412350047 +412350049 +412350058 +412350059 +412350112 +412350165 +412350338 +412352301 +412352381 +412352422 +412352436 +412352649 +412353058 +412353373 +412353857 +412353858 +412353886 +412355071 +412355141 +412356251 +412357799 +412358545 +412358882 +412358995 +412359066 +412359077 +412364135 +412364283 +412364303 +412364358 +412364513 +412364738 +412364783 +412364837 +412364947 +412365095 +412365194 +412365289 +412365328 +412365331 +412365335 +412365639 +412365939 +412366336 +412366358 +412366665 +412366669 +412366912 +412368875 +412368885 +412368902 +412368966 +412375283 +412386668 +412386669 +412410001 +412410009 +412410746 +412410747 +412411528 +412411605 +412411647 +412411909 +412413895 +412414342 +412414345 +412414423 +412414436 +412414538 +412414744 +412415482 +412415513 +412416104 +412416132 +412416207 +412416235 +412416249 +412416268 +412416269 +412416292 +412416296 +412416307 +412416308 +412416338 +412416367 +412416391 
+412416394 +412416406 +412416448 +412416508 +412416535 +412416554 +412416557 +412416584 +412416591 +412416592 +412416595 +412416642 +412416699 +412416837 +412416842 +412416872 +412416875 +412416898 +412416927 +412416949 +412416981 +412417008 +412417106 +412417115 +412417151 +412417182 +412417188 +412417222 +412417247 +412417248 +412417287 +412417288 +412417295 +412417311 +412417334 +412417335 +412417338 +412417352 +412417365 +412417368 +412417412 +412417413 +412417483 +412417509 +412417556 +412417692 +412417712 +412417741 +412417785 +412417807 +412417825 +412417838 +412417851 +412417917 +412417954 +412417957 +412417977 +412417981 +412418011 +412418017 +412418018 +412418056 +412418082 +412418101 +412418158 +412418171 +412418185 +412418246 +412418319 +412418387 +412418401 +412418478 +412418488 +412418507 +412418511 +412418513 +412418515 +412418567 +412418568 +412418586 +412418629 +412418633 +412418679 +412418696 +412418698 +412418774 +412418785 +412418793 +412418795 +412418803 +412418814 +412418816 +412418833 +412418834 +412418872 +412418873 +412418874 +412418887 +412418918 +412418933 +412418941 +412418942 +412418952 +412418999 +412419018 +412419024 +412419064 +412419114 +412419132 +412419203 +412419233 +412419262 +412419264 +412419265 +412419266 +412419276 +412419324 +412419342 +412419345 +412419348 +412419406 +412419407 +412419455 +412419488 +412419495 +412419502 +412419506 +412419507 +412419509 +412419531 +412419536 +412419541 +412419544 +412419545 +412419549 +412419553 +412419564 +412419569 +412419585 +412419587 +412419638 +412419641 +412419642 +412419667 +412419668 +412419688 +412419689 +412419701 +412419702 +412419703 +412419704 +412419706 +412419709 +412425002 +412431008 +412431029 +412431033 +412431041 +412431058 +412431063 +412431066 +412431071 +412431084 +412431087 +412431120 +412431124 +412431129 +412431141 +412431151 +412431173 +412431222 +412431262 +412431263 +412431266 +412431291 +412431396 +412431491 +412431494 +412431743 +412431805 +412431809 
+412431892 +412431911 +412431913 +412431914 +412431915 +412431963 +412431964 +412431966 +412431985 +412435125 +412435126 +412435127 +412435142 +412435309 +412435356 +412435386 +412435387 +412435595 +412435596 +412435784 +412435813 +412436079 +412436271 +412436329 +412436519 +412436521 +412436627 +412436631 +412436701 +412436710 +412436841 +412436874 +412436963 +412436969 +412436992 +412437006 +412437026 +412437037 +412437045 +412437054 +412437055 +412437071 +412437072 +412437079 +412437085 +412437095 +412437113 +412437118 +412437119 +412437166 +412437215 +412437418 +412437419 +412437626 +412437627 +412437633 +412437635 +412437659 +412437718 +412437817 +412437818 +412437821 +412437822 +412437988 +412437989 +412438043 +412438044 +412438045 +412438065 +412438066 +412438146 +412438235 +412438236 +412438646 +412438647 +412438696 +412438697 +412438868 +412438869 +412438873 +412438955 +412438996 +412438997 +412439055 +412439056 +412439111 +412439112 +412439139 +412439143 +412439145 +412439146 +412439252 +412439356 +412439357 +412443647 +412452265 +412456855 +412468166 +412471879 +412475803 +412476361 +412476457 +412479103 +412479385 +412480093 +412494141 +412494148 +412494156 +412494172 +412515088 +412526198 +412526798 +412532666 +412545687 +412556356 +412556357 +412556889 +412570001 +412577688 +412585665 +412588681 +412588778 +412653456 +412665478 +412685177 +412752470 +412798948 +412800888 +412852443 +412865966 +412879798 +412885120 +412886580 +412900240 +412952867 +412958588 +413000229 +413000769 +413004860 +413005116 +413027088 +413035319 +413089562 +413111322 +413122960 +413127608 +413155153 +413215238 +413216847 +413226089 +413245082 +413255506 +413255555 +413256667 +413296865 +413300026 +413300221 +413315088 +413320282 +413327929 +413335198 +413350165 +413357867 +413361808 +413388589 +413457866 +413464232 +413466077 +413520688 +413578254 +413593750 +413699592 +414328943 +415005666 +415051026 +415107777 +415108888 +415109607 +415140625 +415214102 +415232125 
+415261386 +415506055 +415628585 +415782000 +415836666 +415901572 +415936288 +417758521 +420439112 +421233456 +422226789 +422277709 +423678955 +423789666 +425556789 +441235678 +512325936 +550026918 +558888888 +586358882 +586418965 +600950945 +688816888 +688826755 +688826968 +712210938 +712330656 +712888888 +789999999 +800004551 +800029774 +800044382 +800052359 +888888838 +888888877 +888888988 +888898888 +900000173 +900020650 +905106399 +905106699 +905108588 +926002285 +926002997 +926004879 +926009286 +926012291 diff --git a/src/main/resources/sql/create_signal_tables_and indexes_and_commant.sql b/src/main/resources/sql/create_signal_tables_and indexes_and_commant.sql new file mode 100644 index 0000000..a1959fb --- /dev/null +++ b/src/main/resources/sql/create_signal_tables_and indexes_and_commant.sql @@ -0,0 +1,658 @@ +-- ================================================ +-- Signal Batch Aggregation System - 실제 테이블 구조 +-- PostgreSQL 15+ with PostGIS 3.3+ +-- +-- 목적: 실시간 선박 위치 데이터의 계층적 집계 +-- 작성일: 2025-07-30 +-- 실제 DB: 10.26.252.48:5432/mdadb (schema: signal) +-- ================================================ + +-- 스키마 생성 +CREATE SCHEMA IF NOT EXISTS signal; + +-- PostGIS 확장 활성화 +CREATE EXTENSION IF NOT EXISTS postgis; + +-- ================================================ +-- 1. 
t_abnormal_track_stats - 비정상 항적 일별 통계 +-- ================================================ +CREATE TABLE signal.t_abnormal_track_stats ( + stat_date DATE NOT NULL, -- 통계 날짜 + abnormal_type VARCHAR(50) NOT NULL, -- 비정상 유형 (excessive_speed, teleport, impossible_distance, excessive_avg_speed, gap_jump) + vessel_count INTEGER NOT NULL, -- 비정상 항적이 발견된 선박 수 + track_count INTEGER NOT NULL, -- 비정상 항적 건수 + total_points INTEGER, -- 총 포인트 수 + avg_deviation NUMERIC(10,2), -- 평균 편차값 (속도 또는 거리) + max_deviation NUMERIC(10,2), -- 최대 편차값 + created_at TIMESTAMP DEFAULT NOW(), -- 생성 시각 + updated_at TIMESTAMP DEFAULT NOW(), -- 수정 시각 + CONSTRAINT t_abnormal_track_stats_pkey PRIMARY KEY (stat_date, abnormal_type) +); + +-- 인덱스 +CREATE INDEX idx_abnormal_track_stats_date ON signal.t_abnormal_track_stats (stat_date); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_abnormal_track_stats IS '비정상 항적 일별 통계'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.stat_date IS '통계 날짜'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.abnormal_type IS '비정상 유형 (excessive_speed: 과속, teleport: 순간이동, impossible_distance: 불가능한 거리, excessive_avg_speed: 평균속도 초과, gap_jump: 시간 간격 점프)'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.vessel_count IS '비정상 항적이 발견된 선박 수'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.track_count IS '비정상 항적 건수'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.total_points IS '비정상 항적의 총 포인트 수'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.avg_deviation IS '평균 편차값 (속도는 knots, 거리는 nm)'; +COMMENT ON COLUMN signal.t_abnormal_track_stats.max_deviation IS '최대 편차값 (속도는 knots, 거리는 nm)'; + +-- ================================================ +-- 2. 
t_abnormal_tracks - 비정상 선박 항적 저장 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_abnormal_tracks ( + id BIGINT NOT NULL, -- ID + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 (AIS, V-PASS 등) + target_id VARCHAR(20) NOT NULL, -- 타겟 ID (MMSI, 선박번호 등) + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + track_geom GEOMETRY(LINESTRINGM, 4326), -- 비정상 항적 (M값은 시간) + abnormal_type VARCHAR(50) NOT NULL, -- 비정상 유형 + abnormal_reason JSONB NOT NULL, -- 비정상 사유 상세 + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + max_speed NUMERIC(6,2), -- 최대 속도 (knots) + point_count INTEGER, -- 항적 포인트 수 + source_table VARCHAR(50) NOT NULL, -- 원본 테이블명 (5min/hourly/daily) + detected_at TIMESTAMP DEFAULT NOW(), -- 검출 시각 + CONSTRAINT t_abnormal_tracks_pkey PRIMARY KEY (id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE UNIQUE INDEX abnormal_tracks_uk ON signal.t_abnormal_tracks (sig_src_cd, target_id, time_bucket, source_table); +CREATE INDEX idx_abnormal_tracks_vessel ON signal.t_abnormal_tracks (sig_src_cd, target_id); +CREATE INDEX idx_abnormal_tracks_time ON signal.t_abnormal_tracks (time_bucket); +CREATE INDEX idx_abnormal_tracks_type ON signal.t_abnormal_tracks (abnormal_type); +CREATE INDEX idx_abnormal_tracks_geom ON signal.t_abnormal_tracks USING GIST (track_geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_abnormal_tracks IS '비정상 선박 항적 저장 테이블'; +COMMENT ON COLUMN signal.t_abnormal_tracks.sig_src_cd IS '신호원 코드 (AIS, V-PASS, E-Navigation 등)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.target_id IS '타겟 ID (MMSI, V-PASS ID, 선박번호 등)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.time_bucket IS '5분 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_abnormal_tracks.track_geom IS 'LineStringM 형식 항적 (M값은 첫 포인트 기준 상대시간)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.abnormal_type IS '비정상 유형'; +COMMENT ON COLUMN signal.t_abnormal_tracks.abnormal_reason IS '비정상 사유 상세 정보 JSON'; +COMMENT ON COLUMN 
signal.t_abnormal_tracks.distance_nm IS '총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.avg_speed IS '평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.max_speed IS '최대 속도 (knots)'; +COMMENT ON COLUMN signal.t_abnormal_tracks.point_count IS '항적을 구성하는 포인트 수'; +COMMENT ON COLUMN signal.t_abnormal_tracks.source_table IS '검출된 원본 테이블 (t_vessel_tracks_5min 등)'; + +-- ================================================ +-- 3. t_area_statistics - 사용자 정의 영역별 선박 통계 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_area_statistics ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + vessel_count INTEGER DEFAULT 0, -- 선박 수 + in_count INTEGER DEFAULT 0, -- 진입 선박 수 + out_count INTEGER DEFAULT 0, -- 이탈 선박 수 + transit_vessels JSONB, -- 통과 선박 목록 + stationary_vessels JSONB, -- 정박 선박 목록 + avg_sog NUMERIC(25,1), -- 평균 대지속력 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_area_statistics_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_area_stats_lookup ON signal.t_area_statistics (area_id, time_bucket DESC); +CREATE INDEX idx_area_stats_congestion ON signal.t_area_statistics (vessel_count DESC); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_area_statistics IS '사용자 정의 영역별 5분 단위 선박 통계'; +COMMENT ON COLUMN signal.t_area_statistics.area_id IS '영역 ID (t_areas 테이블 참조)'; +COMMENT ON COLUMN signal.t_area_statistics.time_bucket IS '5분 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_area_statistics.vessel_count IS '해당 시간에 영역 내 선박 수'; +COMMENT ON COLUMN signal.t_area_statistics.in_count IS '해당 시간에 영역에 진입한 선박 수'; +COMMENT ON COLUMN signal.t_area_statistics.out_count IS '해당 시간에 영역에서 이탈한 선박 수'; +COMMENT ON COLUMN signal.t_area_statistics.transit_vessels IS '통과 선박 목록 JSON 배열'; +COMMENT ON COLUMN signal.t_area_statistics.stationary_vessels IS '정박 선박 목록 JSON 배열'; +COMMENT ON COLUMN signal.t_area_statistics.avg_sog IS '평균 대지속력 (knots)'; + 
+-- ================================================ +-- 4. t_area_tracks_summary - 영역별 항적 요약 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_area_tracks_summary ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + metrics_summary JSONB, -- 메트릭 요약 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_area_tracks_summary_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_area_tracks_summary IS '영역별 5분 단위 항적 요약 통계'; +COMMENT ON COLUMN signal.t_area_tracks_summary.area_id IS '영역 ID'; +COMMENT ON COLUMN signal.t_area_tracks_summary.time_bucket IS '5분 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_area_tracks_summary.total_vessels IS '영역 내 총 선박 수'; +COMMENT ON COLUMN signal.t_area_tracks_summary.total_distance_nm IS '모든 선박의 총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_area_tracks_summary.avg_speed IS '모든 선박의 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_area_tracks_summary.vessel_list IS '선박별 상세 정보 {sig_src_cd, target_id, distance_nm, avg_speed}'; +COMMENT ON COLUMN signal.t_area_tracks_summary.metrics_summary IS '추가 메트릭 정보'; + +-- ================================================ +-- 5. 
t_area_tracks_summary_daily - 영역별 일일 항적 요약 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_area_tracks_summary_daily ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + time_bucket DATE NOT NULL, -- 날짜 (일 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + metrics_summary JSONB, -- 메트릭 요약 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_area_tracks_summary_daily_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_area_tracks_daily_time ON signal.t_area_tracks_summary_daily (time_bucket); +CREATE INDEX idx_area_tracks_daily_area ON signal.t_area_tracks_summary_daily (area_id); +CREATE INDEX idx_area_tracks_summary_daily_time_area ON signal.t_area_tracks_summary_daily (time_bucket DESC, area_id); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_area_tracks_summary_daily IS '영역별 일일 항적 요약 통계'; +COMMENT ON COLUMN signal.t_area_tracks_summary_daily.time_bucket IS '일 단위 날짜'; +COMMENT ON COLUMN signal.t_area_tracks_summary_daily.total_vessels IS '해당일 영역을 방문한 고유 선박 수'; + +-- ================================================ +-- 6. 
t_area_tracks_summary_hourly - 영역별 시간별 항적 요약 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_area_tracks_summary_hourly ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (1시간 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + metrics_summary JSONB, -- 메트릭 요약 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_area_tracks_summary_hourly_pkey PRIMARY KEY (area_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_area_tracks_summary_hourly_time_area ON signal.t_area_tracks_summary_hourly (time_bucket DESC, area_id); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_area_tracks_summary_hourly IS '영역별 시간별 항적 요약 통계'; +COMMENT ON COLUMN signal.t_area_tracks_summary_hourly.time_bucket IS '1시간 단위 시간 버킷'; + +-- ================================================ +-- 7. t_area_vessel_tracks - 영역별 선박 항적 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_area_vessel_tracks ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + point_count INTEGER, -- 포인트 수 + metrics JSONB, -- 추가 메트릭 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_area_vessel_tracks_pkey PRIMARY KEY (area_id, sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_area_vessel_tracks_vessel_time ON signal.t_area_vessel_tracks (sig_src_cd, target_id, time_bucket DESC); +CREATE INDEX idx_area_vessel_tracks_area_time_desc ON signal.t_area_vessel_tracks (area_id, time_bucket DESC); +CREATE INDEX idx_area_vessel_tracks_area_vessel_time ON signal.t_area_vessel_tracks (area_id, 
sig_src_cd, target_id, time_bucket DESC); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_area_vessel_tracks IS '영역별 선박 항적 (5분 단위)'; +COMMENT ON COLUMN signal.t_area_vessel_tracks.area_id IS '영역 ID'; +COMMENT ON COLUMN signal.t_area_vessel_tracks.distance_nm IS '영역 내 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_area_vessel_tracks.avg_speed IS '영역 내 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_area_vessel_tracks.point_count IS '영역 내 포인트 수'; +COMMENT ON COLUMN signal.t_area_vessel_tracks.metrics IS '추가 메트릭 정보 (max_speed, entry/exit_time 등)'; + +-- ================================================ +-- 8. t_areas - 사용자 정의 영역 +-- ================================================ +CREATE TABLE signal.t_areas ( + area_id VARCHAR(50) NOT NULL, -- 영역 ID + area_name VARCHAR(100) NOT NULL, -- 영역명 + area_type VARCHAR(20) NOT NULL, -- 영역 유형 (port, anchorage, fishing 등) + area_geom GEOMETRY(MULTIPOLYGON, 4326) NOT NULL, -- 영역 경계 + properties JSONB, -- 추가 속성 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_areas_pkey PRIMARY KEY (area_id) +); + +-- 인덱스 +CREATE INDEX idx_t_areas_area_geom ON signal.t_areas USING GIST (area_geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_areas IS '사용자 정의 영역 정보'; +COMMENT ON COLUMN signal.t_areas.area_id IS '영역 고유 ID'; +COMMENT ON COLUMN signal.t_areas.area_name IS '영역 이름'; +COMMENT ON COLUMN signal.t_areas.area_type IS '영역 유형 (항구, 정박지, 어장 등)'; +COMMENT ON COLUMN signal.t_areas.area_geom IS '영역 경계 (MultiPolygon)'; +COMMENT ON COLUMN signal.t_areas.properties IS '추가 속성 정보'; + +-- ================================================ +-- 9. 
t_batch_performance_metrics - 배치 작업 성능 메트릭 +-- ================================================ +CREATE TABLE signal.t_batch_performance_metrics ( + id SERIAL PRIMARY KEY, -- 자동 증가 ID + job_name VARCHAR(100) NOT NULL, -- Job 이름 + execution_id BIGINT NOT NULL, -- 실행 ID + start_time TIMESTAMP NOT NULL, -- 시작 시각 + end_time TIMESTAMP, -- 종료 시각 + duration_seconds BIGINT, -- 실행 시간 (초) + total_read BIGINT, -- 읽은 레코드 수 + total_write BIGINT, -- 쓴 레코드 수 + throughput_per_sec NUMERIC(10,2), -- 초당 처리량 + status VARCHAR(20), -- 상태 (STARTED, COMPLETED, FAILED) + error_message TEXT, -- 에러 메시지 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP -- 생성 시각 +); + +-- 인덱스 +CREATE INDEX idx_batch_metrics_job ON signal.t_batch_performance_metrics (job_name, start_time DESC); +CREATE INDEX idx_batch_metrics_status ON signal.t_batch_performance_metrics (status) WHERE status != 'COMPLETED'; + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_batch_performance_metrics IS '배치 작업 성능 메트릭'; +COMMENT ON COLUMN signal.t_batch_performance_metrics.job_name IS 'Spring Batch Job 이름'; +COMMENT ON COLUMN signal.t_batch_performance_metrics.execution_id IS 'Spring Batch 실행 ID'; +COMMENT ON COLUMN signal.t_batch_performance_metrics.duration_seconds IS '실행 소요 시간 (초)'; +COMMENT ON COLUMN signal.t_batch_performance_metrics.throughput_per_sec IS '초당 처리 레코드 수'; +COMMENT ON COLUMN signal.t_batch_performance_metrics.status IS '실행 상태'; + +-- ================================================ +-- 10. 
t_grid_tiles - 그리드 타일 정의 (대해구/소해구) +-- ================================================ +CREATE TABLE signal.t_grid_tiles ( + tile_id VARCHAR(50) NOT NULL, -- 타일 ID + tile_level INTEGER NOT NULL, -- 타일 레벨 (1: 대해구, 2: 소해구) + haegu_no INTEGER NOT NULL, -- 대해구 번호 + sohaegu_no INTEGER, -- 소해구 번호 + min_lat DOUBLE PRECISION NOT NULL, -- 최소 위도 + min_lon DOUBLE PRECISION NOT NULL, -- 최소 경도 + max_lat DOUBLE PRECISION NOT NULL, -- 최대 위도 + max_lon DOUBLE PRECISION NOT NULL, -- 최대 경도 + tile_geom GEOMETRY(POLYGON, 4326) NOT NULL, -- 타일 경계 + center_point GEOMETRY(POINT, 4326) NOT NULL, -- 중심점 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_tiles_pkey PRIMARY KEY (tile_id) +); + +-- 인덱스 +CREATE INDEX idx_grid_tiles_tile_geom ON signal.t_grid_tiles USING GIST (tile_geom); +CREATE INDEX idx_grid_tiles_haegu ON signal.t_grid_tiles (haegu_no); +CREATE INDEX idx_grid_tiles_sohaegu ON signal.t_grid_tiles (sohaegu_no) WHERE sohaegu_no IS NOT NULL; +CREATE INDEX idx_grid_tiles_level ON signal.t_grid_tiles (tile_level); +CREATE INDEX idx_grid_tiles_haegu_sohaegu ON signal.t_grid_tiles (haegu_no, sohaegu_no); +CREATE INDEX idx_grid_tiles_tile_level ON signal.t_grid_tiles (tile_id, tile_level); +CREATE INDEX idx_grid_tiles_tile_id ON signal.t_grid_tiles (tile_id); +CREATE INDEX idx_grid_tiles_geom ON signal.t_grid_tiles USING GIST (tile_geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_tiles IS '그리드 타일 정의 (대해구/소해구)'; +COMMENT ON COLUMN signal.t_grid_tiles.tile_id IS '타일 고유 ID'; +COMMENT ON COLUMN signal.t_grid_tiles.tile_level IS '타일 레벨 (1: 대해구, 2: 소해구)'; +COMMENT ON COLUMN signal.t_grid_tiles.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_tiles.sohaegu_no IS '소해구 번호 (소해구인 경우)'; +COMMENT ON COLUMN signal.t_grid_tiles.min_lat IS '타일 최소 위도'; +COMMENT ON COLUMN signal.t_grid_tiles.min_lon IS '타일 최소 경도'; +COMMENT ON COLUMN signal.t_grid_tiles.max_lat IS '타일 최대 위도'; +COMMENT ON COLUMN signal.t_grid_tiles.max_lon IS '타일 최대 경도'; +COMMENT ON COLUMN 
signal.t_grid_tiles.tile_geom IS '타일 경계 폴리곤'; +COMMENT ON COLUMN signal.t_grid_tiles.center_point IS '타일 중심점'; + +-- ================================================ +-- 11. t_grid_tracks_summary - 해구별 항적 요약 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_grid_tracks_summary ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + traffic_density NUMERIC(10,4), -- 교통 밀도 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_tracks_summary_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_tracks_summary IS '해구별 5분 단위 항적 요약 통계'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.time_bucket IS '5분 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.total_vessels IS '해구 내 총 선박 수'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.total_distance_nm IS '모든 선박의 총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.avg_speed IS '모든 선박의 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.vessel_list IS '선박별 상세 정보 {sig_src_cd, target_id, distance_nm, avg_speed}'; +COMMENT ON COLUMN signal.t_grid_tracks_summary.traffic_density IS '교통 밀도 (선박수/면적)'; + + +-- ================================================ +-- 12. 
t_grid_tracks_summary_daily - 해구별 일일 항적 요약 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_grid_tracks_summary_daily ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + time_bucket DATE NOT NULL, -- 날짜 (일 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_tracks_summary_daily_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_grid_tracks_daily_time ON signal.t_grid_tracks_summary_daily (time_bucket); +CREATE INDEX idx_grid_tracks_daily_haegu ON signal.t_grid_tracks_summary_daily (haegu_no); +CREATE INDEX idx_grid_tracks_summary_daily_time_haegu ON signal.t_grid_tracks_summary_daily (time_bucket DESC, haegu_no); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_tracks_summary_daily IS '해구별 일일 항적 요약 통계'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.time_bucket IS '일 단위 날짜'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.total_vessels IS '해구 내 총 선박 수'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.total_distance_nm IS '모든 선박의 총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.avg_speed IS '모든 선박의 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_daily.vessel_list IS '선박별 상세 정보 {sig_src_cd, target_id, distance_nm, avg_speed}'; + +-- ================================================ +-- 13. 
t_grid_tracks_summary_hourly - 해구별 시간별 항적 요약 (파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_grid_tracks_summary_hourly ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (1시간 단위) + total_vessels INTEGER, -- 총 선박 수 + total_distance_nm NUMERIC(12,2), -- 총 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + vessel_list JSONB, -- 선박 목록 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_tracks_summary_hourly_pkey PRIMARY KEY (haegu_no, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_grid_tracks_hourly_time ON signal.t_grid_tracks_summary_hourly (time_bucket); +CREATE INDEX idx_grid_tracks_hourly_haegu ON signal.t_grid_tracks_summary_hourly (haegu_no); +CREATE INDEX idx_grid_tracks_summary_hourly_time_haegu ON signal.t_grid_tracks_summary_hourly (time_bucket DESC, haegu_no); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_tracks_summary_hourly IS '해구별 시간별 항적 요약 통계'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.time_bucket IS '1시간 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.total_vessels IS '해구 내 총 선박 수'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.total_distance_nm IS '모든 선박의 총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.avg_speed IS '모든 선박의 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_grid_tracks_summary_hourly.vessel_list IS '선박별 상세 정보 {sig_src_cd, target_id, distance_nm, avg_speed}'; + +-- ================================================ +-- 14. 
t_grid_vessel_tracks - 해구별 선박 항적 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_grid_vessel_tracks ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + point_count INTEGER, -- 포인트 수 + entry_time TIMESTAMP, -- 해구 진입 시각 + exit_time TIMESTAMP, -- 해구 이탈 시각 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_vessel_tracks_pkey PRIMARY KEY (haegu_no, sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_grid_vessel_tracks_vessel_time ON signal.t_grid_vessel_tracks (sig_src_cd, target_id, time_bucket DESC); +CREATE INDEX idx_grid_vessel_tracks_haegu_time_desc ON signal.t_grid_vessel_tracks (haegu_no, time_bucket DESC); +CREATE INDEX idx_grid_vessel_tracks_entry_exit ON signal.t_grid_vessel_tracks (entry_time, exit_time) WHERE entry_time IS NOT NULL; + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_vessel_tracks IS '해구별 선박 항적 (5분 단위)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.distance_nm IS '해구 내 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.avg_speed IS '해구 내 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.point_count IS '해구 내 포인트 수'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.entry_time IS '해구 진입 시각'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.exit_time IS '해구 이탈 시각'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks.created_at IS '생성 시각'; + +-- ================================================ +-- 15. 
t_grid_vessel_tracks_hourly - 해구별 선박 항적 (시간별, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_grid_vessel_tracks_hourly ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (1시간 단위) + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + point_count INTEGER, -- 포인트 수 + entry_time TIMESTAMP, -- 해구 진입 시각 + exit_time TIMESTAMP, -- 해구 이탈 시각 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_grid_vessel_tracks_hourly_pkey PRIMARY KEY (haegu_no, sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_grid_vessel_tracks_hourly IS '해구별 선박 항적 (시간별)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.distance_nm IS '해구 내 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.avg_speed IS '해구 내 평균 속도 (knots)'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.point_count IS '해구 내 포인트 수'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.entry_time IS '해구 진입 시각'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.exit_time IS '해구 이탈 시각'; +COMMENT ON COLUMN signal.t_grid_vessel_tracks_hourly.created_at IS '생성 시각'; + +-- ================================================ +-- 16. 
t_haegu_definitions - 대해구 정의 +-- ================================================ +CREATE TABLE signal.t_haegu_definitions ( + haegu_no INTEGER NOT NULL, -- 대해구 번호 + min_lat DOUBLE PRECISION NOT NULL, -- 최소 위도 + min_lon DOUBLE PRECISION NOT NULL, -- 최소 경도 + max_lat DOUBLE PRECISION NOT NULL, -- 최대 위도 + max_lon DOUBLE PRECISION NOT NULL, -- 최대 경도 + center_lat DOUBLE PRECISION NOT NULL, -- 중심 위도 + center_lon DOUBLE PRECISION NOT NULL, -- 중심 경도 + geom GEOMETRY(MULTIPOLYGON, 4326) NOT NULL, -- 대해구 경계 + center_point GEOMETRY(POINT, 4326) NOT NULL, -- 중심점 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_haegu_definitions_pkey PRIMARY KEY (haegu_no) +); + +-- 인덱스 +CREATE INDEX idx_haegu_definitions_geom ON signal.t_haegu_definitions USING GIST (geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_haegu_definitions IS '대해구 정의 정보'; +COMMENT ON COLUMN signal.t_haegu_definitions.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_haegu_definitions.min_lat IS '대해구 최소 위도'; +COMMENT ON COLUMN signal.t_haegu_definitions.min_lon IS '대해구 최소 경도'; +COMMENT ON COLUMN signal.t_haegu_definitions.max_lat IS '대해구 최대 위도'; +COMMENT ON COLUMN signal.t_haegu_definitions.max_lon IS '대해구 최대 경도'; +COMMENT ON COLUMN signal.t_haegu_definitions.center_lat IS '대해구 중심 위도'; +COMMENT ON COLUMN signal.t_haegu_definitions.center_lon IS '대해구 중심 경도'; +COMMENT ON COLUMN signal.t_haegu_definitions.geom IS '대해구 경계 (MultiPolygon)'; +COMMENT ON COLUMN signal.t_haegu_definitions.center_point IS '대해구 중심점'; + +-- ================================================ +-- 17. 
t_tile_summary - 타일별 선박 요약 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_tile_summary ( + tile_id VARCHAR(50) NOT NULL, -- 타일 ID + tile_level INTEGER NOT NULL, -- 타일 레벨 + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + vessel_count INTEGER DEFAULT 0, -- 선박 수 + unique_vessels JSONB, -- 고유 선박 목록 + total_points BIGINT DEFAULT 0, -- 총 포인트 수 + avg_sog NUMERIC(25,1), -- 평균 대지속력 + max_sog NUMERIC(25,1), -- 최대 대지속력 + vessel_density NUMERIC(10,6), -- 선박 밀도 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + haegu_no INTEGER, -- 대해구 번호 + sohaegu_no INTEGER, -- 소해구 번호 + CONSTRAINT t_tile_summary_pkey PRIMARY KEY (tile_id, time_bucket, tile_level) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_tile_summary_lookup ON signal.t_tile_summary (tile_id, time_bucket DESC); +CREATE INDEX idx_tile_summary_time ON signal.t_tile_summary (time_bucket DESC); +CREATE INDEX idx_tile_summary_vessel_count ON signal.t_tile_summary (vessel_count DESC); +CREATE INDEX idx_tile_summary_density ON signal.t_tile_summary (vessel_density) WHERE vessel_density > 0; +CREATE INDEX idx_tile_summary_tile_level ON signal.t_tile_summary (tile_level); +CREATE INDEX idx_tile_summary_bucket_tile_level ON signal.t_tile_summary (time_bucket, tile_id, tile_level); +-- NOTE(review): removed duplicate index idx_tile_summary_time_bucket — identical definition to idx_tile_summary_time (time_bucket DESC) + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_tile_summary IS '타일별 5분 단위 선박 요약 통계'; +COMMENT ON COLUMN signal.t_tile_summary.tile_id IS '타일 ID'; +COMMENT ON COLUMN signal.t_tile_summary.tile_level IS '타일 레벨 (1: 대해구, 2: 소해구)'; +COMMENT ON COLUMN signal.t_tile_summary.vessel_count IS '타일 내 선박 수'; +COMMENT ON COLUMN signal.t_tile_summary.unique_vessels IS '고유 선박 목록 [{sig_src_cd, target_id}]'; +COMMENT ON COLUMN signal.t_tile_summary.total_points IS '총 위치 포인트 수'; +COMMENT ON COLUMN signal.t_tile_summary.avg_sog IS '평균 대지속력 (knots)'; +COMMENT ON COLUMN signal.t_tile_summary.max_sog IS '최대 대지속력 (knots)'; 
+COMMENT ON COLUMN signal.t_tile_summary.vessel_density IS '선박 밀도 (선박수/평방킬로미터)'; +COMMENT ON COLUMN signal.t_tile_summary.haegu_no IS '대해구 번호'; +COMMENT ON COLUMN signal.t_tile_summary.sohaegu_no IS '소해구 번호'; + +-- ================================================ +-- 18. t_vessel_latest_position - 선박 최신 위치 +-- ================================================ +CREATE TABLE signal.t_vessel_latest_position ( + sig_src_cd VARCHAR(6) NOT NULL, -- 신호원 코드 + target_id VARCHAR(20) NOT NULL, -- 타겟 ID + lat DOUBLE PRECISION NOT NULL, -- 위도 + lon DOUBLE PRECISION NOT NULL, -- 경도 + geom GEOMETRY(POINT, 4326), -- 위치 (PostGIS) + sog NUMERIC(25,1), -- 대지속력 (knots) + cog NUMERIC(25,1), -- 대지침로 (도) + heading INTEGER, -- 선수방향 (도) + ship_nm VARCHAR(30), -- 선박명 + ship_ty VARCHAR(25), -- 선박 유형 + last_update TIMESTAMP NOT NULL, -- 최종 업데이트 시각 + update_count BIGINT DEFAULT 1, -- 업데이트 횟수 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_vessel_latest_position_pkey PRIMARY KEY (sig_src_cd, target_id) +); + +-- 인덱스 +CREATE INDEX idx_vessel_latest_position_geom ON signal.t_vessel_latest_position USING GIST (geom); +-- NOTE(review): removed duplicate index idx_latest_position_geom — identical GIST (geom) definition as idx_vessel_latest_position_geom +CREATE INDEX idx_latest_position_update ON signal.t_vessel_latest_position (last_update DESC); +CREATE INDEX idx_latest_position_ship_type ON signal.t_vessel_latest_position (ship_ty); +CREATE INDEX idx_vessel_latest_position_key ON signal.t_vessel_latest_position ((sig_src_cd || ':' || target_id)); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_vessel_latest_position IS '선박 최신 위치 정보'; +COMMENT ON COLUMN signal.t_vessel_latest_position.sig_src_cd IS '신호원 코드'; +COMMENT ON COLUMN signal.t_vessel_latest_position.target_id IS '타겟 ID'; +COMMENT ON COLUMN signal.t_vessel_latest_position.lat IS '위도'; +COMMENT ON COLUMN signal.t_vessel_latest_position.lon IS '경도'; +COMMENT ON COLUMN signal.t_vessel_latest_position.geom IS 'PostGIS Point 형식 위치'; +COMMENT ON COLUMN 
signal.t_vessel_latest_position.sog IS '대지속력 (Speed Over Ground, knots)'; +COMMENT ON COLUMN signal.t_vessel_latest_position.cog IS '대지침로 (Course Over Ground, 도)'; +COMMENT ON COLUMN signal.t_vessel_latest_position.heading IS '선수방향 (Heading, 도)'; +COMMENT ON COLUMN signal.t_vessel_latest_position.ship_nm IS '선박명'; +COMMENT ON COLUMN signal.t_vessel_latest_position.ship_ty IS '선박 유형'; +COMMENT ON COLUMN signal.t_vessel_latest_position.last_update IS '최종 업데이트 시각'; +COMMENT ON COLUMN signal.t_vessel_latest_position.update_count IS '위치 업데이트 횟수'; + +-- ================================================ +-- 19. t_vessel_tracks_5min - 선박 항적 (5분, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_vessel_tracks_5min ( + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (5분 단위) + track_geom GEOMETRY(LINESTRINGM, 4326), -- 항적 + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + max_speed NUMERIC(6,2), -- 최대 속도 (knots) + point_count INTEGER, -- 포인트 수 + start_position JSONB, -- 시작 위치 + end_position JSONB, -- 종료 위치 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_vessel_tracks_5min_pkey PRIMARY KEY (sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_vessel_tracks_5min IS '선박 항적 5분 단위 집계'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.sig_src_cd IS '신호원 코드'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.target_id IS '타겟 ID'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.time_bucket IS '5분 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.track_geom IS 'LineStringM 형식 항적 (M값은 첫 포인트 기준 상대시간)'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.distance_nm IS '총 이동 거리 (해리)'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.avg_speed IS '평균 속도 (knots, ST_Length 기반 계산)'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.max_speed IS '최대 속도 (knots)'; 
+COMMENT ON COLUMN signal.t_vessel_tracks_5min.point_count IS '항적을 구성하는 포인트 수'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.start_position IS '시작 위치 JSON {lat, lon, time, sog}'; +COMMENT ON COLUMN signal.t_vessel_tracks_5min.end_position IS '종료 위치 JSON {lat, lon, time, sog}'; + +-- ================================================ +-- 20. t_vessel_tracks_daily - 선박 항적 (일별, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_vessel_tracks_daily ( + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket DATE NOT NULL, -- 날짜 (일 단위) + track_geom GEOMETRY(LINESTRINGM, 4326), -- 항적 + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + max_speed NUMERIC(6,2), -- 최대 속도 (knots) + point_count INTEGER, -- 포인트 수 + operating_hours NUMERIC(4,2), -- 운항 시간 + port_visits JSONB, -- 항구 방문 정보 + start_position JSONB, -- 시작 위치 + end_position JSONB, -- 종료 위치 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_vessel_tracks_daily_pkey PRIMARY KEY (sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_vessel_tracks_daily_vessel ON signal.t_vessel_tracks_daily (sig_src_cd, target_id); +CREATE INDEX idx_vessel_tracks_daily_time ON signal.t_vessel_tracks_daily (time_bucket); +CREATE INDEX idx_vessel_tracks_daily_track_geom ON signal.t_vessel_tracks_daily USING GIST (track_geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_vessel_tracks_daily IS '선박 항적 일별 집계'; +COMMENT ON COLUMN signal.t_vessel_tracks_daily.time_bucket IS '일 단위 날짜'; +COMMENT ON COLUMN signal.t_vessel_tracks_daily.track_geom IS '일별 병합된 항적 (간소화 적용)'; +COMMENT ON COLUMN signal.t_vessel_tracks_daily.operating_hours IS '실제 운항 시간'; +COMMENT ON COLUMN signal.t_vessel_tracks_daily.port_visits IS '항구 방문 정보 [{port_id, entry_time, exit_time}]'; + +-- ================================================ +-- 21. 
t_vessel_tracks_hourly - 선박 항적 (시간별, 파티션 테이블) +-- ================================================ +CREATE TABLE signal.t_vessel_tracks_hourly ( + sig_src_cd VARCHAR(10) NOT NULL, -- 신호원 코드 + target_id VARCHAR(50) NOT NULL, -- 타겟 ID + time_bucket TIMESTAMP NOT NULL, -- 시간 버킷 (1시간 단위) + track_geom GEOMETRY(LINESTRINGM, 4326), -- 항적 + distance_nm NUMERIC(10,2), -- 이동 거리 (해리) + avg_speed NUMERIC(6,2), -- 평균 속도 (knots) + max_speed NUMERIC(6,2), -- 최대 속도 (knots) + point_count INTEGER, -- 포인트 수 + start_position JSONB, -- 시작 위치 + end_position JSONB, -- 종료 위치 + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, -- 생성 시각 + CONSTRAINT t_vessel_tracks_hourly_pkey PRIMARY KEY (sig_src_cd, target_id, time_bucket) +) PARTITION BY RANGE (time_bucket); + +-- 인덱스 +CREATE INDEX idx_vessel_tracks_hourly_vessel ON signal.t_vessel_tracks_hourly (sig_src_cd, target_id); +CREATE INDEX idx_vessel_tracks_hourly_time ON signal.t_vessel_tracks_hourly (time_bucket); +CREATE INDEX idx_vessel_tracks_hourly_track_geom ON signal.t_vessel_tracks_hourly USING GIST (track_geom); + +-- 테이블 코멘트 +COMMENT ON TABLE signal.t_vessel_tracks_hourly IS '선박 항적 시간별 집계'; +COMMENT ON COLUMN signal.t_vessel_tracks_hourly.time_bucket IS '1시간 단위 시간 버킷'; +COMMENT ON COLUMN signal.t_vessel_tracks_hourly.track_geom IS '시간별 병합된 항적 (간소화 적용)'; diff --git a/src/main/resources/static/v2/README.md b/src/main/resources/static/v2/README.md new file mode 100644 index 0000000..fca8476 --- /dev/null +++ b/src/main/resources/static/v2/README.md @@ -0,0 +1,132 @@ +# Signal Batch Static Resources v2.0 + +폐쇄망 환경에서 운용되는 Signal Batch 프로젝트의 개선된 정적 리소스 구조입니다. 
+ +## 구조 개요 + +``` +v2/ +├── css/ # 스타일시트 +│ ├── common.css # 공통 스타일 +│ └── abnormal-tracks.css # 비정상 궤적 전용 스타일 +├── js/ # JavaScript 모듈 +│ ├── api/ # API 통신 모듈 +│ │ └── abnormal-tracks-api.js +│ ├── components/ # 재사용 가능한 컴포넌트 +│ │ ├── map-controller.js +│ │ ├── vessel-list.js +│ │ └── statistics-panel.js +│ ├── pages/ # 페이지별 애플리케이션 +│ │ └── abnormal-tracks-app.js +│ └── utils/ # 유틸리티 함수 +│ ├── constants.js +│ └── helpers.js +├── pages/ # HTML 페이지 +│ └── abnormal-tracks.html +└── README.md +``` + +## 주요 개선사항 + +### 1. 모듈화된 구조 +- 인라인 CSS/JavaScript를 별도 파일로 분리 +- 기능별로 모듈 분할하여 유지보수성 향상 +- ES6 모듈 시스템 사용 + +### 2. 컴포넌트 기반 설계 +- **MapController**: 지도 렌더링 및 상호작용 관리 +- **VesselList**: 선박 목록 표시 및 필터링 +- **StatisticsPanel**: 통계 정보 표시 및 범례 관리 + +### 3. API 모듈 분리 +- API 호출 로직을 별도 모듈로 분리 +- 에러 처리 및 로딩 상태 관리 개선 + +### 4. 폐쇄망 환경 최적화 +- 모든 라이브러리는 `/libs` 경로의 로컬 파일 사용 유지 +- 지도 타일도 로컬 API 엔드포인트 사용 +- CDN 의존성 없음 + +## 사용 방법 + +### 기본 사용법 +1. 웹 서버에서 `/v2/pages/abnormal-tracks.html` 접근 +2. 기존 기능과 동일하게 작동하되, 개선된 코드 구조 + +### 개발 시 주의사항 +1. **모듈 임포트**: ES6 모듈 문법 사용 +2. **이벤트 처리**: 컴포넌트 간 이벤트 핸들러 패턴 활용 +3. **상태 관리**: 각 컴포넌트가 자체 상태를 관리 + +## API 엔드포인트 + +기존과 동일한 엔드포인트 사용: +- `GET /api/v1/abnormal-tracks/recent` - 최근 비정상 궤적 조회 +- `POST /api/v1/abnormal-tracks/detect` - 사용자 정의 검출 +- `POST /api/v1/abnormal-tracks/move-to-abnormal` - 비정상 테이블 이동 +- `GET /api/tiles/world/{z}/{x}/{y}.webp` - 지도 타일 + +## 컴포넌트 상세 + +### MapController +- MapLibre GL JS와 Deck.gl 통합 관리 +- 트랙 시각화 및 상호작용 처리 +- GeoJSON 캐싱으로 성능 최적화 + +### VesselList +- 선박별 궤적 그룹화 및 정렬 +- 필터링 및 검색 기능 +- 사용자 정의 검출 모드 지원 + +### StatisticsPanel +- 실시간 통계 업데이트 +- 비정상 유형별 범례 생성 +- 조회 기간 표시 + +## 설정 및 상수 + +### constants.js +```javascript +// API 엔드포인트 +export const API_ENDPOINTS = { ... }; + +// 비정상 유형 및 색상 정의 +export const ABNORMAL_TYPES = { ... }; +export const ABNORMAL_TYPE_COLORS = { ... }; + +// 지도 설정 +export const MAP_CONFIG = { ... 
}; +``` + +### helpers.js +```javascript +// 날짜 처리, 포맷팅, 유틸리티 함수들 +export function formatDistance(distance) { ... } +export function getDateRange(days) { ... } +``` + +## 성능 최적화 + +1. **GeoJSON 캐싱**: 파싱된 GeoJSON 데이터 메모리 캐시 +2. **디바운스**: 빈번한 API 호출 방지 +3. **이벤트 위임**: 동적 요소 이벤트 처리 최적화 +4. **레이어 관리**: Deck.gl 레이어 효율적 업데이트 + +## 브라우저 호환성 + +- ES6 모듈을 지원하는 모던 브라우저 필요 +- Chrome 61+, Firefox 60+, Safari 10.1+, Edge 79+ + +## 마이그레이션 가이드 + +기존 abnormal-tracks.html에서 v2로 이전 시: +1. 기능적으로는 100% 동일 +2. URL만 `/v2/pages/abnormal-tracks.html`로 변경 +3. 개발자 도구에서 모듈 구조 확인 가능 + +## 향후 확장 계획 + +1. **TypeScript 도입**: 타입 안전성 확보 +2. **테스트 코드**: 단위 테스트 및 통합 테스트 +3. **번들링**: Webpack/Vite를 통한 최적화 +4. **PWA 기능**: 오프라인 지원 및 캐싱 \ No newline at end of file diff --git a/src/test/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessorTest.java b/src/test/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessorTest.java new file mode 100644 index 0000000..bc4b38c --- /dev/null +++ b/src/test/java/gc/mda/signal_batch/batch/processor/AisTargetDataProcessorTest.java @@ -0,0 +1,230 @@ +package gc.mda.signal_batch.batch.processor; + +import gc.mda.signal_batch.domain.vessel.dto.AisTargetDto; +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +class AisTargetDataProcessorTest { + + private AisTargetDataProcessor processor; + + @BeforeEach + void setUp() { + processor = new AisTargetDataProcessor(); + } + + private AisTargetDto createValidDto() { + return AisTargetDto.builder() + .mmsi("440331240") + .lat(34.227527) + .lon(127.063800) + .sog(10.4) + .cog(125.0) + .heading(130.0) + .name("TEST VESSEL") + .callsign("DSAB") + .vesselType("Cargo") + .extraInfo(null) + .imo(9123456L) + .length(120) + .width(20) + .draught(6.5) + .destination("BUSAN") + 
.status("Under Way Using Engine") + .rot(0) + .messageTimestamp("2025-08-26T10:30:45Z") + .eta("2025-09-01T14:00:00Z") + .build(); + } + + @Nested + @DisplayName("유효성 검사") + class Validation { + + @Test + @DisplayName("유효한 데이터 → Entity 변환 성공") + void process_validDto_returnsEntity() { + AisTargetDto dto = createValidDto(); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getMmsi()).isEqualTo("440331240"); + assertThat(entity.getLat()).isEqualTo(34.227527); + assertThat(entity.getLon()).isEqualTo(127.063800); + assertThat(entity.getName()).isEqualTo("TEST VESSEL"); + assertThat(entity.getImo()).isEqualTo(9123456L); + assertThat(entity.getVesselType()).isEqualTo("Cargo"); + assertThat(entity.getSog()).isEqualTo(10.4); + assertThat(entity.getMessageTimestamp()).isNotNull(); + } + + @Test + @DisplayName("MMSI null → null 반환") + void process_nullMmsi_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setMmsi(null); + + assertThat(processor.process(dto)).isNull(); + } + + @Test + @DisplayName("MMSI 빈 문자열 → null 반환") + void process_blankMmsi_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setMmsi(" "); + + assertThat(processor.process(dto)).isNull(); + } + + @Test + @DisplayName("Lat null → null 반환") + void process_nullLat_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setLat(null); + + assertThat(processor.process(dto)).isNull(); + } + + @Test + @DisplayName("Lon null → null 반환") + void process_nullLon_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setLon(null); + + assertThat(processor.process(dto)).isNull(); + } + } + + @Nested + @DisplayName("타임스탬프 파싱") + class TimestampParsing { + + @Test + @DisplayName("유효한 ISO 8601 타임스탬프 파싱") + void process_validTimestamp_parsed() { + AisTargetDto dto = createValidDto(); + dto.setMessageTimestamp("2025-08-26T10:30:45Z"); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + 
assertThat(entity.getMessageTimestamp()).isNotNull(); + assertThat(entity.getMessageTimestamp().getYear()).isEqualTo(2025); + assertThat(entity.getMessageTimestamp().getMonthValue()).isEqualTo(8); + assertThat(entity.getMessageTimestamp().getDayOfMonth()).isEqualTo(26); + } + + @Test + @DisplayName("잘못된 타임스탬프 → null 반환") + void process_invalidTimestamp_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setMessageTimestamp("invalid-date"); + + assertThat(processor.process(dto)).isNull(); + } + + @Test + @DisplayName("null 타임스탬프 → null 반환") + void process_nullTimestamp_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setMessageTimestamp(null); + + assertThat(processor.process(dto)).isNull(); + } + + @Test + @DisplayName("빈 문자열 타임스탬프 → null 반환") + void process_emptyTimestamp_returnsNull() { + AisTargetDto dto = createValidDto(); + dto.setMessageTimestamp(""); + + assertThat(processor.process(dto)).isNull(); + } + } + + @Nested + @DisplayName("ETA 파싱") + class EtaParsing { + + @Test + @DisplayName("유효한 ETA → 파싱 성공") + void process_validEta_parsed() { + AisTargetDto dto = createValidDto(); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getEta()).isNotNull(); + assertThat(entity.getEta().getYear()).isEqualTo(2025); + } + + @Test + @DisplayName("특수값 ETA (9999-12-31) → null 처리") + void process_specialEta_null() { + AisTargetDto dto = createValidDto(); + dto.setEta("9999-12-31T23:59:59Z"); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getEta()).isNull(); + } + + @Test + @DisplayName("null ETA → null 처리") + void process_nullEta_null() { + AisTargetDto dto = createValidDto(); + dto.setEta(null); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getEta()).isNull(); + } + } + + @Test + @DisplayName("전체 필드 매핑 검증") + void process_allFields_mappedCorrectly() { + AisTargetDto dto 
= createValidDto(); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getMmsi()).isEqualTo(dto.getMmsi()); + assertThat(entity.getImo()).isEqualTo(dto.getImo()); + assertThat(entity.getName()).isEqualTo(dto.getName()); + assertThat(entity.getCallsign()).isEqualTo(dto.getCallsign()); + assertThat(entity.getVesselType()).isEqualTo(dto.getVesselType()); + assertThat(entity.getExtraInfo()).isEqualTo(dto.getExtraInfo()); + assertThat(entity.getLat()).isEqualTo(dto.getLat()); + assertThat(entity.getLon()).isEqualTo(dto.getLon()); + assertThat(entity.getHeading()).isEqualTo(dto.getHeading()); + assertThat(entity.getSog()).isEqualTo(dto.getSog()); + assertThat(entity.getCog()).isEqualTo(dto.getCog()); + assertThat(entity.getRot()).isEqualTo(dto.getRot()); + assertThat(entity.getLength()).isEqualTo(dto.getLength()); + assertThat(entity.getWidth()).isEqualTo(dto.getWidth()); + assertThat(entity.getDraught()).isEqualTo(dto.getDraught()); + assertThat(entity.getDestination()).isEqualTo(dto.getDestination()); + assertThat(entity.getStatus()).isEqualTo(dto.getStatus()); + } + + @Test + @DisplayName("문자 혼합 MMSI도 정상 처리") + void process_mixedMmsi_handled() { + AisTargetDto dto = createValidDto(); + dto.setMmsi("MID123456"); + + AisTargetEntity entity = processor.process(dto); + + assertThat(entity).isNotNull(); + assertThat(entity.getMmsi()).isEqualTo("MID123456"); + } +} diff --git a/src/test/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManagerTest.java b/src/test/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManagerTest.java new file mode 100644 index 0000000..9eb87e2 --- /dev/null +++ b/src/test/java/gc/mda/signal_batch/batch/reader/AisTargetCacheManagerTest.java @@ -0,0 +1,309 @@ +package gc.mda.signal_batch.batch.reader; + +import gc.mda.signal_batch.domain.vessel.model.AisTargetEntity; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import 
org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.springframework.test.util.ReflectionTestUtils; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.*; + +import static org.assertj.core.api.Assertions.assertThat; + +class AisTargetCacheManagerTest { + + private AisTargetCacheManager cacheManager; + + @BeforeEach + void setUp() { + cacheManager = new AisTargetCacheManager(); + ReflectionTestUtils.setField(cacheManager, "ttlMinutes", 120L); + ReflectionTestUtils.setField(cacheManager, "maxSize", 1000); + cacheManager.init(); + } + + private AisTargetEntity createEntity(String mmsi, OffsetDateTime timestamp) { + return AisTargetEntity.builder() + .mmsi(mmsi) + .lat(34.0) + .lon(127.0) + .sog(10.0) + .messageTimestamp(timestamp) + .build(); + } + + @Nested + @DisplayName("단건 조회/저장") + class SingleOperations { + + @Test + @DisplayName("get — 존재하는 MMSI") + void get_existing_returnsEntity() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("440331240", ts)); + + Optional result = cacheManager.get("440331240"); + + assertThat(result).isPresent(); + assertThat(result.get().getMmsi()).isEqualTo("440331240"); + } + + @Test + @DisplayName("get — 없는 MMSI") + void get_nonExisting_returnsEmpty() { + assertThat(cacheManager.get("999999999")).isEmpty(); + } + + @Test + @DisplayName("put — 신규 저장") + void put_newEntity_stored() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("440331240", ts)); + + assertThat(cacheManager.size()).isEqualTo(1); + } + + @Test + @DisplayName("put — null entity 무시") + void put_null_ignored() { + cacheManager.put(null); + + assertThat(cacheManager.size()).isEqualTo(0); + } + + @Test + @DisplayName("put — mmsi null entity 무시") + void put_nullMmsi_ignored() { + cacheManager.put(AisTargetEntity.builder().lat(34.0).lon(127.0).build()); + + assertThat(cacheManager.size()).isEqualTo(0); + } + } + + @Nested + 
@DisplayName("timestamp 비교 (isNewer)") + class TimestampComparison { + + @Test + @DisplayName("최신 데이터로 업데이트") + void put_newerTimestamp_updates() { + OffsetDateTime older = OffsetDateTime.of(2025, 1, 1, 10, 0, 0, 0, ZoneOffset.UTC); + OffsetDateTime newer = OffsetDateTime.of(2025, 1, 1, 11, 0, 0, 0, ZoneOffset.UTC); + + cacheManager.put(createEntity("440331240", older)); + cacheManager.put(createEntity("440331240", newer)); + + AisTargetEntity entity = cacheManager.get("440331240").orElseThrow(); + assertThat(entity.getMessageTimestamp()).isEqualTo(newer); + } + + @Test + @DisplayName("이전 데이터는 업데이트하지 않음") + void put_olderTimestamp_skipped() { + OffsetDateTime newer = OffsetDateTime.of(2025, 1, 1, 11, 0, 0, 0, ZoneOffset.UTC); + OffsetDateTime older = OffsetDateTime.of(2025, 1, 1, 10, 0, 0, 0, ZoneOffset.UTC); + + cacheManager.put(createEntity("440331240", newer)); + cacheManager.put(createEntity("440331240", older)); + + AisTargetEntity entity = cacheManager.get("440331240").orElseThrow(); + assertThat(entity.getMessageTimestamp()).isEqualTo(newer); + } + + @Test + @DisplayName("기존 timestamp=null → 새 데이터로 업데이트") + void put_existingTimestampNull_updates() { + cacheManager.put(createEntity("440331240", null)); + + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("440331240", ts)); + + AisTargetEntity entity = cacheManager.get("440331240").orElseThrow(); + assertThat(entity.getMessageTimestamp()).isEqualTo(ts); + } + + @Test + @DisplayName("새 데이터의 timestamp=null → 업데이트하지 않음") + void put_newTimestampNull_skipped() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("440331240", ts)); + cacheManager.put(createEntity("440331240", null)); + + AisTargetEntity entity = cacheManager.get("440331240").orElseThrow(); + assertThat(entity.getMessageTimestamp()).isEqualTo(ts); + } + } + + @Nested + @DisplayName("배치 조회/업데이트") + class BatchOperations { + + @Test + @DisplayName("getAll — 존재하는 MMSI들 조회") + 
void getAll_existing_returnsMap() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("111111111", ts)); + cacheManager.put(createEntity("222222222", ts)); + cacheManager.put(createEntity("333333333", ts)); + + Map result = cacheManager.getAll(List.of("111111111", "333333333")); + + assertThat(result).hasSize(2); + assertThat(result).containsKeys("111111111", "333333333"); + } + + @Test + @DisplayName("getAll — null 리스트") + void getAll_null_returnsEmpty() { + assertThat(cacheManager.getAll(null)).isEmpty(); + } + + @Test + @DisplayName("getAll — 빈 리스트") + void getAll_empty_returnsEmpty() { + assertThat(cacheManager.getAll(List.of())).isEmpty(); + } + + @Test + @DisplayName("putAll — 정상 배치 저장") + void putAll_multipleEntities_stored() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + List entities = List.of( + createEntity("111111111", ts), + createEntity("222222222", ts), + createEntity("333333333", ts) + ); + + cacheManager.putAll(entities); + + assertThat(cacheManager.size()).isEqualTo(3); + } + + @Test + @DisplayName("putAll — null 리스트 무시") + void putAll_null_ignored() { + cacheManager.putAll(null); + + assertThat(cacheManager.size()).isEqualTo(0); + } + + @Test + @DisplayName("putAll — 빈 리스트 무시") + void putAll_empty_ignored() { + cacheManager.putAll(List.of()); + + assertThat(cacheManager.size()).isEqualTo(0); + } + + @Test + @DisplayName("putAll — 신규/업데이트/스킵 혼합") + void putAll_mixedUpdates() { + OffsetDateTime t1 = OffsetDateTime.of(2025, 1, 1, 10, 0, 0, 0, ZoneOffset.UTC); + OffsetDateTime t2 = OffsetDateTime.of(2025, 1, 1, 11, 0, 0, 0, ZoneOffset.UTC); + OffsetDateTime t0 = OffsetDateTime.of(2025, 1, 1, 9, 0, 0, 0, ZoneOffset.UTC); + + // 기존 데이터 + cacheManager.put(createEntity("111111111", t1)); + + // 배치: 111=이전(스킵), 222=신규, 333=신규 + List batch = List.of( + createEntity("111111111", t0), // 이전 → 스킵 + createEntity("222222222", t2), // 신규 + createEntity("333333333", t1) // 신규 + ); + 
cacheManager.putAll(batch); + + assertThat(cacheManager.size()).isEqualTo(3); + // 111은 기존 t1 유지 + assertThat(cacheManager.get("111111111").get().getMessageTimestamp()).isEqualTo(t1); + } + } + + @Nested + @DisplayName("캐시 스냅샷 및 필터링") + class SnapshotAndFilter { + + @Test + @DisplayName("getAllValues — 전체 값 조회") + void getAllValues_returnsAll() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("111111111", ts)); + cacheManager.put(createEntity("222222222", ts)); + + assertThat(cacheManager.getAllValues()).hasSize(2); + } + + @Test + @DisplayName("getByTimeRange — 시간 범위 내 데이터만 반환") + void getByTimeRange_filtersCorrectly() { + OffsetDateTime recent = OffsetDateTime.now(ZoneOffset.UTC).minusMinutes(5); + OffsetDateTime old = OffsetDateTime.now(ZoneOffset.UTC).minusMinutes(120); + + cacheManager.put(createEntity("111111111", recent)); + cacheManager.put(createEntity("222222222", old)); + + List result = cacheManager.getByTimeRange(60); + + assertThat(result).hasSize(1); + assertThat(result.get(0).getMmsi()).isEqualTo("111111111"); + } + + @Test + @DisplayName("getByTimeRange — timestamp=null 데이터 제외") + void getByTimeRange_excludesNullTimestamp() { + OffsetDateTime recent = OffsetDateTime.now(ZoneOffset.UTC).minusMinutes(5); + + cacheManager.put(createEntity("111111111", recent)); + cacheManager.put(createEntity("222222222", null)); + + List result = cacheManager.getByTimeRange(60); + + assertThat(result).hasSize(1); + } + } + + @Nested + @DisplayName("캐시 관리") + class CacheManagement { + + @Test + @DisplayName("evict — 개별 삭제") + void evict_removes() { + cacheManager.put(createEntity("440331240", OffsetDateTime.now(ZoneOffset.UTC))); + + cacheManager.evict("440331240"); + + assertThat(cacheManager.get("440331240")).isEmpty(); + } + + @Test + @DisplayName("clear — 전체 삭제") + void clear_removesAll() { + OffsetDateTime ts = OffsetDateTime.now(ZoneOffset.UTC); + cacheManager.put(createEntity("111111111", ts)); + 
cacheManager.put(createEntity("222222222", ts)); + + cacheManager.clear(); + + assertThat(cacheManager.size()).isEqualTo(0); + } + + @Test + @DisplayName("getStats — 통계 반환") + void getStats_returnsMap() { + cacheManager.put(createEntity("440331240", OffsetDateTime.now(ZoneOffset.UTC))); + cacheManager.get("440331240"); // hit + cacheManager.get("999999999"); // miss + + Map stats = cacheManager.getStats(); + + assertThat(stats).containsKey("estimatedSize"); + assertThat(stats).containsKey("hitCount"); + assertThat(stats).containsKey("missCount"); + assertThat(stats).containsKey("maxSize"); + } + } +} diff --git a/src/test/java/gc/mda/signal_batch/config/TestDataSourceConfig.java b/src/test/java/gc/mda/signal_batch/config/TestDataSourceConfig.java index b4f4853..f8ebe5a 100644 --- a/src/test/java/gc/mda/signal_batch/config/TestDataSourceConfig.java +++ b/src/test/java/gc/mda/signal_batch/config/TestDataSourceConfig.java @@ -15,18 +15,6 @@ public class TestDataSourceConfig { @Bean @Primary - public DataSource collectDataSource() { - HikariConfig config = new HikariConfig(); - config.setJdbcUrl("jdbc:h2:mem:collectdb;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE"); - config.setDriverClassName("org.h2.Driver"); - config.setUsername("sa"); - config.setPassword(""); - config.setPoolName("CollectPool-Test"); - config.setMaximumPoolSize(5); - return new HikariDataSource(config); - } - - @Bean public DataSource queryDataSource() { HikariConfig config = new HikariConfig(); config.setJdbcUrl("jdbc:h2:mem:querydb;MODE=PostgreSQL;DATABASE_TO_LOWER=TRUE"); diff --git a/src/test/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMergerTest.java b/src/test/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMergerTest.java new file mode 100644 index 0000000..2994aff --- /dev/null +++ b/src/test/java/gc/mda/signal_batch/domain/vessel/service/VesselTrackMergerTest.java @@ -0,0 +1,243 @@ +package gc.mda.signal_batch.domain.vessel.service; + +import 
package gc.mda.signal_batch.domain.vessel.service;

import gc.mda.signal_batch.global.websocket.dto.MergedVesselTrack;
import gc.mda.signal_batch.global.websocket.dto.VesselTrackData;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Nested;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.test.util.ReflectionTestUtils;

import java.time.LocalDateTime;
import java.util.ArrayList;
import java.util.List;

import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.ArgumentMatchers.*;
import static org.mockito.Mockito.when;

/**
 * Unit tests for VesselTrackMerger: per-vessel segment merging, empty-geometry
 * filtering, PostGIS distance fallback, and optional large-gap filtering.
 * The query-side JdbcTemplate is mocked; no database is touched.
 */
@ExtendWith(MockitoExtension.class)
class VesselTrackMergerTest {

    @InjectMocks
    private VesselTrackMerger merger;

    @Mock
    private JdbcTemplate queryJdbcTemplate;

    @BeforeEach
    void setUp() {
        // Defaults: gap filtering off, 6-hour large-gap threshold.
        ReflectionTestUtils.setField(merger, "enableMergerFiltering", false);
        ReflectionTestUtils.setField(merger, "largeGapThresholdHours", 6);
    }

    /** Builds a track-segment fixture; nationalCode is derived from the first 3 MMSI digits. */
    private VesselTrackData createSegment(String mmsi, LocalDateTime start, LocalDateTime end,
                                          String geom, double distance, int pointCount) {
        VesselTrackData data = new VesselTrackData();
        data.setMmsi(mmsi);
        data.setNationalCode(mmsi.length() >= 3 ? mmsi.substring(0, 3) : "000");
        data.setTrackGeom(geom);
        data.setDistanceNm(distance);
        data.setAvgSpeed(10.0);
        data.setMaxSpeed(15.0);
        data.setPointCount(pointCount);
        data.setStartTime(start);
        data.setEndTime(end);
        return data;
    }

    @Nested
    @DisplayName("mergeTracksByVessel")
    class MergeTracksByVessel {

        @Test
        @DisplayName("단일 선박 단일 세그먼트 병합")
        void singleVessel_singleSegment() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            String geom = "LINESTRING M(127.0 34.0 1000, 127.01 34.01 1300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(1.5);

            // was a raw List — element type made explicit
            List<VesselTrackData> tracks = List.of(
                createSegment("440331240", t1, t2, geom, 1.5, 2)
            );

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).hasSize(1);
            assertThat(result.get(0).getMmsi()).isEqualTo("440331240");
            assertThat(result.get(0).getNationalCode()).isEqualTo("440");
            assertThat(result.get(0).getMergedTrackGeom()).isEqualTo(geom);
        }

        @Test
        @DisplayName("다중 선박 각각 병합")
        void multipleVessels_merged() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            String geom1 = "LINESTRING M(127.0 34.0 1000, 127.01 34.01 1300)";
            String geom2 = "LINESTRING M(126.0 35.0 1000, 126.01 35.01 1300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(1.5);

            List<VesselTrackData> tracks = List.of(
                createSegment("440111111", t1, t2, geom1, 1.0, 2),
                createSegment("440222222", t1, t2, geom2, 2.0, 2)
            );

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).hasSize(2);
        }

        @Test
        @DisplayName("빈 geometry 세그먼트 필터링")
        void emptyGeometry_filtered() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);

            // null, LINESTRING EMPTY, and GEOMETRYCOLLECTION EMPTY are all dropped.
            List<VesselTrackData> tracks = List.of(
                createSegment("440331240", t1, t2, null, 0, 0),
                createSegment("440331240", t1, t2, "LINESTRING EMPTY", 0, 0),
                createSegment("440331240", t1, t2, "GEOMETRYCOLLECTION EMPTY", 0, 0)
            );

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).isEmpty();
        }

        @Test
        @DisplayName("다중 세그먼트 병합 시 시간순 정렬 및 시간 범위")
        void multipleSegments_sortedByTime() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            LocalDateTime t3 = LocalDateTime.of(2025, 1, 1, 10, 10);
            LocalDateTime t4 = LocalDateTime.of(2025, 1, 1, 10, 15);

            String geom1 = "LINESTRING M(127.0 34.0 0, 127.01 34.01 300)";
            String geom2 = "LINESTRING M(127.02 34.02 0, 127.03 34.03 300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(3.0);

            List<VesselTrackData> tracks = List.of(
                createSegment("440331240", t1, t2, geom1, 1.5, 2),
                createSegment("440331240", t3, t4, geom2, 1.5, 2)
            );

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).hasSize(1);
            MergedVesselTrack merged = result.get(0);
            assertThat(merged.getMergedTrackGeom()).startsWith("LINESTRING M(");
            assertThat(merged.getStartTime()).isEqualTo(t1);
            assertThat(merged.getEndTime()).isEqualTo(t4);
            assertThat(merged.getTimeBuckets()).hasSize(2);
        }

        @Test
        @DisplayName("PostGIS 거리 계산 실패 시 fallback 합산")
        void postgisFails_fallbackToSum() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            String geom = "LINESTRING M(127.0 34.0 1000, 127.01 34.01 1300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenThrow(new RuntimeException("PostGIS unavailable"));

            List<VesselTrackData> tracks = List.of(
                createSegment("440331240", t1, t2, geom, 1.5, 2)
            );

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            // On failure the merger falls back to summing per-segment distances.
            assertThat(result).hasSize(1);
            assertThat(result.get(0).getTotalDistanceNm()).isEqualTo(1.5);
        }

        @Test
        @DisplayName("MMSI 3자리 미만 시 nationalCode='000'")
        void shortMmsi_nationalCode000() {
            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            String geom = "LINESTRING M(127.0 34.0 1000, 127.01 34.01 1300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(1.0);

            // createSegment already assigns the MMSI; the duplicate setMmsi("AB") call was removed.
            VesselTrackData segment = createSegment("AB", t1, t2, geom, 1.0, 2);

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(List.of(segment));

            assertThat(result).hasSize(1);
            assertThat(result.get(0).getNationalCode()).isEqualTo("000");
        }
    }

    @Nested
    @DisplayName("large gap 필터링")
    class LargeGapFiltering {

        @Test
        @DisplayName("enableMergerFiltering=true 시 큰 gap 세그먼트 제거")
        void largeGap_filtered() {
            ReflectionTestUtils.setField(merger, "enableMergerFiltering", true);

            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            // 7-hour gap, above the 6-hour threshold set in setUp()
            LocalDateTime t3 = LocalDateTime.of(2025, 1, 1, 17, 5);
            LocalDateTime t4 = LocalDateTime.of(2025, 1, 1, 17, 10);

            String geom1 = "LINESTRING M(127.0 34.0 0, 127.01 34.01 300)";
            String geom2 = "LINESTRING M(127.02 34.02 0, 127.03 34.03 300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(1.5);

            List<VesselTrackData> tracks = new ArrayList<>();
            tracks.add(createSegment("440331240", t1, t2, geom1, 1.5, 2));
            tracks.add(createSegment("440331240", t3, t4, geom2, 1.5, 2));

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).hasSize(1);
            assertThat(result.get(0).getTimeBuckets()).hasSize(1);
        }

        @Test
        @DisplayName("enableMergerFiltering=false 시 gap 무시하고 모두 포함")
        void largeGap_notFiltered() {
            ReflectionTestUtils.setField(merger, "enableMergerFiltering", false);

            LocalDateTime t1 = LocalDateTime.of(2025, 1, 1, 10, 0);
            LocalDateTime t2 = LocalDateTime.of(2025, 1, 1, 10, 5);
            LocalDateTime t3 = LocalDateTime.of(2025, 1, 1, 17, 5);
            LocalDateTime t4 = LocalDateTime.of(2025, 1, 1, 17, 10);

            String geom1 = "LINESTRING M(127.0 34.0 0, 127.01 34.01 300)";
            String geom2 = "LINESTRING M(127.02 34.02 0, 127.03 34.03 300)";

            when(queryJdbcTemplate.queryForObject(anyString(), eq(Double.class), anyString()))
                .thenReturn(3.0);

            List<VesselTrackData> tracks = new ArrayList<>();
            tracks.add(createSegment("440331240", t1, t2, geom1, 1.5, 2));
            tracks.add(createSegment("440331240", t3, t4, geom2, 1.5, 2));

            List<MergedVesselTrack> result = merger.mergeTracksByVessel(tracks);

            assertThat(result).hasSize(1);
            assertThat(result.get(0).getTimeBuckets()).hasSize(2);
        }
    }
}
@ParameterizedTest + @CsvSource({ + "Tug, 000025", + "Pilot Boat, 000025", + "Tender, 000025", + "Anti Pollution, 000025", + "Medical Transport, 000025", + "High Speed Craft, 000022", + "Wing in Ground-effect, 000022" + }) + @DisplayName("vesselType 그룹 매칭 시 올바른 코드 반환") + void resolve_groupMatch(String vesselType, String expectedCode) { + assertEquals(expectedCode, SignalKindCode.resolve(vesselType, null).getCode()); + } + } + + @Nested + @DisplayName("Vessel + extraInfo 조합") + class VesselWithExtraInfo { + + @ParameterizedTest + @CsvSource({ + "Vessel, Fishing, 000020", + "Vessel, Military Operations, 000025", + "Vessel, Towing, 000025", + "Vessel, Towing (Large), 000025", + "Vessel, Dredging/Underwater Ops, 000025", + "Vessel, Diving Operations, 000025", + "Vessel, Pleasure Craft, 000020", + "Vessel, Sailing, 000020", + "Vessel, N/A, 000020", + "Vessel, Hazardous Cat A, 000023", + "Vessel, Hazardous Cat B, 000023", + "Vessel, Unknown, 000027" + }) + @DisplayName("Vessel + extraInfo 조합으로 올바른 코드 반환") + void resolve_vesselWithExtraInfo(String vesselType, String extraInfo, String expectedCode) { + assertEquals(expectedCode, SignalKindCode.resolve(vesselType, extraInfo).getCode()); + } + } + + @Nested + @DisplayName("N/A + extraInfo 조합") + class NaWithExtraInfo { + + @Test + @DisplayName("N/A + hazardous cat → CARGO") + void resolve_naWithHazardous() { + assertEquals("000023", SignalKindCode.resolve("N/A", "Hazardous Cat X").getCode()); + } + + @Test + @DisplayName("N/A + 기타 → DEFAULT") + void resolve_naWithOther() { + assertEquals("000027", SignalKindCode.resolve("N/A", "Other").getCode()); + } + } + + @Nested + @DisplayName("Null/Blank 처리") + class NullBlank { + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = {" ", "\t"}) + @DisplayName("vesselType가 null/blank이면 DEFAULT 반환") + void resolve_nullOrBlankVesselType(String vesselType) { + assertEquals(SignalKindCode.DEFAULT, SignalKindCode.resolve(vesselType, null)); + } + + @Test + 
@DisplayName("vesselType과 extraInfo 모두 null이면 DEFAULT") + void resolve_bothNull() { + assertEquals(SignalKindCode.DEFAULT, SignalKindCode.resolve(null, null)); + } + } + + @Nested + @DisplayName("Case insensitivity") + class CaseInsensitive { + + @Test + @DisplayName("대문자 CARGO도 매칭") + void resolve_uppercase() { + assertEquals(SignalKindCode.CARGO, SignalKindCode.resolve("CARGO", null)); + } + + @Test + @DisplayName("대소문자 혼합 Passenger도 매칭") + void resolve_mixedCase() { + assertEquals(SignalKindCode.FERRY, SignalKindCode.resolve("Passenger", null)); + } + + @Test + @DisplayName("extraInfo도 case insensitive") + void resolve_extraInfoCaseInsensitive() { + assertEquals(SignalKindCode.FISHING, SignalKindCode.resolve("VESSEL", "FISHING")); + } + } + + @Test + @DisplayName("알 수 없는 vesselType은 DEFAULT 반환") + void resolve_unknownType() { + assertEquals(SignalKindCode.DEFAULT, SignalKindCode.resolve("UnknownType", null)); + } + + @Test + @DisplayName("Enum 값의 code와 koreanName이 올바른지 확인") + void enumValues_codeAndName() { + assertEquals("000020", SignalKindCode.FISHING.getCode()); + assertEquals("어선", SignalKindCode.FISHING.getKoreanName()); + assertEquals("000028", SignalKindCode.BUOY.getCode()); + assertEquals("부이/항로표지", SignalKindCode.BUOY.getKoreanName()); + } +} diff --git a/src/test/java/gc/mda/signal_batch/performance/IndexStatusTest.java b/src/test/java/gc/mda/signal_batch/performance/IndexStatusTest.java index 69c977a..012a94d 100644 --- a/src/test/java/gc/mda/signal_batch/performance/IndexStatusTest.java +++ b/src/test/java/gc/mda/signal_batch/performance/IndexStatusTest.java @@ -41,7 +41,8 @@ public class IndexStatusTest { "t_vessel_tracks_daily", "t_grid_tracks_summary_daily", "t_area_tracks_summary_daily", - "t_vessel_latest_position", + "t_ais_position", + "t_vessel_static", "t_tile_summary", "t_area_statistics" ); @@ -132,19 +133,21 @@ public class IndexStatusTest { log.info("\n=== 인덱스 생성 스크립트 ==="); List createStatements = List.of( - "-- 필수 인덱스 생성 스크립트", - 
"CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_vessel_tracks_5min_vessel_time ON signal.t_vessel_tracks_5min (sig_src_cd, target_id, time_bucket DESC);", + "-- 필수 인덱스 생성 스크립트 (mmsi 기반)", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_vessel_tracks_5min_vessel_time ON signal.t_vessel_tracks_5min (mmsi, time_bucket DESC);", "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_grid_vessel_tracks_haegu_time_desc ON signal.t_grid_vessel_tracks (haegu_no, time_bucket DESC);", "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_area_vessel_tracks_area_time_desc ON signal.t_area_vessel_tracks (area_id, time_bucket DESC);", "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_vessel_tracks_hourly_track_geom ON signal.t_vessel_tracks_hourly USING GIST (track_geom);", "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_vessel_tracks_daily_track_geom ON signal.t_vessel_tracks_daily USING GIST (track_geom);", + "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_ais_position_geom ON signal.t_ais_position USING GIST (geom);", "", "-- 인덱스 생성 후 통계 업데이트", "ANALYZE signal.t_vessel_tracks_5min;", "ANALYZE signal.t_grid_vessel_tracks;", "ANALYZE signal.t_area_vessel_tracks;", "ANALYZE signal.t_vessel_tracks_hourly;", - "ANALYZE signal.t_vessel_tracks_daily;" + "ANALYZE signal.t_vessel_tracks_daily;", + "ANALYZE signal.t_ais_position;" ); createStatements.forEach(log::info); diff --git a/src/test/java/gc/mda/signal_batch/util/SignalKindCodeTest.java b/src/test/java/gc/mda/signal_batch/util/SignalKindCodeTest.java new file mode 100644 index 0000000..a65b1ea --- /dev/null +++ b/src/test/java/gc/mda/signal_batch/util/SignalKindCodeTest.java @@ -0,0 +1,167 @@ +package gc.mda.signal_batch.util; + +import gc.mda.signal_batch.global.util.SignalKindCode; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import 
org.junit.jupiter.params.provider.NullAndEmptySource; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.assertj.core.api.Assertions.assertThat; + +@DisplayName("SignalKindCode - MDA 선종 범례코드 치환") +class SignalKindCodeTest { + + @Nested + @DisplayName("vesselType 단독 매칭") + class VesselTypeDirect { + + @ParameterizedTest(name = "vesselType={0} → {1}") + @CsvSource({ + "cargo, CARGO", + "tanker, TANKER", + "passenger, FERRY", + "aton, BUOY", + "law enforcement, GOV", + "search and rescue, KCGV", + "local vessel, FISHING" + }) + @DisplayName("직접 매칭 케이스") + void resolve_directMatch(String vesselType, String expectedName) { + SignalKindCode result = SignalKindCode.resolve(vesselType, null); + assertThat(result.name()).isEqualTo(expectedName); + } + } + + @Nested + @DisplayName("vesselType 그룹 매칭") + class VesselTypeGroup { + + @ParameterizedTest(name = "vesselType={0} → GOV") + @ValueSource(strings = {"tug", "pilot boat", "tender", "anti pollution", "medical transport"}) + @DisplayName("GOV 그룹 매칭") + void resolve_govGroup(String vesselType) { + assertThat(SignalKindCode.resolve(vesselType, null)).isEqualTo(SignalKindCode.GOV); + } + + @ParameterizedTest(name = "vesselType={0} → FERRY") + @ValueSource(strings = {"high speed craft", "wing in ground-effect"}) + @DisplayName("FERRY 그룹 매칭") + void resolve_ferryGroup(String vesselType) { + assertThat(SignalKindCode.resolve(vesselType, null)).isEqualTo(SignalKindCode.FERRY); + } + } + + @Nested + @DisplayName("Vessel + extraInfo 조합") + class VesselExtraInfo { + + @Test + @DisplayName("Vessel + fishing → FISHING") + void resolve_vesselFishing() { + assertThat(SignalKindCode.resolve("Vessel", "Fishing")).isEqualTo(SignalKindCode.FISHING); + } + + @Test + @DisplayName("Vessel + military operations → GOV") + void resolve_vesselMilitary() { + assertThat(SignalKindCode.resolve("Vessel", "Military Operations")).isEqualTo(SignalKindCode.GOV); + } + + @ParameterizedTest(name = "Vessel + {0} → GOV") + 
@ValueSource(strings = {"towing", "towing (large)", "dredging/underwater ops", "diving operations"}) + @DisplayName("Vessel + 해양작업 → GOV") + void resolve_vesselMarineOps(String extraInfo) { + assertThat(SignalKindCode.resolve("Vessel", extraInfo)).isEqualTo(SignalKindCode.GOV); + } + + @ParameterizedTest(name = "Vessel + {0} → FISHING") + @ValueSource(strings = {"pleasure craft", "sailing", "n/a"}) + @DisplayName("Vessel + 레저/기타 → FISHING") + void resolve_vesselLeisure(String extraInfo) { + assertThat(SignalKindCode.resolve("Vessel", extraInfo)).isEqualTo(SignalKindCode.FISHING); + } + + @Test + @DisplayName("Vessel + hazardous cat X → CARGO") + void resolve_vesselHazardous() { + assertThat(SignalKindCode.resolve("Vessel", "Hazardous Cat A")).isEqualTo(SignalKindCode.CARGO); + } + + @Test + @DisplayName("Vessel + 미매칭 extraInfo → DEFAULT") + void resolve_vesselUnknownExtra() { + assertThat(SignalKindCode.resolve("Vessel", "some-unknown")).isEqualTo(SignalKindCode.DEFAULT); + } + } + + @Nested + @DisplayName("N/A + extraInfo 조합") + class NaExtraInfo { + + @Test + @DisplayName("N/A + hazardous cat → CARGO") + void resolve_naHazardous() { + assertThat(SignalKindCode.resolve("N/A", "Hazardous Cat B")).isEqualTo(SignalKindCode.CARGO); + } + + @Test + @DisplayName("N/A + other → DEFAULT") + void resolve_naOther() { + assertThat(SignalKindCode.resolve("N/A", "Other")).isEqualTo(SignalKindCode.DEFAULT); + } + } + + @Nested + @DisplayName("Null/Blank/대소문자 처리") + class EdgeCases { + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = {" ", "\t"}) + @DisplayName("vesselType이 null/blank → DEFAULT") + void resolve_nullOrBlankVesselType(String vesselType) { + assertThat(SignalKindCode.resolve(vesselType, null)).isEqualTo(SignalKindCode.DEFAULT); + } + + @Test + @DisplayName("대소문자 무시 (case-insensitive)") + void resolve_caseInsensitive() { + assertThat(SignalKindCode.resolve("CARGO", null)).isEqualTo(SignalKindCode.CARGO); + 
assertThat(SignalKindCode.resolve("Tanker", null)).isEqualTo(SignalKindCode.TANKER); + assertThat(SignalKindCode.resolve("PASSENGER", null)).isEqualTo(SignalKindCode.FERRY); + } + + @Test + @DisplayName("알 수 없는 vesselType → DEFAULT (fallback)") + void resolve_unknownType() { + assertThat(SignalKindCode.resolve("unknown-xyz", null)).isEqualTo(SignalKindCode.DEFAULT); + } + + @Test + @DisplayName("양쪽 모두 null → DEFAULT") + void resolve_bothNull() { + assertThat(SignalKindCode.resolve(null, null)).isEqualTo(SignalKindCode.DEFAULT); + } + } + + @Nested + @DisplayName("코드 값 검증") + class CodeValues { + + @Test + @DisplayName("각 enum의 code 값이 올바른지 확인") + void codeValues() { + assertThat(SignalKindCode.FISHING.getCode()).isEqualTo("000020"); + assertThat(SignalKindCode.KCGV.getCode()).isEqualTo("000021"); + assertThat(SignalKindCode.FERRY.getCode()).isEqualTo("000022"); + assertThat(SignalKindCode.CARGO.getCode()).isEqualTo("000023"); + assertThat(SignalKindCode.TANKER.getCode()).isEqualTo("000024"); + assertThat(SignalKindCode.GOV.getCode()).isEqualTo("000025"); + assertThat(SignalKindCode.DEFAULT.getCode()).isEqualTo("000027"); + assertThat(SignalKindCode.BUOY.getCode()).isEqualTo("000028"); + } + } +}