release: 2026-03-11.2 (12건 커밋) #85
21
.gitignore
vendored
21
.gitignore
vendored
@ -55,6 +55,26 @@ wing_source_*.tar.gz
|
||||
__pycache__/
|
||||
*.pyc
|
||||
|
||||
# prediction/ Python 엔진 (로컬 실행 결과물)
|
||||
prediction/**/__pycache__/
|
||||
prediction/**/*.pyc
|
||||
# prediction/ opendrift 결과물 (로컬 실행 결과물)
|
||||
prediction/opendrift/result/
|
||||
prediction/opendrift/logs/
|
||||
prediction/opendrift/uvicorn.pid
|
||||
prediction/opendrift/.env
|
||||
# prediction/ 이미지분석 결과물 (로컬 실행 결과물)
|
||||
prediction/image/stitch/
|
||||
prediction/image/mx15hdi/Detect/Mask_result/
|
||||
prediction/image/mx15hdi/Detect/result/
|
||||
prediction/image/mx15hdi/Georeference/Mask_Tif/
|
||||
prediction/image/mx15hdi/Georeference/Tif/
|
||||
prediction/image/mx15hdi/Metadata/CSV/
|
||||
prediction/image/mx15hdi/Metadata/Image/Original_Images/
|
||||
prediction/image/mx15hdi/Polygon/Shp/
|
||||
# prediction/ 이미지분석 대용량 바이너리 (모델 가중치)
|
||||
prediction/image/**/*.pth
|
||||
|
||||
# HNS manual images (large binary)
|
||||
frontend/public/hns-manual/pages/
|
||||
frontend/public/hns-manual/images/
|
||||
@ -63,6 +83,7 @@ frontend/public/hns-manual/images/
|
||||
!.claude/
|
||||
.claude/settings.local.json
|
||||
.claude/CLAUDE.local.md
|
||||
*.local
|
||||
|
||||
# Team workflow (managed by /sync-team-workflow)
|
||||
.claude/rules/
|
||||
|
||||
122
backend/package-lock.json
generated
122
backend/package-lock.json
generated
@ -8,6 +8,7 @@
|
||||
"name": "backend",
|
||||
"version": "1.0.0",
|
||||
"dependencies": {
|
||||
"@types/multer": "^2.1.0",
|
||||
"bcrypt": "^6.0.0",
|
||||
"cookie-parser": "^1.4.7",
|
||||
"cors": "^2.8.5",
|
||||
@ -17,6 +18,7 @@
|
||||
"google-auth-library": "^10.6.1",
|
||||
"helmet": "^8.1.0",
|
||||
"jsonwebtoken": "^9.0.3",
|
||||
"multer": "^2.1.1",
|
||||
"pg": "^8.19.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
@ -515,7 +517,6 @@
|
||||
"version": "1.19.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz",
|
||||
"integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/connect": "*",
|
||||
@ -526,7 +527,6 @@
|
||||
"version": "3.4.38",
|
||||
"resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.38.tgz",
|
||||
"integrity": "sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/node": "*"
|
||||
@ -556,7 +556,6 @@
|
||||
"version": "5.0.6",
|
||||
"resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.6.tgz",
|
||||
"integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/body-parser": "*",
|
||||
@ -568,7 +567,6 @@
|
||||
"version": "5.1.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-5.1.1.tgz",
|
||||
"integrity": "sha512-v4zIMr/cX7/d2BpAEX3KNKL/JrT1s43s96lLvvdTmza1oEvDudCqK9aF/djc/SWgy8Yh0h30TZx5VpzqFCxk5A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/node": "*",
|
||||
@ -591,7 +589,6 @@
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz",
|
||||
"integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/jsonwebtoken": {
|
||||
@ -612,11 +609,19 @@
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/multer": {
|
||||
"version": "2.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@types/multer/-/multer-2.1.0.tgz",
|
||||
"integrity": "sha512-zYZb0+nJhOHtPpGDb3vqPjwpdeGlGC157VpkqNQL+UU2qwoacoQ7MpsAmUptI/0Oa127X32JzWDqQVEXp2RcIA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/express": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@types/node": {
|
||||
"version": "22.19.11",
|
||||
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.11.tgz",
|
||||
"integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"undici-types": "~6.21.0"
|
||||
@ -638,21 +643,18 @@
|
||||
"version": "6.14.0",
|
||||
"resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.14.0.tgz",
|
||||
"integrity": "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/range-parser": {
|
||||
"version": "1.2.7",
|
||||
"resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.7.tgz",
|
||||
"integrity": "sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@types/send": {
|
||||
"version": "1.2.1",
|
||||
"resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz",
|
||||
"integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/node": "*"
|
||||
@ -662,7 +664,6 @@
|
||||
"version": "2.2.0",
|
||||
"resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-2.2.0.tgz",
|
||||
"integrity": "sha512-8mam4H1NHLtu7nmtalF7eyBH14QyOASmcxHhSfEoRyr0nP/YdoesEtU+uSRvMe96TW/HPTtkoKqQLl53N7UXMQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@types/http-errors": "*",
|
||||
@ -715,6 +716,12 @@
|
||||
"url": "https://github.com/chalk/ansi-styles?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/append-field": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/append-field/-/append-field-1.0.0.tgz",
|
||||
"integrity": "sha512-klpgFSWLW1ZEs8svjfb7g4qWY0YS5imI82dTg+QahUvJ8YqAY0P10Uk8tTyh9ZGuYEZEMaeJYCF5BFuX552hsw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/array-flatten": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
|
||||
@ -836,6 +843,23 @@
|
||||
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/buffer-from": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
|
||||
"integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/busboy": {
|
||||
"version": "1.6.0",
|
||||
"resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz",
|
||||
"integrity": "sha512-8SFQbg/0hQ9xy3UNTB0YEnsNBbWfhf7RtnzpL7TkBiTBRfrQ9Fxcnz7VJsleJpyp6rVLvXiuORqjlHi5q+PYuA==",
|
||||
"dependencies": {
|
||||
"streamsearch": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10.16.0"
|
||||
}
|
||||
},
|
||||
"node_modules/bytes": {
|
||||
"version": "3.1.2",
|
||||
"resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
|
||||
@ -892,6 +916,21 @@
|
||||
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/concat-stream": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-2.0.0.tgz",
|
||||
"integrity": "sha512-MWufYdFw53ccGjCA+Ol7XJYpAlW6/prSMzuPOTRnJGcGzuhLn4Scrz7qf6o8bROZ514ltazcIFJZevcfbo0x7A==",
|
||||
"engines": [
|
||||
"node >= 6.0"
|
||||
],
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"buffer-from": "^1.0.0",
|
||||
"inherits": "^2.0.3",
|
||||
"readable-stream": "^3.0.2",
|
||||
"typedarray": "^0.0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/content-disposition": {
|
||||
"version": "0.5.4",
|
||||
"resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
|
||||
@ -1840,6 +1879,25 @@
|
||||
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/multer": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/multer/-/multer-2.1.1.tgz",
|
||||
"integrity": "sha512-mo+QTzKlx8R7E5ylSXxWzGoXoZbOsRMpyitcht8By2KHvMbf3tjwosZ/Mu/XYU6UuJ3VZnODIrak5ZrPiPyB6A==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"append-field": "^1.0.0",
|
||||
"busboy": "^1.6.0",
|
||||
"concat-stream": "^2.0.0",
|
||||
"type-is": "^1.6.18"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 10.16.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "opencollective",
|
||||
"url": "https://opencollective.com/express"
|
||||
}
|
||||
},
|
||||
"node_modules/negotiator": {
|
||||
"version": "0.6.3",
|
||||
"resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
|
||||
@ -2178,6 +2236,20 @@
|
||||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/readable-stream": {
|
||||
"version": "3.6.2",
|
||||
"resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
|
||||
"integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"inherits": "^2.0.3",
|
||||
"string_decoder": "^1.1.1",
|
||||
"util-deprecate": "^1.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 6"
|
||||
}
|
||||
},
|
||||
"node_modules/resolve-pkg-maps": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/resolve-pkg-maps/-/resolve-pkg-maps-1.0.0.tgz",
|
||||
@ -2424,6 +2496,23 @@
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/streamsearch": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/streamsearch/-/streamsearch-1.1.0.tgz",
|
||||
"integrity": "sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==",
|
||||
"engines": {
|
||||
"node": ">=10.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/string_decoder": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
|
||||
"integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"safe-buffer": "~5.2.0"
|
||||
}
|
||||
},
|
||||
"node_modules/string-width": {
|
||||
"version": "5.1.2",
|
||||
"resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
|
||||
@ -2562,6 +2651,12 @@
|
||||
"node": ">= 0.6"
|
||||
}
|
||||
},
|
||||
"node_modules/typedarray": {
|
||||
"version": "0.0.6",
|
||||
"resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz",
|
||||
"integrity": "sha512-/aCDEGatGvZ2BIk+HmLf4ifCJFwvKFNb9/JeZPMulfgFracn9QFcAf5GO8B/mweUjSoblS5In0cWhqpfs/5PQA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/typescript": {
|
||||
"version": "5.9.3",
|
||||
"resolved": "https://registry.npmjs.org/typescript/-/typescript-5.9.3.tgz",
|
||||
@ -2580,7 +2675,6 @@
|
||||
"version": "6.21.0",
|
||||
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
|
||||
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/unpipe": {
|
||||
@ -2592,6 +2686,12 @@
|
||||
"node": ">= 0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/util-deprecate": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
|
||||
"integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/utils-merge": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
|
||||
|
||||
@ -9,6 +9,7 @@
|
||||
"db:seed": "tsx src/db/seed.ts"
|
||||
},
|
||||
"dependencies": {
|
||||
"@types/multer": "^2.1.0",
|
||||
"bcrypt": "^6.0.0",
|
||||
"cookie-parser": "^1.4.7",
|
||||
"cors": "^2.8.5",
|
||||
@ -18,6 +19,7 @@
|
||||
"google-auth-library": "^10.6.1",
|
||||
"helmet": "^8.1.0",
|
||||
"jsonwebtoken": "^9.0.3",
|
||||
"multer": "^2.1.1",
|
||||
"pg": "^8.19.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@ -1,7 +1,10 @@
|
||||
import express from 'express';
|
||||
import multer from 'multer';
|
||||
import {
|
||||
listMedia,
|
||||
createMedia,
|
||||
getMediaBySn,
|
||||
fetchOriginalImage,
|
||||
listCctv,
|
||||
listSatRequests,
|
||||
createSatRequest,
|
||||
@ -9,11 +12,13 @@ import {
|
||||
isValidSatStatus,
|
||||
requestOilInference,
|
||||
checkInferenceHealth,
|
||||
stitchImages,
|
||||
} from './aerialService.js';
|
||||
import { isValidNumber } from '../middleware/security.js';
|
||||
import { requireAuth, requirePermission } from '../auth/authMiddleware.js';
|
||||
|
||||
const router = express.Router();
|
||||
const stitchUpload = multer({ storage: multer.memoryStorage(), limits: { fileSize: 50 * 1024 * 1024 } });
|
||||
|
||||
// ============================================================
|
||||
// AERIAL_MEDIA 라우트
|
||||
@ -63,6 +68,40 @@ router.post('/media', requireAuth, requirePermission('aerial', 'CREATE'), async
|
||||
}
|
||||
});
|
||||
|
||||
// GET /api/aerial/media/:sn/download — 원본 이미지 다운로드
|
||||
router.get('/media/:sn/download', requireAuth, requirePermission('aerial', 'READ'), async (req, res) => {
|
||||
try {
|
||||
const sn = parseInt(req.params['sn'] as string, 10);
|
||||
if (!isValidNumber(sn, 1, 999999)) {
|
||||
res.status(400).json({ error: '유효하지 않은 미디어 번호' });
|
||||
return;
|
||||
}
|
||||
|
||||
const media = await getMediaBySn(sn);
|
||||
if (!media) {
|
||||
res.status(404).json({ error: '미디어를 찾을 수 없습니다.' });
|
||||
return;
|
||||
}
|
||||
|
||||
// fileId 추출: FILE_NM의 앞 36자가 UUID 형식인지 검증 (이미지 분석 생성 레코드만 다운로드 가능)
|
||||
const fileId = media.fileNm.substring(0, 36);
|
||||
const UUID_PATTERN = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
|
||||
if (!UUID_PATTERN.test(fileId) || !media.equipNm) {
|
||||
res.status(404).json({ error: '다운로드 가능한 이미지가 없습니다.' });
|
||||
return;
|
||||
}
|
||||
|
||||
const buffer = await fetchOriginalImage(media.equipNm, fileId);
|
||||
const downloadName = media.orgnlNm ?? media.fileNm;
|
||||
res.setHeader('Content-Type', 'image/jpeg');
|
||||
res.setHeader('Content-Disposition', `attachment; filename*=UTF-8''${encodeURIComponent(downloadName)}`);
|
||||
res.send(buffer);
|
||||
} catch (err) {
|
||||
console.error('[aerial] 이미지 다운로드 오류:', err);
|
||||
res.status(502).json({ error: '이미지 다운로드 실패' });
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// CCTV_CAMERA 라우트
|
||||
// ============================================================
|
||||
@ -257,6 +296,39 @@ router.post('/oil-detect', express.json({ limit: '3mb' }), requireAuth, requireP
|
||||
}
|
||||
});
|
||||
|
||||
// ============================================================
|
||||
// STITCH (이미지 합성) 라우트
|
||||
// ============================================================
|
||||
|
||||
// POST /api/aerial/stitch — 여러 이미지를 합성하여 JPEG 반환
|
||||
router.post(
|
||||
'/stitch',
|
||||
requireAuth,
|
||||
requirePermission('aerial', 'READ'),
|
||||
stitchUpload.array('files', 6),
|
||||
async (req, res) => {
|
||||
try {
|
||||
const files = req.files as Express.Multer.File[];
|
||||
if (!files || files.length < 2) {
|
||||
res.status(400).json({ error: '이미지를 최소 2장 이상 선택해주세요.' });
|
||||
return;
|
||||
}
|
||||
const fileId = `stitch_${Date.now()}`;
|
||||
const buffer = await stitchImages(files, fileId);
|
||||
res.type('image/jpeg').send(buffer);
|
||||
} catch (err) {
|
||||
const message = err instanceof Error ? err.message : String(err);
|
||||
if (message.includes('abort') || message.includes('timeout')) {
|
||||
console.error('[aerial] 스티칭 서버 타임아웃:', message);
|
||||
res.status(504).json({ error: '이미지 합성 서버 응답 시간 초과' });
|
||||
return;
|
||||
}
|
||||
console.error('[aerial] 이미지 합성 오류:', err);
|
||||
res.status(503).json({ error: '이미지 합성 서버 연결 불가' });
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
// GET /api/aerial/oil-detect/health — 추론 서버 상태 확인
|
||||
router.get('/oil-detect/health', requireAuth, async (_req, res) => {
|
||||
const health = await checkInferenceHealth();
|
||||
|
||||
@ -49,6 +49,26 @@ function rowToMedia(r: Record<string, unknown>): AerialMediaItem {
|
||||
};
|
||||
}
|
||||
|
||||
export async function getMediaBySn(sn: number): Promise<AerialMediaItem | null> {
|
||||
const { rows } = await wingPool.query(
|
||||
`SELECT AERIAL_MEDIA_SN, ACDNT_SN, FILE_NM, ORGNL_NM, FILE_PATH,
|
||||
LON, LAT, LOC_DC, EQUIP_TP_CD, EQUIP_NM, MEDIA_TP_CD,
|
||||
TAKNG_DTM, FILE_SZ, RESOLUTION, REG_DTM
|
||||
FROM wing.AERIAL_MEDIA WHERE AERIAL_MEDIA_SN = $1 AND USE_YN = 'Y'`,
|
||||
[sn]
|
||||
);
|
||||
return rows.length > 0 ? rowToMedia(rows[0]) : null;
|
||||
}
|
||||
|
||||
export async function fetchOriginalImage(camTy: string, fileId: string): Promise<Buffer> {
|
||||
const res = await fetch(`${IMAGE_API_URL}/get-original-image/${camTy}/${fileId}`, {
|
||||
signal: AbortSignal.timeout(30_000),
|
||||
});
|
||||
if (!res.ok) throw new Error(`이미지 서버 응답: ${res.status}`);
|
||||
const base64 = await res.json() as string;
|
||||
return Buffer.from(base64, 'base64');
|
||||
}
|
||||
|
||||
export async function listMedia(input: ListMediaInput): Promise<AerialMediaItem[]> {
|
||||
const conditions: string[] = ["USE_YN = 'Y'"];
|
||||
const params: (string | number)[] = [];
|
||||
@ -109,8 +129,8 @@ export async function createMedia(input: {
|
||||
TAKNG_DTM, FILE_SZ, RESOLUTION
|
||||
) VALUES (
|
||||
$1, $2, $3, $4,
|
||||
$5, $6,
|
||||
CASE WHEN $5 IS NOT NULL AND $6 IS NOT NULL THEN ST_SetSRID(ST_MakePoint($5::float, $6::float), 4326) END,
|
||||
$5::float8, $6::float8,
|
||||
CASE WHEN $5 IS NOT NULL AND $6 IS NOT NULL THEN ST_SetSRID(ST_MakePoint($5, $6), 4326) END,
|
||||
$7, $8, $9, $10,
|
||||
$11, $12, $13
|
||||
) RETURNING AERIAL_MEDIA_SN`,
|
||||
@ -344,7 +364,7 @@ export async function updateSatRequestStatus(sn: number, sttsCd: string): Promis
|
||||
// OIL INFERENCE (GPU 서버 프록시)
|
||||
// ============================================================
|
||||
|
||||
const OIL_INFERENCE_URL = process.env.OIL_INFERENCE_URL || 'http://localhost:8090';
|
||||
const IMAGE_API_URL = process.env.IMAGE_API_URL ?? 'http://localhost:5001';
|
||||
const INFERENCE_TIMEOUT_MS = 10_000;
|
||||
|
||||
export interface OilInferenceRegion {
|
||||
@ -362,13 +382,34 @@ export interface OilInferenceResult {
|
||||
regions: OilInferenceRegion[];
|
||||
}
|
||||
|
||||
/** 여러 이미지를 이미지 분석 서버의 /stitch 엔드포인트로 전송해 합성 JPEG를 반환한다. */
|
||||
export async function stitchImages(
|
||||
files: Express.Multer.File[],
|
||||
fileId: string
|
||||
): Promise<Buffer> {
|
||||
const form = new FormData();
|
||||
form.append('fileId', fileId);
|
||||
for (const f of files) {
|
||||
form.append('files', new Blob([f.buffer], { type: f.mimetype }), f.originalname);
|
||||
}
|
||||
const response = await fetch(`${IMAGE_API_URL}/stitch`, {
|
||||
method: 'POST',
|
||||
body: form,
|
||||
signal: AbortSignal.timeout(300_000),
|
||||
});
|
||||
if (!response.ok) {
|
||||
const detail = await response.text().catch(() => '');
|
||||
throw new Error(`stitch server responded ${response.status}: ${detail}`);
|
||||
}
|
||||
return Buffer.from(await response.arrayBuffer());
|
||||
}
|
||||
|
||||
/** GPU 추론 서버에 이미지를 전송하고 세그멘테이션 결과를 반환한다. */
|
||||
export async function requestOilInference(imageBase64: string): Promise<OilInferenceResult> {
|
||||
const controller = new AbortController();
|
||||
const timeout = setTimeout(() => controller.abort(), INFERENCE_TIMEOUT_MS);
|
||||
|
||||
try {
|
||||
const response = await fetch(`${OIL_INFERENCE_URL}/inference`, {
|
||||
const response = await fetch(`${IMAGE_API_URL}/inference`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ image: imageBase64 }),
|
||||
@ -389,7 +430,7 @@ export async function requestOilInference(imageBase64: string): Promise<OilInfer
|
||||
/** GPU 추론 서버 헬스체크 */
|
||||
export async function checkInferenceHealth(): Promise<{ status: string; device?: string }> {
|
||||
try {
|
||||
const response = await fetch(`${OIL_INFERENCE_URL}/health`, {
|
||||
const response = await fetch(`${IMAGE_API_URL}/health`, {
|
||||
signal: AbortSignal.timeout(3000),
|
||||
});
|
||||
if (!response.ok) throw new Error(`status ${response.status}`);
|
||||
|
||||
174
backend/src/prediction/imageAnalyzeService.ts
Normal file
174
backend/src/prediction/imageAnalyzeService.ts
Normal file
@ -0,0 +1,174 @@
|
||||
import crypto from 'crypto';
|
||||
import { wingPool } from '../db/wingDb.js';
|
||||
import { createMedia } from '../aerial/aerialService.js';
|
||||
|
||||
const IMAGE_API_URL = process.env.IMAGE_API_URL ?? 'http://localhost:5001';
|
||||
|
||||
// 유류 클래스 → UI 유종명 매핑
|
||||
const CLASS_ID_TO_OIL_TYPE: Record<string, string> = {
|
||||
'검정': '벙커C유',
|
||||
'갈색': '벙커C유',
|
||||
'무지개': '경유',
|
||||
'은색': '등유',
|
||||
};
|
||||
|
||||
// 유종명 → DB 코드 매핑
|
||||
const OIL_DB_CODE_MAP: Record<string, string> = {
|
||||
'벙커C유': 'BUNKER_C',
|
||||
'원유': 'CRUDE_OIL',
|
||||
'경유': 'DIESEL',
|
||||
'등유': 'GASOLINE',
|
||||
};
|
||||
|
||||
interface OilPolygon {
|
||||
classId: string;
|
||||
area: number;
|
||||
volume: number;
|
||||
note: string;
|
||||
thickness: number;
|
||||
wkt: string;
|
||||
}
|
||||
|
||||
interface ImageServerResponse {
|
||||
meta: string;
|
||||
data: OilPolygon[];
|
||||
}
|
||||
|
||||
export interface ImageAnalyzeResult {
|
||||
acdntSn: number;
|
||||
lat: number;
|
||||
lon: number;
|
||||
oilType: string;
|
||||
area: number;
|
||||
volume: number;
|
||||
fileId: string;
|
||||
occurredAt: string;
|
||||
}
|
||||
|
||||
/**
|
||||
* mx15hdi CSV 컬럼 순서:
|
||||
* Filename, Tlat_d, Tlat_m, Tlat_s, Tlon_d, Tlon_m, Tlon_s,
|
||||
* Alat_d, Alat_m, Alat_s, Alon_d, Alon_m, Alon_s,
|
||||
* Az, El, Alt, Date1, Date2, Date3, Time1, Time2, Time3
|
||||
*/
|
||||
function parseMeta(metaStr: string): { lat: number; lon: number; occurredAt: string } {
|
||||
const parts = metaStr.split(',');
|
||||
const tlat_d = parseFloat(parts[1]);
|
||||
const tlat_m = parseFloat(parts[2]);
|
||||
const tlat_s = parseFloat(parts[3]);
|
||||
const tlon_d = parseFloat(parts[4]);
|
||||
const tlon_m = parseFloat(parts[5]);
|
||||
const tlon_s = parseFloat(parts[6]);
|
||||
|
||||
const lat = tlat_d + tlat_m / 60 + tlat_s / 3600;
|
||||
const lon = tlon_d + tlon_m / 60 + tlon_s / 3600;
|
||||
|
||||
// Date: Date1(DD), Date2(MM), Date3(YYYY) / Time: Time1(HH), Time2(mm), Time3(ss)
|
||||
const dd = (parts[16] ?? '01').padStart(2, '0');
|
||||
const mm = (parts[17] ?? '01').padStart(2, '0');
|
||||
const yyyy = parts[18] ?? new Date().getFullYear().toString();
|
||||
const time1 = (parts[19] ?? '00').padStart(2, '0');
|
||||
const time2 = (parts[20] ?? '00').padStart(2, '0');
|
||||
const occurredAt = `${yyyy}-${mm}-${dd}T${time1}:${time2}:00+09:00`;
|
||||
|
||||
return { lat, lon, occurredAt };
|
||||
}
|
||||
|
||||
export async function analyzeImageFile(imageBuffer: Buffer, originalName: string): Promise<ImageAnalyzeResult> {
|
||||
const fileId = crypto.randomUUID();
|
||||
|
||||
// camTy는 현재 "mx15hdi"로 하드코딩한다.
|
||||
// TODO: 추후 이미지 EXIF에서 카메라 모델명을 읽어 camTy를 자동 판별하는 로직을
|
||||
// 이미지 분석 서버(api.py)에 추가할 예정이다. (check_camera_info 함수 활용)
|
||||
const camTy = 'mx15hdi';
|
||||
|
||||
// 이미지 분석 서버 호출
|
||||
const formData = new FormData();
|
||||
const blob = new Blob([imageBuffer]);
|
||||
formData.append('camTy', camTy);
|
||||
formData.append('fileId', fileId);
|
||||
formData.append('image', blob, originalName);
|
||||
|
||||
let serverResponse: ImageServerResponse;
|
||||
try {
|
||||
const res = await fetch(`${IMAGE_API_URL}/run-script/`, {
|
||||
method: 'POST',
|
||||
body: formData,
|
||||
signal: AbortSignal.timeout(300_000),
|
||||
});
|
||||
|
||||
if (!res.ok) {
|
||||
const text = await res.text();
|
||||
if (res.status === 400 && text.includes('GPS')) {
|
||||
throw Object.assign(new Error('GPS_NOT_FOUND'), { code: 'GPS_NOT_FOUND' });
|
||||
}
|
||||
throw new Error(`이미지 분석 서버 오류: ${res.status} - ${text}`);
|
||||
}
|
||||
|
||||
serverResponse = await res.json() as ImageServerResponse;
|
||||
} catch (err: unknown) {
|
||||
if (err instanceof Error && (err as NodeJS.ErrnoException).code === 'GPS_NOT_FOUND') throw err;
|
||||
if (err instanceof Error && err.name === 'TimeoutError') {
|
||||
throw Object.assign(new Error('TIMEOUT'), { code: 'TIMEOUT' });
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
|
||||
// 응답 파싱
|
||||
const { lat, lon, occurredAt } = parseMeta(serverResponse.meta);
|
||||
const firstOil = serverResponse.data[0];
|
||||
const oilType = firstOil ? (CLASS_ID_TO_OIL_TYPE[firstOil.classId] ?? '벙커C유') : '벙커C유';
|
||||
const area = firstOil?.area ?? 0;
|
||||
const volume = firstOil?.volume ?? 0;
|
||||
|
||||
// ACDNT INSERT
|
||||
const acdntNm = `이미지분석_${new Date().toISOString().slice(0, 16).replace('T', ' ')}`;
|
||||
const acdntRes = await wingPool.query(
|
||||
`INSERT INTO wing.ACDNT
|
||||
(ACDNT_CD, ACDNT_NM, ACDNT_TP_CD, OCCRN_DTM, LAT, LNG, ACDNT_STTS_CD, USE_YN, REG_DTM)
|
||||
VALUES (
|
||||
'INC-' || EXTRACT(YEAR FROM NOW())::TEXT || '-' ||
|
||||
LPAD(
|
||||
(SELECT COALESCE(MAX(CAST(SPLIT_PART(ACDNT_CD, '-', 3) AS INTEGER)), 0) + 1
|
||||
FROM wing.ACDNT
|
||||
WHERE ACDNT_CD LIKE 'INC-' || EXTRACT(YEAR FROM NOW())::TEXT || '-%')::TEXT,
|
||||
4, '0'
|
||||
),
|
||||
$1, '유류유출', $2, $3, $4, 'ACTIVE', 'Y', NOW()
|
||||
)
|
||||
RETURNING ACDNT_SN`,
|
||||
[acdntNm, occurredAt, lat, lon]
|
||||
);
|
||||
const acdntSn: number = acdntRes.rows[0].acdnt_sn;
|
||||
|
||||
// SPIL_DATA INSERT (img_rslt_data에 분석 원본 저장)
|
||||
await wingPool.query(
|
||||
`INSERT INTO wing.SPIL_DATA
|
||||
(ACDNT_SN, OIL_TP_CD, SPIL_QTY, SPIL_UNIT_CD, SPIL_TP_CD, FCST_HR, IMG_RSLT_DATA, REG_DTM)
|
||||
VALUES ($1, $2, $3, 'KL', 'CONTINUOUS', 48, $4, NOW())`,
|
||||
[
|
||||
acdntSn,
|
||||
OIL_DB_CODE_MAP[oilType] ?? 'BUNKER_C',
|
||||
volume,
|
||||
JSON.stringify(serverResponse),
|
||||
]
|
||||
);
|
||||
|
||||
// AERIAL_MEDIA INSERT (영상사진관리 목록에서 조회 가능하도록 저장)
|
||||
const fileSizeMb = (imageBuffer.length / (1024 * 1024)).toFixed(1) + ' MB';
|
||||
await createMedia({
|
||||
fileNm: `${fileId}_${originalName}`,
|
||||
orgnlNm: originalName,
|
||||
acdntSn,
|
||||
lon,
|
||||
lat,
|
||||
locDc: `${lon.toFixed(4)} + ${lat.toFixed(4)}`,
|
||||
equipTpCd: 'drone',
|
||||
equipNm: camTy,
|
||||
mediaTpCd: '사진',
|
||||
takngDtm: occurredAt,
|
||||
fileSz: fileSizeMb,
|
||||
});
|
||||
|
||||
return { acdntSn, lat, lon, oilType, area, volume, fileId, occurredAt };
|
||||
}
|
||||
@ -1,11 +1,15 @@
|
||||
import express from 'express';
|
||||
import multer from 'multer';
|
||||
import {
|
||||
listAnalyses, getAnalysisDetail, getBacktrack, listBacktracksByAcdnt,
|
||||
createBacktrack, saveBoomLine, listBoomLines,
|
||||
createBacktrack, saveBoomLine, listBoomLines, getAnalysisTrajectory,
|
||||
} from './predictionService.js';
|
||||
import { analyzeImageFile } from './imageAnalyzeService.js';
|
||||
import { isValidNumber } from '../middleware/security.js';
|
||||
import { requireAuth, requirePermission } from '../auth/authMiddleware.js';
|
||||
|
||||
const upload = multer({ storage: multer.memoryStorage(), limits: { fileSize: 50 * 1024 * 1024 } });
|
||||
|
||||
const router = express.Router();
|
||||
|
||||
// GET /api/prediction/analyses — 분석 목록
|
||||
@ -40,6 +44,26 @@ router.get('/analyses/:acdntSn', requireAuth, requirePermission('prediction', 'R
|
||||
}
|
||||
});
|
||||
|
||||
// GET /api/prediction/analyses/:acdntSn/trajectory — 최신 OpenDrift 결과 조회
|
||||
router.get('/analyses/:acdntSn/trajectory', requireAuth, requirePermission('prediction', 'READ'), async (req, res) => {
|
||||
try {
|
||||
const acdntSn = parseInt(req.params.acdntSn as string, 10);
|
||||
if (!isValidNumber(acdntSn, 1, 999999)) {
|
||||
res.status(400).json({ error: '유효하지 않은 사고 번호' });
|
||||
return;
|
||||
}
|
||||
const result = await getAnalysisTrajectory(acdntSn);
|
||||
if (!result) {
|
||||
res.json({ trajectory: null, summary: null });
|
||||
return;
|
||||
}
|
||||
res.json(result);
|
||||
} catch (err) {
|
||||
console.error('[prediction] trajectory 조회 오류:', err);
|
||||
res.status(500).json({ error: 'trajectory 조회 실패' });
|
||||
}
|
||||
});
|
||||
|
||||
// GET /api/prediction/backtrack — 사고별 역추적 목록
|
||||
router.get('/backtrack', requireAuth, requirePermission('prediction', 'READ'), async (req, res) => {
|
||||
try {
|
||||
@ -124,4 +148,36 @@ router.post('/boom', requireAuth, requirePermission('prediction', 'CREATE'), asy
|
||||
}
|
||||
});
|
||||
|
||||
// POST /api/prediction/image-analyze — 이미지 업로드 분석
|
||||
router.post(
|
||||
'/image-analyze',
|
||||
requireAuth,
|
||||
requirePermission('prediction', 'CREATE'),
|
||||
upload.single('image'),
|
||||
async (req, res) => {
|
||||
try {
|
||||
if (!req.file) {
|
||||
res.status(400).json({ error: '이미지 파일이 필요합니다' });
|
||||
return;
|
||||
}
|
||||
const result = await analyzeImageFile(req.file.buffer, req.file.originalname);
|
||||
res.json(result);
|
||||
} catch (err: unknown) {
|
||||
if (err instanceof Error) {
|
||||
const code = (err as NodeJS.ErrnoException).code;
|
||||
if (code === 'GPS_NOT_FOUND') {
|
||||
res.status(422).json({ error: 'GPS_NOT_FOUND', message: 'GPS 정보가 없는 이미지입니다' });
|
||||
return;
|
||||
}
|
||||
if (code === 'TIMEOUT') {
|
||||
res.status(504).json({ error: 'TIMEOUT', message: '이미지 분석 서버 응답 시간 초과' });
|
||||
return;
|
||||
}
|
||||
}
|
||||
console.error('[prediction] 이미지 분석 오류:', err);
|
||||
res.status(500).json({ error: '이미지 분석 실패' });
|
||||
}
|
||||
}
|
||||
);
|
||||
|
||||
export default router;
|
||||
|
||||
@ -404,6 +404,100 @@ export async function saveBoomLine(input: SaveBoomLineInput): Promise<{ boomLine
|
||||
return { boomLineSn: Number((rows[0] as Record<string, unknown>)['boom_line_sn']) };
|
||||
}
|
||||
|
||||
interface TrajectoryParticle {
|
||||
lat: number;
|
||||
lon: number;
|
||||
stranded?: 0 | 1;
|
||||
}
|
||||
|
||||
interface TrajectoryWindPoint {
|
||||
lat: number;
|
||||
lon: number;
|
||||
wind_speed: number;
|
||||
wind_direction: number;
|
||||
}
|
||||
|
||||
interface TrajectoryHydrGrid {
|
||||
lonInterval: number[];
|
||||
boundLonLat: { top: number; bottom: number; left: number; right: number };
|
||||
rows: number;
|
||||
cols: number;
|
||||
latInterval: number[];
|
||||
}
|
||||
|
||||
interface TrajectoryTimeStep {
|
||||
particles: TrajectoryParticle[];
|
||||
remaining_volume_m3: number;
|
||||
weathered_volume_m3: number;
|
||||
pollution_area_km2: number;
|
||||
beached_volume_m3: number;
|
||||
pollution_coast_length_m: number;
|
||||
center_lat?: number;
|
||||
center_lon?: number;
|
||||
wind_data?: TrajectoryWindPoint[];
|
||||
hydr_data?: [number[][], number[][]];
|
||||
hydr_grid?: TrajectoryHydrGrid;
|
||||
}
|
||||
|
||||
interface TrajectoryResult {
|
||||
trajectory: Array<{ lat: number; lon: number; time: number; particle: number; stranded?: 0 | 1 }>;
|
||||
summary: {
|
||||
remainingVolume: number;
|
||||
weatheredVolume: number;
|
||||
pollutionArea: number;
|
||||
beachedVolume: number;
|
||||
pollutionCoastLength: number;
|
||||
};
|
||||
centerPoints: Array<{ lat: number; lon: number; time: number }>;
|
||||
windData: TrajectoryWindPoint[][];
|
||||
hydrData: ({ value: [number[][], number[][]]; grid: TrajectoryHydrGrid } | null)[];
|
||||
}
|
||||
|
||||
function transformTrajectoryResult(rawResult: TrajectoryTimeStep[]): TrajectoryResult {
|
||||
const trajectory = rawResult.flatMap((step, stepIdx) =>
|
||||
step.particles.map((p, i) => ({
|
||||
lat: p.lat,
|
||||
lon: p.lon,
|
||||
time: stepIdx,
|
||||
particle: i,
|
||||
stranded: p.stranded,
|
||||
}))
|
||||
);
|
||||
const lastStep = rawResult[rawResult.length - 1];
|
||||
const summary = {
|
||||
remainingVolume: lastStep.remaining_volume_m3,
|
||||
weatheredVolume: lastStep.weathered_volume_m3,
|
||||
pollutionArea: lastStep.pollution_area_km2,
|
||||
beachedVolume: lastStep.beached_volume_m3,
|
||||
pollutionCoastLength: lastStep.pollution_coast_length_m,
|
||||
};
|
||||
const centerPoints = rawResult
|
||||
.map((step, stepIdx) =>
|
||||
step.center_lat != null && step.center_lon != null
|
||||
? { lat: step.center_lat, lon: step.center_lon, time: stepIdx }
|
||||
: null
|
||||
)
|
||||
.filter((p): p is { lat: number; lon: number; time: number } => p !== null);
|
||||
const windData = rawResult.map((step) => step.wind_data ?? []);
|
||||
const hydrData = rawResult.map((step) =>
|
||||
step.hydr_data && step.hydr_grid
|
||||
? { value: step.hydr_data, grid: step.hydr_grid }
|
||||
: null
|
||||
);
|
||||
return { trajectory, summary, centerPoints, windData, hydrData };
|
||||
}
|
||||
|
||||
export async function getAnalysisTrajectory(acdntSn: number): Promise<TrajectoryResult | null> {
|
||||
const sql = `
|
||||
SELECT RSLT_DATA FROM wing.PRED_EXEC
|
||||
WHERE ACDNT_SN = $1 AND ALGO_CD = 'OPENDRIFT' AND EXEC_STTS_CD = 'COMPLETED'
|
||||
ORDER BY CMPL_DTM DESC LIMIT 1
|
||||
`;
|
||||
const { rows } = await wingPool.query(sql, [acdntSn]);
|
||||
if (rows.length === 0 || !rows[0].rslt_data) return null;
|
||||
return transformTrajectoryResult(rows[0].rslt_data as TrajectoryTimeStep[]);
|
||||
}
|
||||
|
||||
export async function listBoomLines(acdntSn: number): Promise<BoomLineItem[]> {
|
||||
const sql = `
|
||||
SELECT BOOM_LINE_SN, ACDNT_SN, BOOM_NM, PRIORITY_ORD,
|
||||
|
||||
@ -1,227 +1,452 @@
|
||||
import { Router, Request, Response } from 'express'
|
||||
import { wingPool } from '../db/wingDb.js'
|
||||
import { requireAuth } from '../auth/authMiddleware.js'
|
||||
import {
|
||||
isValidLatitude,
|
||||
isValidLongitude,
|
||||
isValidNumber,
|
||||
isAllowedValue,
|
||||
isValidStringLength,
|
||||
escapeHtml,
|
||||
} from '../middleware/security.js'
|
||||
|
||||
const router = Router()
|
||||
|
||||
// 허용된 모델 목록 (화이트리스트)
|
||||
const ALLOWED_MODELS = ['KOSPS', 'POSEIDON', 'OpenDrift', '앙상블'] as const
|
||||
type AllowedModel = typeof ALLOWED_MODELS[number]
|
||||
const PYTHON_API_URL = process.env.PYTHON_API_URL ?? 'http://localhost:5003'
|
||||
const POLL_INTERVAL_MS = 3000
|
||||
const POLL_TIMEOUT_MS = 30 * 60 * 1000 // 30분
|
||||
|
||||
// 허용된 유종 목록
|
||||
const ALLOWED_OIL_TYPES = ['원유', '벙커C유', '경유', '휘발유', '등유', '윤활유', '기타'] as const
|
||||
|
||||
// 허용된 유출 유형 목록
|
||||
const ALLOWED_SPILL_TYPES = ['연속유출', '순간유출'] as const
|
||||
|
||||
interface ParticlePoint {
|
||||
lat: number
|
||||
lon: number
|
||||
time: number
|
||||
particle: number
|
||||
// 유종 매핑: 한국어 UI 선택값 → OpenDrift 유종 코드
|
||||
// 추후 DB/설정 파일로 외부화 예정 (개발 단계 임시 구현)
|
||||
const OIL_TYPE_MAP: Record<string, string> = {
|
||||
'벙커C유': 'GENERIC BUNKER C',
|
||||
'경유': 'GENERIC DIESEL',
|
||||
'원유': 'WEST TEXAS INTERMEDIATE (WTI)',
|
||||
'중유': 'GENERIC HEAVY FUEL OIL',
|
||||
'등유': 'FUEL OIL NO.1 (KEROSENE)',
|
||||
'휘발유': 'GENERIC GASOLINE',
|
||||
}
|
||||
|
||||
/**
|
||||
* POST /api/simulation/run
|
||||
* 오일 확산 시뮬레이션 실행
|
||||
*
|
||||
* 보안 조치:
|
||||
* - 화이트리스트 기반 모델명 검증
|
||||
* - 좌표 범위 검증 (위도 -90~90, 경도 -180~180)
|
||||
* - 숫자 범위 검증 (duration, spill_amount)
|
||||
* - 문자열 길이 제한
|
||||
*/
|
||||
router.post('/run', async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { model, lat, lon, duration_hours, oil_type, spill_amount, spill_type } = req.body
|
||||
// 유종 매핑: 한국어 UI → DB 저장 코드
|
||||
const OIL_DB_CODE_MAP: Record<string, string> = {
|
||||
'벙커C유': 'BUNKER_C',
|
||||
'경유': 'DIESEL',
|
||||
'원유': 'CRUDE_OIL',
|
||||
'중유': 'HEAVY_FUEL_OIL',
|
||||
'등유': 'KEROSENE',
|
||||
'휘발유': 'GASOLINE',
|
||||
}
|
||||
|
||||
// 1. 필수 파라미터 존재 검증
|
||||
if (model === undefined || lat === undefined || lon === undefined || duration_hours === undefined) {
|
||||
// 유출 형태 매핑: 한국어 UI → DB 저장 코드
|
||||
const SPIL_TYPE_MAP: Record<string, string> = {
|
||||
'연속': 'CONTINUOUS',
|
||||
'비연속': 'DISCONTINUOUS',
|
||||
'순간 유출': 'INSTANT',
|
||||
}
|
||||
|
||||
// 단위 매핑: 한국어 UI → DB 저장 코드
|
||||
const UNIT_MAP: Record<string, string> = {
|
||||
'kL': 'KL', 'ton': 'TON', 'barrel': 'BBL',
|
||||
}
|
||||
|
||||
// ============================================================
|
||||
// POST /api/simulation/run
|
||||
// 확산 시뮬레이션 실행 (OpenDrift)
|
||||
// ============================================================
|
||||
/**
|
||||
* OpenDrift 확산 시뮬레이션을 실행한다.
|
||||
* Python FastAPI 서버에 작업을 제출하고 job_id를 받아
|
||||
* 백그라운드에서 폴링하며 결과를 DB에 저장한다.
|
||||
* 프론트엔드는 execSn으로 GET /status/:execSn을 폴링하여 결과를 수신한다.
|
||||
*/
|
||||
router.post('/run', requireAuth, async (req: Request, res: Response) => {
|
||||
try {
|
||||
const { acdntSn: rawAcdntSn, acdntNm, spillUnit, spillTypeCd,
|
||||
lat, lon, runTime, matTy, matVol, spillTime, startTime } = req.body
|
||||
|
||||
// 1. 필수 파라미터 검증
|
||||
if (lat === undefined || lon === undefined || runTime === undefined) {
|
||||
return res.status(400).json({
|
||||
error: '필수 파라미터 누락',
|
||||
required: ['model', 'lat', 'lon', 'duration_hours']
|
||||
required: ['lat', 'lon', 'runTime'],
|
||||
})
|
||||
}
|
||||
|
||||
// 2. 모델명 화이트리스트 검증
|
||||
if (!isAllowedValue(model, [...ALLOWED_MODELS])) {
|
||||
return res.status(400).json({
|
||||
error: '유효하지 않은 모델',
|
||||
message: `허용된 모델: ${ALLOWED_MODELS.join(', ')}`,
|
||||
})
|
||||
}
|
||||
|
||||
// 3. 위도/경도 범위 검증
|
||||
if (!isValidLatitude(lat)) {
|
||||
return res.status(400).json({
|
||||
error: '유효하지 않은 위도',
|
||||
message: '위도는 -90 ~ 90 범위의 숫자여야 합니다.'
|
||||
})
|
||||
return res.status(400).json({ error: '유효하지 않은 위도', message: '위도는 -90~90 범위여야 합니다.' })
|
||||
}
|
||||
if (!isValidLongitude(lon)) {
|
||||
return res.status(400).json({
|
||||
error: '유효하지 않은 경도',
|
||||
message: '경도는 -180 ~ 180 범위의 숫자여야 합니다.'
|
||||
})
|
||||
return res.status(400).json({ error: '유효하지 않은 경도', message: '경도는 -180~180 범위여야 합니다.' })
|
||||
}
|
||||
if (!isValidNumber(runTime, 1, 720)) {
|
||||
return res.status(400).json({ error: '유효하지 않은 예측 시간', message: '예측 시간은 1~720 범위여야 합니다.' })
|
||||
}
|
||||
if (matVol !== undefined && !isValidNumber(matVol, 0, 1000000)) {
|
||||
return res.status(400).json({ error: '유효하지 않은 유출량' })
|
||||
}
|
||||
if (matTy !== undefined && (typeof matTy !== 'string' || !isValidStringLength(matTy, 50))) {
|
||||
return res.status(400).json({ error: '유효하지 않은 유종' })
|
||||
}
|
||||
// acdntSn 없는 경우 acdntNm 필수
|
||||
if (!rawAcdntSn && (!acdntNm || typeof acdntNm !== 'string' || !acdntNm.trim())) {
|
||||
return res.status(400).json({ error: '사고를 선택하거나 사고명을 입력해야 합니다.' })
|
||||
}
|
||||
if (acdntNm && (typeof acdntNm !== 'string' || !isValidStringLength(acdntNm, 200))) {
|
||||
return res.status(400).json({ error: '사고명은 200자 이내여야 합니다.' })
|
||||
}
|
||||
|
||||
// 4. 예측 시간 범위 검증 (1~720시간 = 최대 30일)
|
||||
if (!isValidNumber(duration_hours, 1, 720)) {
|
||||
return res.status(400).json({
|
||||
error: '유효하지 않은 예측 시간',
|
||||
message: '예측 시간은 1~720 범위의 숫자여야 합니다.'
|
||||
})
|
||||
}
|
||||
// 1-B. acdntSn 미제공 시 ACDNT + SPIL_DATA 생성
|
||||
let resolvedAcdntSn: number | null = rawAcdntSn ? Number(rawAcdntSn) : null
|
||||
let resolvedSpilDataSn: number | null = null
|
||||
|
||||
// 5. 선택적 파라미터 검증
|
||||
if (oil_type !== undefined) {
|
||||
if (typeof oil_type !== 'string' || !isValidStringLength(oil_type, 50)) {
|
||||
return res.status(400).json({ error: '유효하지 않은 유종' })
|
||||
if (!resolvedAcdntSn && acdntNm) {
|
||||
try {
|
||||
const occrn = startTime ?? new Date().toISOString()
|
||||
const acdntRes = await wingPool.query(
|
||||
`INSERT INTO wing.ACDNT
|
||||
(ACDNT_CD, ACDNT_NM, ACDNT_TP_CD, OCCRN_DTM, LAT, LNG, ACDNT_STTS_CD, USE_YN, REG_DTM)
|
||||
VALUES (
|
||||
'INC-' || EXTRACT(YEAR FROM NOW())::TEXT || '-' ||
|
||||
LPAD(
|
||||
(SELECT COALESCE(MAX(CAST(SPLIT_PART(ACDNT_CD, '-', 3) AS INTEGER)), 0) + 1
|
||||
FROM wing.ACDNT
|
||||
WHERE ACDNT_CD LIKE 'INC-' || EXTRACT(YEAR FROM NOW())::TEXT || '-%')::TEXT,
|
||||
4, '0'
|
||||
),
|
||||
$1, '유류유출', $2, $3, $4, 'ACTIVE', 'Y', NOW()
|
||||
)
|
||||
RETURNING ACDNT_SN`,
|
||||
[acdntNm.trim(), occrn, lat, lon]
|
||||
)
|
||||
resolvedAcdntSn = acdntRes.rows[0].acdnt_sn as number
|
||||
|
||||
const spilRes = await wingPool.query(
|
||||
`INSERT INTO wing.SPIL_DATA (ACDNT_SN, OIL_TP_CD, SPIL_QTY, SPIL_UNIT_CD, SPIL_TP_CD, FCST_HR, REG_DTM)
|
||||
VALUES ($1, $2, $3, $4, $5, $6, NOW())
|
||||
RETURNING SPIL_DATA_SN`,
|
||||
[
|
||||
resolvedAcdntSn,
|
||||
OIL_DB_CODE_MAP[matTy as string] ?? 'BUNKER_C',
|
||||
matVol ?? 0,
|
||||
UNIT_MAP[spillUnit as string] ?? 'KL',
|
||||
SPIL_TYPE_MAP[spillTypeCd as string] ?? 'CONTINUOUS',
|
||||
runTime,
|
||||
]
|
||||
)
|
||||
resolvedSpilDataSn = spilRes.rows[0].spil_data_sn as number
|
||||
} catch (dbErr) {
|
||||
console.error('[simulation] ACDNT/SPIL_DATA INSERT 실패:', dbErr)
|
||||
return res.status(500).json({ error: '사고 정보 생성 실패' })
|
||||
}
|
||||
}
|
||||
|
||||
if (spill_amount !== undefined) {
|
||||
if (!isValidNumber(spill_amount, 0, 1000000)) {
|
||||
return res.status(400).json({
|
||||
error: '유효하지 않은 유출량',
|
||||
message: '유출량은 0~1,000,000 범위의 숫자여야 합니다.'
|
||||
// 2. Python NC 파일 존재 여부 확인
|
||||
try {
|
||||
const checkRes = await fetch(`${PYTHON_API_URL}/check-nc`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({ lat, lon, startTime }),
|
||||
signal: AbortSignal.timeout(5000),
|
||||
})
|
||||
if (!checkRes.ok) {
|
||||
return res.status(409).json({
|
||||
error: '해당 좌표의 해양 기상 데이터가 없습니다.',
|
||||
message: 'NC 파일이 준비되지 않았습니다.',
|
||||
})
|
||||
}
|
||||
} catch {
|
||||
// Python 서버 미기동 — 5번에서 처리
|
||||
}
|
||||
|
||||
if (spill_type !== undefined) {
|
||||
if (typeof spill_type !== 'string' || !isValidStringLength(spill_type, 50)) {
|
||||
return res.status(400).json({ error: '유효하지 않은 유출 유형' })
|
||||
// 3. 기존 사고의 경우 SPIL_DATA_SN 조회
|
||||
if (resolvedAcdntSn && !resolvedSpilDataSn) {
|
||||
try {
|
||||
const spilRes = await wingPool.query(
|
||||
`SELECT SPIL_DATA_SN FROM wing.SPIL_DATA WHERE ACDNT_SN = $1 ORDER BY SPIL_DATA_SN DESC LIMIT 1`,
|
||||
[resolvedAcdntSn]
|
||||
)
|
||||
if (spilRes.rows.length > 0) {
|
||||
resolvedSpilDataSn = spilRes.rows[0].spil_data_sn as number
|
||||
}
|
||||
} catch (dbErr) {
|
||||
console.error('[simulation] SPIL_DATA 조회 실패:', dbErr)
|
||||
}
|
||||
}
|
||||
|
||||
// 검증 완료 - 시뮬레이션 실행
|
||||
const trajectory = generateDemoTrajectory(
|
||||
lat,
|
||||
lon,
|
||||
duration_hours,
|
||||
model,
|
||||
20
|
||||
// 4. PRED_EXEC INSERT (PENDING) — ACDNT_SN 포함 (NOT NULL FK)
|
||||
const execNm = `EXPC_${Date.now()}`
|
||||
let predExecSn: number
|
||||
try {
|
||||
const insertRes = await wingPool.query(
|
||||
`INSERT INTO wing.PRED_EXEC (ACDNT_SN, SPIL_DATA_SN, ALGO_CD, EXEC_STTS_CD, EXEC_NM, BGNG_DTM)
|
||||
VALUES ($1, $2, 'OPENDRIFT', 'PENDING', $3, NOW())
|
||||
RETURNING PRED_EXEC_SN`,
|
||||
[resolvedAcdntSn, resolvedSpilDataSn, execNm]
|
||||
)
|
||||
predExecSn = insertRes.rows[0].pred_exec_sn as number
|
||||
} catch (dbErr) {
|
||||
console.error('[simulation] PRED_EXEC INSERT 실패:', dbErr)
|
||||
return res.status(500).json({ error: '분석 기록 생성 실패' })
|
||||
}
|
||||
|
||||
// matTy 변환: 한국어 유종 → OpenDrift 유종 코드
|
||||
// 매핑 대상이 아니면 원본 값 그대로 사용 (영문 직접 입력 대응)
|
||||
const odMatTy = matTy !== undefined ? (OIL_TYPE_MAP[matTy as string] ?? (matTy as string)) : undefined
|
||||
|
||||
// 5. Python /run-model 호출
|
||||
let jobId: string
|
||||
try {
|
||||
const pythonRes = await fetch(`${PYTHON_API_URL}/run-model`, {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
lat,
|
||||
lon,
|
||||
startTime,
|
||||
runTime,
|
||||
matTy: odMatTy,
|
||||
matVol,
|
||||
spillTime,
|
||||
name: execNm,
|
||||
}),
|
||||
signal: AbortSignal.timeout(10000),
|
||||
})
|
||||
|
||||
if (pythonRes.status === 503) {
|
||||
const errData = await pythonRes.json() as { error?: string }
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC SET EXEC_STTS_CD='FAILED', ERR_MSG=$1, CMPL_DTM=NOW() WHERE PRED_EXEC_SN=$2`,
|
||||
[errData.error || '분석 서버 포화', predExecSn]
|
||||
)
|
||||
return res.status(503).json({ error: errData.error || '분석 서버가 사용 중입니다. 잠시 후 재시도해 주세요.' })
|
||||
}
|
||||
|
||||
if (!pythonRes.ok) {
|
||||
throw new Error(`Python 서버 응답 오류: ${pythonRes.status}`)
|
||||
}
|
||||
|
||||
const pythonData = await pythonRes.json() as { job_id: string }
|
||||
jobId = pythonData.job_id
|
||||
} catch {
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC SET EXEC_STTS_CD='FAILED', ERR_MSG='Python 분석 서버에 연결할 수 없습니다.', CMPL_DTM=NOW() WHERE PRED_EXEC_SN=$1`,
|
||||
[predExecSn]
|
||||
)
|
||||
return res.status(503).json({ error: 'Python 분석 서버에 연결할 수 없습니다.' })
|
||||
}
|
||||
|
||||
// 6. RUNNING 업데이트
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC SET EXEC_STTS_CD='RUNNING' WHERE PRED_EXEC_SN=$1`,
|
||||
[predExecSn]
|
||||
)
|
||||
|
||||
res.json({
|
||||
success: true,
|
||||
model: escapeHtml(String(model)),
|
||||
parameters: {
|
||||
lat,
|
||||
lon,
|
||||
duration_hours,
|
||||
oil_type: oil_type ? escapeHtml(String(oil_type)) : undefined,
|
||||
spill_amount,
|
||||
spill_type: spill_type ? escapeHtml(String(spill_type)) : undefined,
|
||||
},
|
||||
trajectory,
|
||||
metadata: {
|
||||
particle_count: 20,
|
||||
time_steps: duration_hours + 1,
|
||||
generated_at: new Date().toISOString()
|
||||
}
|
||||
})
|
||||
// 7. 즉시 응답 (프론트엔드는 execSn으로 폴링, acdntSn은 신규 생성 사고 추적용)
|
||||
res.json({ success: true, execSn: predExecSn, acdntSn: resolvedAcdntSn, status: 'RUNNING' })
|
||||
|
||||
// 8. 백그라운드 폴링 시작
|
||||
pollAndSave(jobId, predExecSn).catch((err: unknown) =>
|
||||
console.error('[simulation] pollAndSave 오류:', err)
|
||||
)
|
||||
} catch {
|
||||
// 내부 오류 메시지 노출 방지
|
||||
res.status(500).json({
|
||||
error: '시뮬레이션 실행 실패',
|
||||
message: '서버 내부 오류가 발생했습니다.'
|
||||
})
|
||||
res.status(500).json({ error: '시뮬레이션 실행 실패', message: '서버 내부 오류가 발생했습니다.' })
|
||||
}
|
||||
})
|
||||
|
||||
// ============================================================
|
||||
// GET /api/simulation/status/:execSn
|
||||
// 시뮬레이션 실행 상태 및 결과 조회
|
||||
// ============================================================
|
||||
/**
|
||||
* 데모 궤적 데이터 생성
|
||||
* PRED_EXEC 테이블에서 실행 상태를 조회한다.
|
||||
* DB 상태(COMPLETED/FAILED)를 프론트 상태(DONE/ERROR)로 매핑하여 반환한다.
|
||||
*/
|
||||
function generateDemoTrajectory(
|
||||
startLat: number,
|
||||
startLon: number,
|
||||
hours: number,
|
||||
model: string,
|
||||
particleCount: number
|
||||
): ParticlePoint[] {
|
||||
const trajectory: ParticlePoint[] = []
|
||||
|
||||
const modelFactors: Record<string, number> = {
|
||||
'KOSPS': 0.004,
|
||||
'POSEIDON': 0.006,
|
||||
'OpenDrift': 0.005,
|
||||
'앙상블': 0.0055
|
||||
router.get('/status/:execSn', requireAuth, async (req: Request, res: Response) => {
|
||||
const execSn = parseInt(req.params.execSn as string, 10)
|
||||
if (isNaN(execSn) || execSn <= 0) {
|
||||
return res.status(400).json({ error: '유효하지 않은 execSn' })
|
||||
}
|
||||
const spreadFactor = modelFactors[model] || 0.005
|
||||
|
||||
const windSpeed = 5.5
|
||||
const windDirection = 135
|
||||
const currentSpeed = 0.55
|
||||
const currentDirection = 120
|
||||
const waveHeight = 2.2
|
||||
try {
|
||||
const result = await wingPool.query(
|
||||
`SELECT pe.EXEC_STTS_CD, pe.RSLT_DATA, pe.ERR_MSG, pe.BGNG_DTM, sd.FCST_HR,
|
||||
(
|
||||
SELECT AVG(hist.REQD_SEC::FLOAT / hsd.FCST_HR)
|
||||
FROM wing.PRED_EXEC hist
|
||||
JOIN wing.SPIL_DATA hsd ON hist.SPIL_DATA_SN = hsd.SPIL_DATA_SN
|
||||
WHERE hist.ALGO_CD = pe.ALGO_CD
|
||||
AND hist.EXEC_STTS_CD = 'COMPLETED'
|
||||
AND hist.REQD_SEC IS NOT NULL AND hist.REQD_SEC > 0
|
||||
AND hsd.FCST_HR IS NOT NULL AND hsd.FCST_HR > 0
|
||||
) AS avg_sec_per_hr
|
||||
FROM wing.PRED_EXEC pe
|
||||
LEFT JOIN wing.SPIL_DATA sd ON pe.SPIL_DATA_SN = sd.SPIL_DATA_SN
|
||||
WHERE pe.PRED_EXEC_SN=$1`,
|
||||
[execSn]
|
||||
)
|
||||
if (result.rows.length === 0) {
|
||||
return res.status(404).json({ error: '분석 기록을 찾을 수 없습니다.' })
|
||||
}
|
||||
|
||||
const windRadians = (windDirection * Math.PI) / 180
|
||||
const currentRadians = (currentDirection * Math.PI) / 180
|
||||
const row = result.rows[0]
|
||||
const dbStatus: string = row.exec_stts_cd as string
|
||||
// DB 상태 → API 상태 매핑
|
||||
const statusMap: Record<string, string> = {
|
||||
PENDING: 'PENDING',
|
||||
RUNNING: 'RUNNING',
|
||||
COMPLETED: 'DONE',
|
||||
FAILED: 'ERROR',
|
||||
}
|
||||
const status = statusMap[dbStatus] ?? dbStatus
|
||||
|
||||
const windWeight = 0.03
|
||||
const currentWeight = 0.07
|
||||
if (status === 'DONE' && row.rslt_data) {
|
||||
const { trajectory, summary, centerPoints, windData, hydrData } = transformResult(row.rslt_data as PythonTimeStep[])
|
||||
return res.json({ status, trajectory, summary, centerPoints, windData, hydrData })
|
||||
}
|
||||
|
||||
const mainDriftLat =
|
||||
Math.sin(windRadians) * windSpeed * windWeight +
|
||||
Math.sin(currentRadians) * currentSpeed * currentWeight
|
||||
if (status === 'ERROR') {
|
||||
return res.json({ status, error: (row.err_msg as string) || '분석 중 오류가 발생했습니다.' })
|
||||
}
|
||||
|
||||
const mainDriftLon =
|
||||
Math.cos(windRadians) * windSpeed * windWeight +
|
||||
Math.cos(currentRadians) * currentSpeed * currentWeight
|
||||
// PENDING/RUNNING: 경과 시간 기반 진행률 계산
|
||||
// 과거 실행의 초/예측시간 비율(avg_sec_per_hr) × 현재 fcst_hr로 추정, 이력 없으면 5초/hr 폴백
|
||||
let progress: number | undefined;
|
||||
if (status === 'RUNNING' && row.bgng_dtm) {
|
||||
const fcstHr = Number(row.fcst_hr) || 24;
|
||||
const avgSecPerHr = row.avg_sec_per_hr ? Number(row.avg_sec_per_hr) : 5;
|
||||
const estimatedSec = avgSecPerHr * fcstHr;
|
||||
const elapsedSec = (Date.now() - new Date(row.bgng_dtm as string).getTime()) / 1000;
|
||||
progress = Math.min(95, Math.floor((elapsedSec / estimatedSec) * 100));
|
||||
}
|
||||
|
||||
const dispersal = waveHeight * 0.001
|
||||
res.json({ status, ...(progress !== undefined && { progress }) })
|
||||
} catch {
|
||||
res.status(500).json({ error: '상태 조회 실패' })
|
||||
}
|
||||
})
|
||||
|
||||
for (let p = 0; p < particleCount; p++) {
|
||||
const initialSpread = 0.001
|
||||
const randomAngle = Math.random() * Math.PI * 2
|
||||
let particleLat = startLat + Math.sin(randomAngle) * initialSpread * Math.random()
|
||||
let particleLon = startLon + Math.cos(randomAngle) * initialSpread * Math.random()
|
||||
// ============================================================
|
||||
// 백그라운드 폴링
|
||||
// ============================================================
|
||||
async function pollAndSave(jobId: string, execSn: number): Promise<void> {
|
||||
const deadline = Date.now() + POLL_TIMEOUT_MS
|
||||
|
||||
for (let h = 0; h <= hours; h++) {
|
||||
const mainMovementLat = mainDriftLat * h * 0.01
|
||||
const mainMovementLon = mainDriftLon * h * 0.01
|
||||
while (Date.now() < deadline) {
|
||||
await new Promise<void>(resolve => setTimeout(resolve, POLL_INTERVAL_MS))
|
||||
|
||||
const turbulence = Math.sin(h * 0.3 + p * 0.5) * dispersal * h
|
||||
const turbulenceAngle = (h * 0.2 + p * 0.7) * Math.PI
|
||||
|
||||
trajectory.push({
|
||||
lat: particleLat + mainMovementLat + Math.sin(turbulenceAngle) * turbulence,
|
||||
lon: particleLon + mainMovementLon + Math.cos(turbulenceAngle) * turbulence,
|
||||
time: h,
|
||||
particle: p
|
||||
try {
|
||||
const pollRes = await fetch(`${PYTHON_API_URL}/status/${jobId}`, {
|
||||
signal: AbortSignal.timeout(5000),
|
||||
})
|
||||
if (!pollRes.ok) continue
|
||||
|
||||
const data = await pollRes.json() as PythonStatusResponse
|
||||
|
||||
if (data.status === 'DONE' && data.result) {
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC
|
||||
SET EXEC_STTS_CD='COMPLETED',
|
||||
RSLT_DATA=$1,
|
||||
CMPL_DTM=NOW(),
|
||||
REQD_SEC=EXTRACT(EPOCH FROM (NOW() - BGNG_DTM))::INTEGER
|
||||
WHERE PRED_EXEC_SN=$2`,
|
||||
[JSON.stringify(data.result), execSn]
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
if (data.status === 'ERROR') {
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC SET EXEC_STTS_CD='FAILED', ERR_MSG=$1, CMPL_DTM=NOW() WHERE PRED_EXEC_SN=$2`,
|
||||
[data.error ?? '분석 오류', execSn]
|
||||
)
|
||||
return
|
||||
}
|
||||
} catch {
|
||||
// 개별 폴링 오류는 무시하고 재시도
|
||||
}
|
||||
}
|
||||
|
||||
return trajectory
|
||||
// 타임아웃 처리
|
||||
await wingPool.query(
|
||||
`UPDATE wing.PRED_EXEC SET EXEC_STTS_CD='FAILED', ERR_MSG='분석 시간 초과 (30분)', CMPL_DTM=NOW() WHERE PRED_EXEC_SN=$1`,
|
||||
[execSn]
|
||||
)
|
||||
}
|
||||
|
||||
/**
|
||||
* GET /api/simulation/status/:jobId
|
||||
* 시뮬레이션 작업 상태 확인
|
||||
*/
|
||||
router.get('/status/:jobId', async (req: Request, res: Response) => {
|
||||
const jobId = req.params.jobId as string
|
||||
// ============================================================
|
||||
// 타입 및 결과 변환
|
||||
// ============================================================
|
||||
interface PythonParticle {
|
||||
lat: number
|
||||
lon: number
|
||||
stranded?: 0 | 1
|
||||
}
|
||||
|
||||
// jobId 형식 검증 (영숫자, 하이픈만 허용)
|
||||
if (!jobId || !/^[a-zA-Z0-9-]+$/.test(jobId) || jobId.length > 50) {
|
||||
return res.status(400).json({ error: '유효하지 않은 작업 ID' })
|
||||
interface WindPoint {
|
||||
lat: number
|
||||
lon: number
|
||||
wind_speed: number
|
||||
wind_direction: number
|
||||
}
|
||||
|
||||
interface HydrGrid {
|
||||
lonInterval: number[]
|
||||
boundLonLat: { top: number; bottom: number; left: number; right: number }
|
||||
rows: number
|
||||
cols: number
|
||||
latInterval: number[]
|
||||
}
|
||||
|
||||
interface PythonTimeStep {
|
||||
particles: PythonParticle[]
|
||||
remaining_volume_m3: number
|
||||
weathered_volume_m3: number
|
||||
pollution_area_km2: number
|
||||
beached_volume_m3: number
|
||||
pollution_coast_length_m: number
|
||||
center_lat?: number
|
||||
center_lon?: number
|
||||
wind_data?: WindPoint[]
|
||||
hydr_data?: [number[][], number[][]]
|
||||
hydr_grid?: HydrGrid
|
||||
}
|
||||
|
||||
interface PythonStatusResponse {
|
||||
status: 'RUNNING' | 'DONE' | 'ERROR'
|
||||
result?: PythonTimeStep[]
|
||||
error?: string
|
||||
}
|
||||
|
||||
function transformResult(rawResult: PythonTimeStep[]) {
|
||||
const trajectory = rawResult.flatMap((step, stepIdx) =>
|
||||
step.particles.map((p, i) => ({
|
||||
lat: p.lat,
|
||||
lon: p.lon,
|
||||
time: stepIdx,
|
||||
particle: i,
|
||||
stranded: p.stranded,
|
||||
}))
|
||||
)
|
||||
const lastStep = rawResult[rawResult.length - 1]
|
||||
const summary = {
|
||||
remainingVolume: lastStep.remaining_volume_m3,
|
||||
weatheredVolume: lastStep.weathered_volume_m3,
|
||||
pollutionArea: lastStep.pollution_area_km2,
|
||||
beachedVolume: lastStep.beached_volume_m3,
|
||||
pollutionCoastLength: lastStep.pollution_coast_length_m,
|
||||
}
|
||||
|
||||
res.json({
|
||||
jobId: escapeHtml(jobId),
|
||||
status: 'completed',
|
||||
progress: 100,
|
||||
message: 'Simulation completed'
|
||||
})
|
||||
})
|
||||
const centerPoints = rawResult
|
||||
.map((step, stepIdx) =>
|
||||
step.center_lat != null && step.center_lon != null
|
||||
? { lat: step.center_lat, lon: step.center_lon, time: stepIdx }
|
||||
: null
|
||||
)
|
||||
.filter((p): p is { lat: number; lon: number; time: number } => p !== null)
|
||||
const windData = rawResult.map((step) => step.wind_data ?? [])
|
||||
const hydrData = rawResult.map((step) =>
|
||||
step.hydr_data && step.hydr_grid
|
||||
? { value: step.hydr_data, grid: step.hydr_grid }
|
||||
: null
|
||||
)
|
||||
return { trajectory, summary, centerPoints, windData, hydrData }
|
||||
}
|
||||
|
||||
export default router
|
||||
|
||||
@ -157,7 +157,8 @@ app.use('/api/audit', auditRouter)
|
||||
// API 라우트 — 업무
|
||||
app.use('/api/board', boardRouter)
|
||||
app.use('/api/layers', layersRouter)
|
||||
app.use('/api/simulation', simulationLimiter, simulationRouter)
|
||||
app.use('/api/simulation/run', simulationLimiter) // 시뮬레이션 실행만 엄격 제한 (status 폴링 제외)
|
||||
app.use('/api/simulation', simulationRouter)
|
||||
app.use('/api/hns', hnsRouter)
|
||||
app.use('/api/reports', reportsRouter)
|
||||
app.use('/api/assets', assetsRouter)
|
||||
|
||||
@ -299,6 +299,7 @@ CREATE TABLE SPIL_DATA (
|
||||
SPIL_LOC_GEOM GEOMETRY(Point, 4326), -- 유출위치지오메트리
|
||||
FCST_HR INTEGER, -- 예측시간
|
||||
REG_DTM TIMESTAMPTZ NOT NULL DEFAULT NOW(), -- 등록일시
|
||||
IMG_RSLT_DATA JSONB, -- 이미지 분석 결과 (2024-06 추가)
|
||||
CONSTRAINT PK_SPIL_DATA PRIMARY KEY (SPIL_DATA_SN),
|
||||
CONSTRAINT FK_SPIL_ACDNT FOREIGN KEY (ACDNT_SN) REFERENCES ACDNT(ACDNT_SN) ON DELETE CASCADE
|
||||
);
|
||||
@ -320,7 +321,8 @@ COMMENT ON COLUMN SPIL_DATA.REG_DTM IS '등록일시';
|
||||
-- ============================================================
|
||||
CREATE TABLE PRED_EXEC (
|
||||
PRED_EXEC_SN SERIAL NOT NULL, -- 예측실행순번
|
||||
SPIL_DATA_SN INTEGER NOT NULL, -- 유출정보순번
|
||||
SPIL_DATA_SN INTEGER, -- 유출정보순번 (NULL 허용 — 사고 미연결 단독 실행 대응)
|
||||
ACDNT_SN INTEGER NOT NULL, -- 사고순번 (사고 참조, 유출정보 미연결 시에도 사고는 필수)
|
||||
ALGO_CD VARCHAR(20) NOT NULL, -- 알고리즘코드
|
||||
EXEC_STTS_CD VARCHAR(20) NOT NULL DEFAULT 'PENDING', -- 실행상태코드
|
||||
BGNG_DTM TIMESTAMPTZ, -- 시작일시
|
||||
@ -328,6 +330,7 @@ CREATE TABLE PRED_EXEC (
|
||||
REQD_SEC INTEGER, -- 소요시간초
|
||||
RSLT_DATA JSONB, -- 결과데이터
|
||||
ERR_MSG TEXT, -- 오류메시지
|
||||
EXEC_NM VARCHAR(100), -- 실행명
|
||||
CONSTRAINT PK_PRED_EXEC PRIMARY KEY (PRED_EXEC_SN),
|
||||
CONSTRAINT FK_PRED_SPIL FOREIGN KEY (SPIL_DATA_SN) REFERENCES SPIL_DATA(SPIL_DATA_SN) ON DELETE CASCADE,
|
||||
CONSTRAINT CK_PRED_STTS CHECK (EXEC_STTS_CD IN ('PENDING','RUNNING','COMPLETED','FAILED'))
|
||||
@ -335,14 +338,16 @@ CREATE TABLE PRED_EXEC (
|
||||
|
||||
COMMENT ON TABLE PRED_EXEC IS '예측실행';
|
||||
COMMENT ON COLUMN PRED_EXEC.PRED_EXEC_SN IS '예측실행순번';
|
||||
COMMENT ON COLUMN PRED_EXEC.SPIL_DATA_SN IS '유출정보순번 (유출정보 참조)';
|
||||
COMMENT ON COLUMN PRED_EXEC.ALGO_CD IS '알고리즘코드 (ALGO: GNOME, OSCAR 등)';
|
||||
COMMENT ON COLUMN PRED_EXEC.SPIL_DATA_SN IS '유출정보순번 (FK → SPIL_DATA, NULL 허용)';
|
||||
COMMENT ON COLUMN PRED_EXEC.ACDNT_SN IS '사고순번 (사고 참조)';
|
||||
COMMENT ON COLUMN PRED_EXEC.ALGO_CD IS '알고리즘코드 (ALGO: GNOME, OSCAR, OPENDRIFT 등)';
|
||||
COMMENT ON COLUMN PRED_EXEC.EXEC_STTS_CD IS '실행상태코드 (PENDING:대기, RUNNING:실행중, COMPLETED:완료, FAILED:실패)';
|
||||
COMMENT ON COLUMN PRED_EXEC.BGNG_DTM IS '시작일시';
|
||||
COMMENT ON COLUMN PRED_EXEC.CMPL_DTM IS '완료일시';
|
||||
COMMENT ON COLUMN PRED_EXEC.REQD_SEC IS '소요시간초 (실행 소요 시간, 초 단위)';
|
||||
COMMENT ON COLUMN PRED_EXEC.RSLT_DATA IS '결과데이터 (JSON 형식 예측 결과)';
|
||||
COMMENT ON COLUMN PRED_EXEC.ERR_MSG IS '오류메시지';
|
||||
COMMENT ON COLUMN PRED_EXEC.EXEC_NM IS '실행명 (EXPC_{timestamp} 형식, OpenDrift 연동용)';
|
||||
|
||||
|
||||
-- ============================================================
|
||||
|
||||
@ -45,6 +45,7 @@ CREATE TABLE IF NOT EXISTS SPIL_DATA (
|
||||
SPIL_TP_CD VARCHAR(20),
|
||||
FCST_HR INTEGER,
|
||||
REG_DTM TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
||||
IMG_RSLT_DATA JSONB,
|
||||
CONSTRAINT PK_SPIL_DATA PRIMARY KEY (SPIL_DATA_SN),
|
||||
CONSTRAINT FK_SPIL_ACDNT FOREIGN KEY (ACDNT_SN) REFERENCES ACDNT(ACDNT_SN) ON DELETE CASCADE
|
||||
);
|
||||
@ -54,20 +55,23 @@ CREATE INDEX IF NOT EXISTS IDX_SPIL_ACDNT ON SPIL_DATA(ACDNT_SN);
|
||||
-- 3. 예측실행 (PRED_EXEC)
|
||||
CREATE TABLE IF NOT EXISTS PRED_EXEC (
|
||||
PRED_EXEC_SN SERIAL NOT NULL,
|
||||
ACDNT_SN INTEGER NOT NULL,
|
||||
SPIL_DATA_SN INTEGER,
|
||||
ACDNT_SN INTEGER NOT NULL,
|
||||
ALGO_CD VARCHAR(20) NOT NULL,
|
||||
EXEC_STTS_CD VARCHAR(20) NOT NULL DEFAULT 'PENDING',
|
||||
BGNG_DTM TIMESTAMPTZ,
|
||||
CMPL_DTM TIMESTAMPTZ,
|
||||
REQD_SEC INTEGER,
|
||||
RSLT_DATA JSONB,
|
||||
ERR_MSG TEXT,
|
||||
ERR_MSG TEXT,
|
||||
EXEC_NM VARCHAR(100),
|
||||
CONSTRAINT PK_PRED_EXEC PRIMARY KEY (PRED_EXEC_SN),
|
||||
CONSTRAINT FK_PRED_ACDNT FOREIGN KEY (ACDNT_SN) REFERENCES ACDNT(ACDNT_SN) ON DELETE CASCADE,
|
||||
CONSTRAINT CK_PRED_STTS CHECK (EXEC_STTS_CD IN ('PENDING','RUNNING','COMPLETED','FAILED'))
|
||||
);
|
||||
|
||||
CREATE INDEX IF NOT EXISTS IDX_PRED_ACDNT ON PRED_EXEC(ACDNT_SN);
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS uix_pred_exec_nm ON PRED_EXEC (EXEC_NM) WHERE EXEC_NM IS NOT NULL;
|
||||
|
||||
-- 4. 사고별 기상정보 스냅샷 (ACDNT_WEATHER)
|
||||
CREATE TABLE IF NOT EXISTS ACDNT_WEATHER (
|
||||
|
||||
191
docs/PREDICTION-GUIDE.md
Normal file
191
docs/PREDICTION-GUIDE.md
Normal file
@ -0,0 +1,191 @@
|
||||
# 확산 예측 기능 가이드
|
||||
|
||||
> 대상: 확산 예측(OpenDrift) 기능 개발 및 유지보수 담당자
|
||||
|
||||
---
|
||||
|
||||
## 1. 아키텍처 개요
|
||||
|
||||
**폴링 방식** — HTTP 연결 불안정 문제 해결을 위해 비동기 폴링 구조를 채택했다.
|
||||
|
||||
```
|
||||
[프론트] 실행 버튼
|
||||
→ POST /api/simulation/run 즉시 { execSn, status:'RUNNING' } 반환
|
||||
→ "분석 중..." UI 표시
|
||||
→ 3초마다 GET /api/simulation/status/:execSn 폴링
|
||||
|
||||
[Express 백엔드]
|
||||
→ PRED_EXEC INSERT (PENDING)
|
||||
→ POST Python /run-model 즉시 { job_id } 수신
|
||||
→ 응답 즉시 반환 (프론트 블록 없음)
|
||||
→ 백그라운드: 3초마다 Python GET /status/:job_id 폴링
|
||||
→ DONE 시 PRED_EXEC UPDATE (결과 JSONB 저장)
|
||||
|
||||
[Python FastAPI :5003]
|
||||
→ 동시 처리 초과 시 503 즉시 반환
|
||||
→ 여유 시 job_id 반환 + 백그라운드 OpenDrift 시뮬레이션 실행
|
||||
→ NC 결과 → JSON 변환 → 상태 DONE
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. DB 스키마 (PRED_EXEC)
|
||||
|
||||
```sql
|
||||
PRED_EXEC_SN SERIAL PRIMARY KEY
|
||||
ACDNT_SN INTEGER NOT NULL -- 사고 FK
|
||||
SPIL_DATA_SN INTEGER -- 유출정보 FK (NULL 허용)
|
||||
EXEC_NM VARCHAR(100) UNIQUE -- EXPC_{timestamp} 형식
|
||||
ALGO_CD VARCHAR(20) NOT NULL -- 'OPENDRIFT'
|
||||
EXEC_STTS_CD VARCHAR(20) DEFAULT 'PENDING'
|
||||
-- PENDING | RUNNING | COMPLETED | FAILED
|
||||
BGNG_DTM TIMESTAMPTZ
|
||||
CMPL_DTM TIMESTAMPTZ
|
||||
REQD_SEC INTEGER
|
||||
RSLT_DATA JSONB -- 시뮬레이션 결과 전체
|
||||
ERR_MSG TEXT
|
||||
```
|
||||
|
||||
인덱스: `IDX_PRED_STTS` (EXEC_STTS_CD), `uix_pred_exec_nm` (EXEC_NM, partial)
|
||||
|
||||
---
|
||||
|
||||
## 3. Python FastAPI 엔드포인트 (포트 5003)
|
||||
|
||||
| 메서드 | 경로 | 설명 |
|
||||
|--------|------|------|
|
||||
| GET | `/get-received-date` | 최신 예보 수신 가능 날짜 |
|
||||
| GET | `/get-uv/{datetime}/{category}` | 바람/해류 U/V 벡터 (`wind`\|`hydr`) |
|
||||
| POST | `/check-nc` | NetCDF 파일 존재 여부 확인 |
|
||||
| POST | `/run-model` | 시뮬레이션 제출 → 즉시 `job_id` 반환 |
|
||||
| GET | `/status/{job_id}` | 시뮬레이션 진행 상태 조회 |
|
||||
|
||||
### POST /run-model 입력 파라미터
|
||||
|
||||
```json
|
||||
{
|
||||
"startTime": "2025-01-15 12:00:00", // KST (내부 UTC 변환)
|
||||
"runTime": 72, // 예측 시간 (시간)
|
||||
"matTy": "CRUDE OIL", // OpenDrift 유류명
|
||||
"matVol": 100.0, // 시간당 유출량 (m³/hr)
|
||||
"lon": 126.1,
|
||||
"lat": 36.6,
|
||||
"spillTime": 12, // 유출 지속 시간 (0=순간)
|
||||
"name": "EXPC_1710000000000"
|
||||
}
|
||||
```
|
||||
|
||||
### 유류 코드 매핑 (DB → OpenDrift)
|
||||
|
||||
| DB SPIL_MAT_CD | OpenDrift 이름 |
|
||||
|---------------|---------------|
|
||||
| CRUD | CRUDE OIL |
|
||||
| DSEL | DIESEL |
|
||||
| BNKR | BUNKER |
|
||||
| HEFO | IFO 180 |
|
||||
|
||||
---
|
||||
|
||||
## 4. Express 백엔드 주요 엔드포인트
|
||||
|
||||
파일: [backend/src/routes/simulation.ts](../backend/src/routes/simulation.ts)
|
||||
|
||||
| 메서드 | 경로 | 설명 |
|
||||
|--------|------|------|
|
||||
| POST | `/api/simulation/run` | 시뮬레이션 제출 → `execSn` 즉시 반환 |
|
||||
| GET | `/api/simulation/status/:execSn` | 프론트 폴링용 상태 조회 |
|
||||
|
||||
파일: [backend/src/prediction/predictionService.ts](../backend/src/prediction/predictionService.ts)
|
||||
|
||||
- `fetchPredictionList()` — PRED_EXEC 목록 조회
|
||||
- `fetchTrajectoryResult()` — 저장된 결과 조회 (`RSLT_DATA` JSONB 파싱)
|
||||
|
||||
---
|
||||
|
||||
## 5. 프론트엔드 주요 파일
|
||||
|
||||
| 파일 | 역할 |
|
||||
|------|------|
|
||||
| [frontend/src/tabs/prediction/components/OilSpillView.tsx](../frontend/src/tabs/prediction/components/OilSpillView.tsx) | 예측 탭 메인 뷰, 시뮬레이션 실행·폴링 상태 관리 |
|
||||
| [frontend/src/tabs/prediction/hooks/](../frontend/src/tabs/prediction/hooks/) | `useSimulationStatus` 폴링 훅 |
|
||||
| [frontend/src/tabs/prediction/services/predictionApi.ts](../frontend/src/tabs/prediction/services/predictionApi.ts) | API 요청 함수 + 타입 정의 |
|
||||
| [frontend/src/tabs/prediction/components/RightPanel.tsx](../frontend/src/tabs/prediction/components/RightPanel.tsx) | 풍화량·잔류량·오염면적 표시 (마지막 스텝 실제 값) |
|
||||
| [frontend/src/common/components/map/HydrParticleOverlay.tsx](../frontend/src/common/components/map/HydrParticleOverlay.tsx) | 해류 파티클 Canvas 오버레이 |
|
||||
|
||||
### 핵심 타입 (predictionApi.ts)
|
||||
|
||||
```typescript
|
||||
interface HydrGrid {
|
||||
lonInterval: number[];
|
||||
latInterval: number[];
|
||||
boundLonLat: { top: number; bottom: number; left: number; right: number };
|
||||
rows: number; cols: number;
|
||||
}
|
||||
interface HydrDataStep {
|
||||
value: [number[][], number[][]]; // [u_2d, v_2d]
|
||||
grid: HydrGrid;
|
||||
}
|
||||
```
|
||||
|
||||
### 폴링 훅 패턴
|
||||
|
||||
```typescript
|
||||
useQuery({
|
||||
queryKey: ['simulationStatus', execSn],
|
||||
queryFn: () => api.get(`/api/simulation/status/${execSn}`),
|
||||
enabled: execSn !== null,
|
||||
refetchInterval: (data) =>
|
||||
data?.status === 'DONE' || data?.status === 'ERROR' ? false : 3000,
|
||||
});
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 6. Python 코드 위치 (prediction/)
|
||||
|
||||
```
|
||||
prediction/opendrift/
|
||||
├── api.py FastAPI 진입점 (수정 필요: 폴링 지원 + CORS)
|
||||
├── config.py 경로 설정 (수정 필요: 환경변수화)
|
||||
├── createJsonResult.py NC → JSON 변환 (핵심 후처리)
|
||||
├── coastline/ TN_SHORLINE.shp (한국 해안선)
|
||||
├── startup.sh / shutdown.sh
|
||||
├── .env.example 환경변수 샘플
|
||||
└── environment-opendrift.yml conda 환경 재현용
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. 환경변수
|
||||
|
||||
### backend/.env
|
||||
|
||||
```bash
|
||||
PYTHON_API_URL=http://localhost:5003
|
||||
```
|
||||
|
||||
### prediction/opendrift/.env
|
||||
|
||||
```bash
|
||||
MPR_STORAGE_ROOT=/data/storage # NetCDF 기상·해양 데이터 루트
|
||||
MPR_RESULT_ROOT=./result # 시뮬레이션 결과 저장 경로
|
||||
MAX_CONCURRENT_JOBS=4 # 동시 처리 최대 수
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. 위험 요소
|
||||
|
||||
| 위험 | 내용 |
|
||||
|------|------|
|
||||
| NetCDF 파일 부재 | `MPR_STORAGE_ROOT` 경로에 KMA GDAPS·MOHID NC 파일 필요. 없으면 시뮬레이션 불가 |
|
||||
| conda 환경 | `opendrift` conda 환경 설치 필요 (`environment-opendrift.yml`) |
|
||||
| Workers 포화 | 동시 4개 초과 시 503 반환 → `MAX_CONCURRENT_JOBS` 조정 |
|
||||
| 결과 용량 | 12시간 결과 ≈ 1500KB/건. 90일 주기 `RSLT_DATA = NULL` 정리 권장 |
|
||||
|
||||
---
|
||||
|
||||
## 9. 관련 문서
|
||||
|
||||
- [CRUD-API-GUIDE.md](./CRUD-API-GUIDE.md) — Express API 개발 패턴
|
||||
- [COMMON-GUIDE.md](./COMMON-GUIDE.md) — 인증·상태관리 공통 로직
|
||||
@ -4,6 +4,25 @@
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [2026-03-11.2]
|
||||
|
||||
### 추가
|
||||
- OpenDrift 유류 확산 시뮬레이션 통합 (비동기 폴링 구조)
|
||||
- flyTo 완료 후 자동 재생 기능
|
||||
- 이미지 분석 서버 Docker 패키징 (CPU 전용 환경)
|
||||
- SPIL_DATA 이미지 분석 결과 컬럼 인라인 통합
|
||||
- CPU 전용 Docker 환경 구축 (Dockerfile.cpu, docker-compose.cpu.yml)
|
||||
|
||||
### 변경
|
||||
- 이미지 분석/보고서/항공 UI 개선
|
||||
- CCTV/관리자 고도화
|
||||
|
||||
### 기타
|
||||
- 팀 워크플로우 v1.6.1 적용일 갱신
|
||||
- 팀 워크플로우 v1.6.1 동기화 (custom_pre_commit 프로젝트 해시 불일치 해결)
|
||||
- 팀 워크플로우 v1.6.0 동기화 (해시 기반 자동 최신화, push/mr/release 워크플로우 체크, 팀 관리 파일 gitignore 처리)
|
||||
- 팀 워크플로우 v1.5.0 동기화 (스킬 7종 업데이트, version 스킬 신규, release-notes-guide 추가)
|
||||
|
||||
## [2026-03-11]
|
||||
|
||||
### 추가
|
||||
|
||||
157
frontend/src/common/components/map/HydrParticleOverlay.tsx
Normal file
157
frontend/src/common/components/map/HydrParticleOverlay.tsx
Normal file
@ -0,0 +1,157 @@
|
||||
import { useEffect, useRef } from 'react';
|
||||
import { useMap } from '@vis.gl/react-maplibre';
|
||||
import type { HydrDataStep } from '@tabs/prediction/services/predictionApi';
|
||||
|
||||
interface HydrParticleOverlayProps {
|
||||
hydrStep: HydrDataStep | null;
|
||||
}
|
||||
|
||||
const PARTICLE_COUNT = 3000;
|
||||
const MAX_AGE = 300;
|
||||
const SPEED_SCALE = 0.1;
|
||||
const DT = 600;
|
||||
const TRAIL_LENGTH = 30; // 파티클당 저장할 화면 좌표 수
|
||||
const NUM_ALPHA_BANDS = 4; // stroke 배치 단위
|
||||
|
||||
interface TrailPoint { x: number; y: number; }
|
||||
interface Particle {
|
||||
lon: number;
|
||||
lat: number;
|
||||
trail: TrailPoint[];
|
||||
age: number;
|
||||
}
|
||||
|
||||
export default function HydrParticleOverlay({ hydrStep }: HydrParticleOverlayProps) {
|
||||
const { current: map } = useMap();
|
||||
const animRef = useRef<number>();
|
||||
|
||||
useEffect(() => {
|
||||
if (!map || !hydrStep) return;
|
||||
|
||||
const container = map.getContainer();
|
||||
const canvas = document.createElement('canvas');
|
||||
canvas.style.cssText = 'position:absolute;top:0;left:0;pointer-events:none;z-index:5;';
|
||||
canvas.width = container.clientWidth;
|
||||
canvas.height = container.clientHeight;
|
||||
container.appendChild(canvas);
|
||||
const ctx = canvas.getContext('2d')!;
|
||||
|
||||
const { value: [u2d, v2d], grid } = hydrStep;
|
||||
const { boundLonLat, lonInterval, latInterval } = grid;
|
||||
|
||||
const lons: number[] = [boundLonLat.left];
|
||||
for (const d of lonInterval) lons.push(lons[lons.length - 1] + d);
|
||||
const lats: number[] = [boundLonLat.bottom];
|
||||
for (const d of latInterval) lats.push(lats[lats.length - 1] + d);
|
||||
|
||||
function getUV(lon: number, lat: number): [number, number] {
|
||||
let col = -1, row = -1;
|
||||
for (let i = 0; i < lons.length - 1; i++) {
|
||||
if (lon >= lons[i] && lon < lons[i + 1]) { col = i; break; }
|
||||
}
|
||||
for (let i = 0; i < lats.length - 1; i++) {
|
||||
if (lat >= lats[i] && lat < lats[i + 1]) { row = i; break; }
|
||||
}
|
||||
if (col < 0 || row < 0) return [0, 0];
|
||||
const fx = (lon - lons[col]) / (lons[col + 1] - lons[col]);
|
||||
const fy = (lat - lats[row]) / (lats[row + 1] - lats[row]);
|
||||
const u00 = u2d[row]?.[col] ?? 0, u01 = u2d[row]?.[col + 1] ?? u00;
|
||||
const u10 = u2d[row + 1]?.[col] ?? u00, u11 = u2d[row + 1]?.[col + 1] ?? u00;
|
||||
const v00 = v2d[row]?.[col] ?? 0, v01 = v2d[row]?.[col + 1] ?? v00;
|
||||
const v10 = v2d[row + 1]?.[col] ?? v00, v11 = v2d[row + 1]?.[col + 1] ?? v00;
|
||||
const u = u00 * (1 - fx) * (1 - fy) + u01 * fx * (1 - fy) + u10 * (1 - fx) * fy + u11 * fx * fy;
|
||||
const v = v00 * (1 - fx) * (1 - fy) + v01 * fx * (1 - fy) + v10 * (1 - fx) * fy + v11 * fx * fy;
|
||||
return [u, v];
|
||||
}
|
||||
|
||||
const bbox = boundLonLat;
|
||||
const particles: Particle[] = Array.from({ length: PARTICLE_COUNT }, () => ({
|
||||
lon: bbox.left + Math.random() * (bbox.right - bbox.left),
|
||||
lat: bbox.bottom + Math.random() * (bbox.top - bbox.bottom),
|
||||
trail: [],
|
||||
age: Math.floor(Math.random() * MAX_AGE),
|
||||
}));
|
||||
|
||||
function resetParticle(p: Particle) {
|
||||
p.lon = bbox.left + Math.random() * (bbox.right - bbox.left);
|
||||
p.lat = bbox.bottom + Math.random() * (bbox.top - bbox.bottom);
|
||||
p.trail = [];
|
||||
p.age = 0;
|
||||
}
|
||||
|
||||
// 지도 이동/줌 시 화면 좌표가 틀어지므로 trail 초기화
|
||||
const onMove = () => { for (const p of particles) p.trail = []; };
|
||||
map.on('move', onMove);
|
||||
|
||||
function animate() {
|
||||
// 매 프레임 완전 초기화 → 잔상 없음
|
||||
ctx.clearRect(0, 0, canvas.width, canvas.height);
|
||||
|
||||
// alpha band별 세그먼트 버퍼 (드로우 콜 최소화)
|
||||
const bands: [number, number, number, number][][] =
|
||||
Array.from({ length: NUM_ALPHA_BANDS }, () => []);
|
||||
|
||||
for (const p of particles) {
|
||||
const [u, v] = getUV(p.lon, p.lat);
|
||||
const speed = Math.sqrt(u * u + v * v);
|
||||
if (speed < 0.001) { resetParticle(p); continue; }
|
||||
|
||||
const cosLat = Math.cos(p.lat * Math.PI / 180);
|
||||
p.lon += u * SPEED_SCALE * DT / (cosLat * 111320);
|
||||
p.lat += v * SPEED_SCALE * DT / 111320;
|
||||
p.age++;
|
||||
|
||||
if (
|
||||
p.lon < bbox.left || p.lon > bbox.right ||
|
||||
p.lat < bbox.bottom || p.lat > bbox.top ||
|
||||
p.age > MAX_AGE
|
||||
) { resetParticle(p); continue; }
|
||||
|
||||
const curr = map.project([p.lon, p.lat]);
|
||||
if (!curr) continue;
|
||||
|
||||
p.trail.push({ x: curr.x, y: curr.y });
|
||||
if (p.trail.length > TRAIL_LENGTH) p.trail.shift();
|
||||
if (p.trail.length < 2) continue;
|
||||
|
||||
for (let i = 1; i < p.trail.length; i++) {
|
||||
const t = i / p.trail.length; // 0=oldest, 1=newest
|
||||
const band = Math.min(NUM_ALPHA_BANDS - 1, Math.floor(t * NUM_ALPHA_BANDS));
|
||||
const a = p.trail[i - 1], b = p.trail[i];
|
||||
bands[band].push([a.x, a.y, b.x, b.y]);
|
||||
}
|
||||
}
|
||||
|
||||
// alpha band별 일괄 렌더링
|
||||
ctx.lineWidth = 0.8;
|
||||
for (let b = 0; b < NUM_ALPHA_BANDS; b++) {
|
||||
ctx.strokeStyle = `rgba(180, 210, 255, ${((b + 1) / NUM_ALPHA_BANDS) * 0.75})`;
|
||||
ctx.beginPath();
|
||||
for (const [x1, y1, x2, y2] of bands[b]) {
|
||||
ctx.moveTo(x1, y1);
|
||||
ctx.lineTo(x2, y2);
|
||||
}
|
||||
ctx.stroke();
|
||||
}
|
||||
|
||||
animRef.current = requestAnimationFrame(animate);
|
||||
}
|
||||
|
||||
animRef.current = requestAnimationFrame(animate);
|
||||
|
||||
const onResize = () => {
|
||||
canvas.width = container.clientWidth;
|
||||
canvas.height = container.clientHeight;
|
||||
};
|
||||
map.on('resize', onResize);
|
||||
|
||||
return () => {
|
||||
cancelAnimationFrame(animRef.current!);
|
||||
map.off('resize', onResize);
|
||||
map.off('move', onMove);
|
||||
canvas.remove();
|
||||
};
|
||||
}, [map, hydrStep]);
|
||||
|
||||
return null;
|
||||
}
|
||||
@ -1,4 +1,4 @@
|
||||
import { useState, useMemo, useEffect, useCallback } from 'react'
|
||||
import { useState, useMemo, useEffect, useCallback, useRef } from 'react'
|
||||
import { Map, Marker, Popup, Source, Layer, useControl, useMap } from '@vis.gl/react-maplibre'
|
||||
import { MapboxOverlay } from '@deck.gl/mapbox'
|
||||
import { ScatterplotLayer, PathLayer, TextLayer, BitmapLayer } from '@deck.gl/layers'
|
||||
@ -8,6 +8,8 @@ import type { MapLayerMouseEvent } from 'maplibre-gl'
|
||||
import 'maplibre-gl/dist/maplibre-gl.css'
|
||||
import { layerDatabase } from '@common/services/layerService'
|
||||
import type { PredictionModel, SensitiveResource } from '@tabs/prediction/components/OilSpillView'
|
||||
import type { HydrDataStep } from '@tabs/prediction/services/predictionApi'
|
||||
import HydrParticleOverlay from './HydrParticleOverlay'
|
||||
import type { BoomLine, BoomLineCoord } from '@common/types/boomLine'
|
||||
import type { ReplayShip, CollisionEvent } from '@common/types/backtrack'
|
||||
import { createBacktrackLayers } from './BacktrackReplayOverlay'
|
||||
@ -17,8 +19,8 @@ import { useMapStore } from '@common/store/mapStore'
|
||||
const GEOSERVER_URL = import.meta.env.VITE_GEOSERVER_URL || 'http://localhost:8080'
|
||||
const VWORLD_API_KEY = import.meta.env.VITE_VWORLD_API_KEY || ''
|
||||
|
||||
// 남해안 중심 좌표 (여수 앞바다)
|
||||
const DEFAULT_CENTER: [number, number] = [34.5, 127.8]
|
||||
// 인천 송도 국제도시
|
||||
const DEFAULT_CENTER: [number, number] = [37.39, 126.64]
|
||||
const DEFAULT_ZOOM = 10
|
||||
|
||||
// CartoDB Dark Matter 스타일
|
||||
@ -159,7 +161,7 @@ interface MapViewProps {
|
||||
incidentCoord?: { lon: number; lat: number }
|
||||
isSelectingLocation?: boolean
|
||||
onMapClick?: (lon: number, lat: number) => void
|
||||
oilTrajectory?: Array<{ lat: number; lon: number; time: number; particle?: number; model?: PredictionModel }>
|
||||
oilTrajectory?: Array<{ lat: number; lon: number; time: number; particle?: number; model?: PredictionModel; stranded?: 0 | 1 }>
|
||||
selectedModels?: Set<PredictionModel>
|
||||
dispersionResult?: DispersionResult | null
|
||||
dispersionHeatmap?: Array<{ lon: number; lat: number; concentration: number }>
|
||||
@ -177,7 +179,16 @@ interface MapViewProps {
|
||||
incidentCoord: { lat: number; lon: number }
|
||||
}
|
||||
sensitiveResources?: SensitiveResource[]
|
||||
flyToTarget?: { lng: number; lat: number; zoom?: number } | null
|
||||
fitBoundsTarget?: { north: number; south: number; east: number; west: number } | null
|
||||
centerPoints?: Array<{ lat: number; lon: number; time: number }>
|
||||
windData?: Array<Array<{ lat: number; lon: number; wind_speed: number; wind_direction: number }>>
|
||||
hydrData?: (HydrDataStep | null)[]
|
||||
// 외부 플레이어 제어 (prediction 하단 바에서 제어할 때 사용)
|
||||
externalCurrentTime?: number
|
||||
mapCaptureRef?: React.MutableRefObject<(() => string | null) | null>
|
||||
onIncidentFlyEnd?: () => void
|
||||
flyToIncident?: { lon: number; lat: number }
|
||||
}
|
||||
|
||||
// deck.gl 오버레이 컴포넌트 (MapLibre 컨트롤로 등록, interleaved)
|
||||
@ -188,6 +199,33 @@ function DeckGLOverlay({ layers }: { layers: any[] }) {
|
||||
return null
|
||||
}
|
||||
|
||||
// flyTo 트리거 컴포넌트 (Map 내부에서 useMap() 사용)
|
||||
function FlyToController({ flyToTarget }: { flyToTarget?: { lng: number; lat: number; zoom?: number } | null }) {
|
||||
const { current: map } = useMap()
|
||||
useEffect(() => {
|
||||
if (!map || !flyToTarget) return
|
||||
map.flyTo({
|
||||
center: [flyToTarget.lng, flyToTarget.lat],
|
||||
zoom: flyToTarget.zoom ?? 10,
|
||||
duration: 1200,
|
||||
})
|
||||
}, [flyToTarget, map])
|
||||
return null
|
||||
}
|
||||
|
||||
// fitBounds 트리거 컴포넌트 (Map 내부에서 useMap() 사용)
|
||||
function FitBoundsController({ fitBoundsTarget }: { fitBoundsTarget?: { north: number; south: number; east: number; west: number } | null }) {
|
||||
const { current: map } = useMap()
|
||||
useEffect(() => {
|
||||
if (!map || !fitBoundsTarget) return
|
||||
map.fitBounds(
|
||||
[[fitBoundsTarget.west, fitBoundsTarget.south], [fitBoundsTarget.east, fitBoundsTarget.north]],
|
||||
{ padding: 80, duration: 1200, maxZoom: 12 }
|
||||
)
|
||||
}, [fitBoundsTarget, map])
|
||||
return null
|
||||
}
|
||||
|
||||
// 3D 모드 pitch/bearing 제어 컴포넌트 (Map 내부에서 useMap() 사용)
|
||||
function MapPitchController({ threeD }: { threeD: boolean }) {
|
||||
const { current: map } = useMap()
|
||||
@ -203,14 +241,17 @@ function MapPitchController({ threeD }: { threeD: boolean }) {
|
||||
}
|
||||
|
||||
// 사고 지점 변경 시 지도 이동 (Map 내부 컴포넌트)
|
||||
function MapFlyToIncident({ lon, lat }: { lon?: number; lat?: number }) {
|
||||
function MapFlyToIncident({ lon, lat, onFlyEnd }: { lon?: number; lat?: number; onFlyEnd?: () => void }) {
|
||||
const { current: map } = useMap()
|
||||
const onFlyEndRef = useRef(onFlyEnd)
|
||||
useEffect(() => { onFlyEndRef.current = onFlyEnd }, [onFlyEnd])
|
||||
|
||||
useEffect(() => {
|
||||
if (!map || lon == null || lat == null) return
|
||||
|
||||
const doFly = () => {
|
||||
map.flyTo({ center: [lon, lat], zoom: 12, duration: 1200 })
|
||||
map.flyTo({ center: [lon, lat], zoom: 11, duration: 1200 })
|
||||
map.once('moveend', () => onFlyEndRef.current?.())
|
||||
}
|
||||
|
||||
if (map.loaded()) {
|
||||
@ -261,14 +302,24 @@ export function MapView({
|
||||
layerBrightness = 50,
|
||||
backtrackReplay,
|
||||
sensitiveResources = [],
|
||||
flyToTarget,
|
||||
fitBoundsTarget,
|
||||
centerPoints = [],
|
||||
windData = [],
|
||||
hydrData = [],
|
||||
externalCurrentTime,
|
||||
mapCaptureRef,
|
||||
onIncidentFlyEnd,
|
||||
flyToIncident,
|
||||
}: MapViewProps) {
|
||||
const { mapToggles } = useMapStore()
|
||||
const isControlled = externalCurrentTime !== undefined
|
||||
const [currentPosition, setCurrentPosition] = useState<[number, number]>(DEFAULT_CENTER)
|
||||
const [currentTime, setCurrentTime] = useState(0)
|
||||
const [internalCurrentTime, setInternalCurrentTime] = useState(0)
|
||||
const [isPlaying, setIsPlaying] = useState(false)
|
||||
const [playbackSpeed, setPlaybackSpeed] = useState(1)
|
||||
const [popupInfo, setPopupInfo] = useState<PopupInfo | null>(null)
|
||||
const currentTime = isControlled ? externalCurrentTime : internalCurrentTime
|
||||
|
||||
const handleMapClick = useCallback((e: MapLayerMouseEvent) => {
|
||||
const { lng, lat } = e.lngLat
|
||||
@ -279,33 +330,34 @@ export function MapView({
|
||||
setPopupInfo(null)
|
||||
}, [onMapClick])
|
||||
|
||||
// 애니메이션 재생 로직
|
||||
// 애니메이션 재생 로직 (외부 제어 모드에서는 비활성)
|
||||
useEffect(() => {
|
||||
if (!isPlaying || oilTrajectory.length === 0) return
|
||||
if (isControlled || !isPlaying || oilTrajectory.length === 0) return
|
||||
|
||||
const maxTime = Math.max(...oilTrajectory.map(p => p.time))
|
||||
if (currentTime >= maxTime) {
|
||||
if (internalCurrentTime >= maxTime) {
|
||||
setIsPlaying(false)
|
||||
return
|
||||
}
|
||||
|
||||
const interval = setInterval(() => {
|
||||
setCurrentTime(prev => {
|
||||
setInternalCurrentTime(prev => {
|
||||
const next = prev + (1 * playbackSpeed)
|
||||
return next > maxTime ? maxTime : next
|
||||
})
|
||||
}, 200)
|
||||
|
||||
return () => clearInterval(interval)
|
||||
}, [isPlaying, currentTime, playbackSpeed, oilTrajectory])
|
||||
}, [isControlled, isPlaying, internalCurrentTime, playbackSpeed, oilTrajectory])
|
||||
|
||||
// 시뮬레이션 시작 시 자동으로 애니메이션 재생
|
||||
// 시뮬레이션 시작 시 자동으로 애니메이션 재생 (외부 제어 모드에서는 비활성)
|
||||
useEffect(() => {
|
||||
if (isControlled) return
|
||||
if (oilTrajectory.length > 0) {
|
||||
setCurrentTime(0)
|
||||
setInternalCurrentTime(0)
|
||||
setIsPlaying(true)
|
||||
}
|
||||
}, [oilTrajectory.length])
|
||||
}, [isControlled, oilTrajectory.length])
|
||||
|
||||
// WMS 레이어 목록
|
||||
const wmsLayers = useMemo(() => {
|
||||
@ -330,6 +382,9 @@ export function MapView({
|
||||
|
||||
// --- 유류 확산 입자 (ScatterplotLayer) ---
|
||||
const visibleParticles = oilTrajectory.filter(p => p.time <= currentTime)
|
||||
const activeStep = visibleParticles.length > 0
|
||||
? Math.max(...visibleParticles.map(p => p.time))
|
||||
: -1
|
||||
if (visibleParticles.length > 0) {
|
||||
result.push(
|
||||
new ScatterplotLayer({
|
||||
@ -338,8 +393,15 @@ export function MapView({
|
||||
getPosition: (d: (typeof visibleParticles)[0]) => [d.lon, d.lat],
|
||||
getRadius: 3,
|
||||
getFillColor: (d: (typeof visibleParticles)[0]) => {
|
||||
const modelKey = d.model || Array.from(selectedModels)[0] || 'OpenDrift'
|
||||
return hexToRgba(MODEL_COLORS[modelKey] || '#3b82f6', 180)
|
||||
// 1순위: stranded 입자 → 빨간색
|
||||
if (d.stranded === 1) return [239, 68, 68, 220] as [number, number, number, number]
|
||||
// 2순위: 현재 활성 스텝 → 모델 기본 색상
|
||||
if (d.time === activeStep) {
|
||||
const modelKey = d.model || Array.from(selectedModels)[0] || 'OpenDrift'
|
||||
return hexToRgba(MODEL_COLORS[modelKey] || '#3b82f6', 180)
|
||||
}
|
||||
// 3순위: 과거 스텝 → 회색 + 투명
|
||||
return [130, 130, 130, 70] as [number, number, number, number]
|
||||
},
|
||||
radiusMinPixels: 2.5,
|
||||
radiusMaxPixels: 5,
|
||||
@ -354,6 +416,7 @@ export function MapView({
|
||||
content: (
|
||||
<div className="text-xs">
|
||||
<strong>{modelKey} 입자 #{(d.particle ?? 0) + 1}</strong>
|
||||
{d.stranded === 1 && <span className="text-red-400"> (육지 부착)</span>}
|
||||
<br />
|
||||
시간: +{d.time}h
|
||||
<br />
|
||||
@ -364,7 +427,7 @@ export function MapView({
|
||||
}
|
||||
},
|
||||
updateTriggers: {
|
||||
getFillColor: [selectedModels],
|
||||
getFillColor: [selectedModels, currentTime],
|
||||
},
|
||||
})
|
||||
)
|
||||
@ -689,37 +752,73 @@ export function MapView({
|
||||
)
|
||||
}
|
||||
|
||||
// --- 해류 화살표 (TextLayer) ---
|
||||
if (incidentCoord) {
|
||||
const currentArrows: Array<{ lon: number; lat: number; bearing: number; speed: number }> = []
|
||||
const gridSize = 5
|
||||
const spacing = 0.04 // 약 4km 간격
|
||||
const mainBearing = 200 // SSW 방향 (도)
|
||||
// --- 입자 중심점 이동 경로 (PathLayer + ScatterplotLayer) ---
|
||||
const visibleCenters = centerPoints.filter(p => p.time <= currentTime)
|
||||
if (visibleCenters.length >= 2) {
|
||||
result.push(
|
||||
new PathLayer({
|
||||
id: 'center-path',
|
||||
data: [{ path: visibleCenters.map(p => [p.lon, p.lat] as [number, number]) }],
|
||||
getPath: (d: { path: [number, number][] }) => d.path,
|
||||
getColor: [255, 220, 50, 200],
|
||||
getWidth: 2,
|
||||
widthMinPixels: 2,
|
||||
widthMaxPixels: 4,
|
||||
})
|
||||
)
|
||||
}
|
||||
if (visibleCenters.length > 0) {
|
||||
result.push(
|
||||
new ScatterplotLayer({
|
||||
id: 'center-points',
|
||||
data: visibleCenters,
|
||||
getPosition: (d: (typeof visibleCenters)[0]) => [d.lon, d.lat],
|
||||
getRadius: 5,
|
||||
getFillColor: [255, 220, 50, 230],
|
||||
radiusMinPixels: 4,
|
||||
radiusMaxPixels: 8,
|
||||
pickable: false,
|
||||
})
|
||||
)
|
||||
}
|
||||
|
||||
for (let row = -gridSize; row <= gridSize; row++) {
|
||||
for (let col = -gridSize; col <= gridSize; col++) {
|
||||
const lat = incidentCoord.lat + row * spacing
|
||||
const lon = incidentCoord.lon + col * spacing / Math.cos(incidentCoord.lat * Math.PI / 180)
|
||||
// 사고 지점에서 멀어질수록 해류 방향 약간 변화
|
||||
const distFactor = Math.sqrt(row * row + col * col) / gridSize
|
||||
const localBearing = mainBearing + (col * 3) + (row * 2)
|
||||
const speed = 0.3 + (1 - distFactor) * 0.2
|
||||
currentArrows.push({ lon, lat, bearing: localBearing, speed })
|
||||
}
|
||||
}
|
||||
// --- 바람 화살표 (TextLayer) ---
|
||||
if (incidentCoord && windData.length > 0) {
|
||||
type ArrowPoint = { lon: number; lat: number; bearing: number; speed: number }
|
||||
|
||||
const activeWindStep = windData[currentTime] ?? windData[0] ?? []
|
||||
const currentArrows: ArrowPoint[] = activeWindStep
|
||||
.filter((d) => d.wind_speed != null && d.wind_direction != null)
|
||||
.map((d) => ({
|
||||
lon: d.lon,
|
||||
lat: d.lat,
|
||||
bearing: d.wind_direction,
|
||||
speed: d.wind_speed,
|
||||
}))
|
||||
|
||||
result.push(
|
||||
new TextLayer({
|
||||
id: 'current-arrows',
|
||||
data: currentArrows,
|
||||
getPosition: (d: (typeof currentArrows)[0]) => [d.lon, d.lat],
|
||||
getPosition: (d: ArrowPoint) => [d.lon, d.lat],
|
||||
getText: () => '➤',
|
||||
getAngle: (d: (typeof currentArrows)[0]) => -d.bearing + 90,
|
||||
getAngle: (d: ArrowPoint) => -d.bearing + 90,
|
||||
getSize: 22,
|
||||
getColor: [6, 182, 212, 100],
|
||||
getColor: (d: ArrowPoint): [number, number, number, number] => {
|
||||
const s = d.speed
|
||||
if (s < 3) return [6, 182, 212, 130] // cyan-500: calm
|
||||
if (s < 7) return [34, 197, 94, 150] // green-500: light
|
||||
if (s < 12) return [234, 179, 8, 170] // yellow-500: moderate
|
||||
if (s < 17) return [249, 115, 22, 190] // orange-500: fresh
|
||||
return [239, 68, 68, 210] // red-500: strong
|
||||
},
|
||||
characterSet: 'auto',
|
||||
sizeUnits: 'pixels' as const,
|
||||
billboard: true,
|
||||
updateTriggers: {
|
||||
getColor: [currentTime, windData],
|
||||
getAngle: [currentTime, windData],
|
||||
},
|
||||
})
|
||||
)
|
||||
}
|
||||
@ -729,7 +828,7 @@ export function MapView({
|
||||
oilTrajectory, currentTime, selectedModels,
|
||||
boomLines, isDrawingBoom, drawingPoints,
|
||||
dispersionResult, dispersionHeatmap, incidentCoord, backtrackReplay,
|
||||
sensitiveResources,
|
||||
sensitiveResources, centerPoints, windData,
|
||||
])
|
||||
|
||||
// 3D 모드에 따른 지도 스타일 전환
|
||||
@ -755,7 +854,11 @@ export function MapView({
|
||||
{/* 3D 모드 pitch 제어 */}
|
||||
<MapPitchController threeD={mapToggles.threeD} />
|
||||
{/* 사고 지점 변경 시 지도 이동 */}
|
||||
<MapFlyToIncident lon={incidentCoord?.lon} lat={incidentCoord?.lat} />
|
||||
<MapFlyToIncident lon={flyToIncident?.lon} lat={flyToIncident?.lat} onFlyEnd={onIncidentFlyEnd} />
|
||||
{/* 외부에서 flyTo 트리거 */}
|
||||
<FlyToController flyToTarget={flyToTarget} />
|
||||
{/* 예측 완료 시 궤적 전체 범위로 fitBounds */}
|
||||
<FitBoundsController fitBoundsTarget={fitBoundsTarget} />
|
||||
|
||||
{/* WMS 레이어 */}
|
||||
{wmsLayers.map(layer => (
|
||||
@ -783,6 +886,11 @@ export function MapView({
|
||||
{/* deck.gl 오버레이 (인터리브드: 일반 레이어) */}
|
||||
<DeckGLOverlay layers={deckLayers} />
|
||||
|
||||
{/* 해류 파티클 오버레이 */}
|
||||
{hydrData.length > 0 && (
|
||||
<HydrParticleOverlay hydrStep={hydrData[currentTime] ?? null} />
|
||||
)}
|
||||
|
||||
{/* 사고 위치 마커 (MapLibre Marker) */}
|
||||
{incidentCoord && !isNaN(incidentCoord.lat) && !isNaN(incidentCoord.lon) && !(dispersionHeatmap && dispersionHeatmap.length > 0) && (
|
||||
<Marker longitude={incidentCoord.lon} latitude={incidentCoord.lat} anchor="bottom">
|
||||
@ -832,14 +940,14 @@ export function MapView({
|
||||
position={incidentCoord ? [incidentCoord.lat, incidentCoord.lon] : currentPosition}
|
||||
/>
|
||||
|
||||
{/* 타임라인 컨트롤 */}
|
||||
{oilTrajectory.length > 0 && (
|
||||
{/* 타임라인 컨트롤 (외부 제어 모드에서는 숨김 — 하단 플레이어가 대신 담당) */}
|
||||
{!isControlled && oilTrajectory.length > 0 && (
|
||||
<TimelineControl
|
||||
currentTime={currentTime}
|
||||
maxTime={Math.max(...oilTrajectory.map(p => p.time))}
|
||||
isPlaying={isPlaying}
|
||||
playbackSpeed={playbackSpeed}
|
||||
onTimeChange={setCurrentTime}
|
||||
onTimeChange={setInternalCurrentTime}
|
||||
onPlayPause={() => setIsPlaying(!isPlaying)}
|
||||
onSpeedChange={setPlaybackSpeed}
|
||||
/>
|
||||
|
||||
@ -175,4 +175,50 @@ export function consumeHnsReportPayload(): HnsReportPayload | null {
|
||||
return v;
|
||||
}
|
||||
|
||||
// ─── 유출유 예측 보고서 실 데이터 전달 ──────────────────────────
|
||||
export interface OilReportPayload {
|
||||
incident: {
|
||||
name: string;
|
||||
occurTime: string;
|
||||
location: string;
|
||||
lat: number | null;
|
||||
lon: number | null;
|
||||
pollutant: string;
|
||||
spillAmount: string;
|
||||
shipName: string;
|
||||
};
|
||||
pollution: {
|
||||
spillAmount: string;
|
||||
weathered: string;
|
||||
seaRemain: string;
|
||||
pollutionArea: string;
|
||||
coastAttach: string;
|
||||
coastLength: string;
|
||||
oilType: string;
|
||||
};
|
||||
weather: {
|
||||
windDir: string;
|
||||
windSpeed: string;
|
||||
waveHeight: string;
|
||||
temp: string;
|
||||
} | null;
|
||||
spread: {
|
||||
kosps: string;
|
||||
openDrift: string;
|
||||
poseidon: string;
|
||||
};
|
||||
coastal: {
|
||||
firstTime: string | null;
|
||||
};
|
||||
hasSimulation: boolean;
|
||||
}
|
||||
|
||||
let _oilReportPayload: OilReportPayload | null = null;
|
||||
export function setOilReportPayload(d: OilReportPayload | null) { _oilReportPayload = d; }
|
||||
export function consumeOilReportPayload(): OilReportPayload | null {
|
||||
const v = _oilReportPayload;
|
||||
_oilReportPayload = null;
|
||||
return v;
|
||||
}
|
||||
|
||||
export { subMenuState }
|
||||
|
||||
@ -259,6 +259,12 @@
|
||||
background: rgba(6, 182, 212, 0.15);
|
||||
}
|
||||
|
||||
.prd-map-btn.active {
|
||||
background: rgba(6, 182, 212, 0.25);
|
||||
border-color: rgba(6, 182, 212, 0.6);
|
||||
box-shadow: 0 0 0 1px rgba(6, 182, 212, 0.3);
|
||||
}
|
||||
|
||||
/* ═══ Coordinate Display ═══ */
|
||||
.cod {
|
||||
position: absolute;
|
||||
|
||||
27
frontend/src/common/utils/imageAnalysisSignal.ts
Normal file
27
frontend/src/common/utils/imageAnalysisSignal.ts
Normal file
@ -0,0 +1,27 @@
|
||||
import type { ImageAnalyzeResult } from '@tabs/prediction/services/predictionApi';
|
||||
|
||||
/**
|
||||
* 항공탐색(유출유면적분석) → 유출유 확산예측 탭 간 데이터 전달용 모듈 레벨 시그널.
|
||||
* registerMainTabSwitcher / navigateToTab 패턴과 동일한 방식으로 구현된다.
|
||||
*/
|
||||
|
||||
interface PendingImageAnalysis extends ImageAnalyzeResult {
|
||||
autoRun: boolean;
|
||||
}
|
||||
|
||||
let _pending: PendingImageAnalysis | null = null;
|
||||
|
||||
/** 분석 결과를 시그널에 저장한다. navigateToTab 호출 직전에 사용한다. */
|
||||
export function setPendingImageAnalysis(data: PendingImageAnalysis): void {
|
||||
_pending = data;
|
||||
}
|
||||
|
||||
/**
|
||||
* 시그널에서 분석 결과를 꺼내고 초기화한다.
|
||||
* OilSpillView 마운트 시 1회 호출한다.
|
||||
*/
|
||||
export function consumePendingImageAnalysis(): PendingImageAnalysis | null {
|
||||
const value = _pending;
|
||||
_pending = null;
|
||||
return value;
|
||||
}
|
||||
@ -1,6 +1,7 @@
|
||||
import { useState, useCallback, useRef, useEffect } from 'react'
|
||||
import { fetchAerialMedia } from '../services/aerialApi'
|
||||
import { fetchAerialMedia, downloadAerialMedia } from '../services/aerialApi'
|
||||
import type { AerialMediaItem } from '../services/aerialApi'
|
||||
import { navigateToTab } from '@common/hooks/useSubMenu'
|
||||
|
||||
// ── Helpers ──
|
||||
|
||||
@ -48,6 +49,9 @@ export function MediaManagement() {
|
||||
const [searchTerm, setSearchTerm] = useState('')
|
||||
const [sortBy, setSortBy] = useState('latest')
|
||||
const [showUpload, setShowUpload] = useState(false)
|
||||
const [downloadingId, setDownloadingId] = useState<number | null>(null)
|
||||
const [bulkDownloading, setBulkDownloading] = useState(false)
|
||||
const [downloadResult, setDownloadResult] = useState<{ total: number; success: number } | null>(null)
|
||||
const modalRef = useRef<HTMLDivElement>(null)
|
||||
|
||||
const loadData = useCallback(async () => {
|
||||
@ -118,6 +122,38 @@ export function MediaManagement() {
|
||||
})
|
||||
}
|
||||
|
||||
const handleBulkDownload = async () => {
|
||||
if (bulkDownloading || selectedIds.size === 0) return
|
||||
setBulkDownloading(true)
|
||||
let success = 0
|
||||
const total = selectedIds.size
|
||||
for (const sn of selectedIds) {
|
||||
const item = mediaItems.find(f => f.aerialMediaSn === sn)
|
||||
if (!item) continue
|
||||
try {
|
||||
await downloadAerialMedia(sn, item.orgnlNm ?? item.fileNm)
|
||||
success++
|
||||
} catch {
|
||||
// 실패 건 스킵
|
||||
}
|
||||
}
|
||||
setBulkDownloading(false)
|
||||
setDownloadResult({ total, success })
|
||||
}
|
||||
|
||||
const handleDownload = async (e: React.MouseEvent, item: AerialMediaItem) => {
|
||||
e.stopPropagation()
|
||||
if (downloadingId !== null) return
|
||||
setDownloadingId(item.aerialMediaSn)
|
||||
try {
|
||||
await downloadAerialMedia(item.aerialMediaSn, item.orgnlNm ?? item.fileNm)
|
||||
} catch {
|
||||
alert('다운로드 실패: 이미지를 찾을 수 없습니다.')
|
||||
} finally {
|
||||
setDownloadingId(null)
|
||||
}
|
||||
}
|
||||
|
||||
const droneCount = mediaItems.filter(f => f.equipTpCd === 'drone').length
|
||||
const planeCount = mediaItems.filter(f => f.equipTpCd === 'plane').length
|
||||
const satCount = mediaItems.filter(f => f.equipTpCd === 'satellite').length
|
||||
@ -254,8 +290,12 @@ export function MediaManagement() {
|
||||
<td className="px-2 py-2 text-[11px] font-mono">{f.fileSz ?? '—'}</td>
|
||||
<td className="px-2 py-2 text-[11px] font-mono">{f.resolution ?? '—'}</td>
|
||||
<td className="px-2 py-2 text-center" onClick={e => e.stopPropagation()}>
|
||||
<button className="px-2 py-1 text-[10px] rounded bg-[rgba(6,182,212,0.1)] text-primary-cyan border border-primary-cyan/20 hover:bg-[rgba(6,182,212,0.2)] transition-colors">
|
||||
📥
|
||||
<button
|
||||
onClick={(e) => handleDownload(e, f)}
|
||||
disabled={downloadingId === f.aerialMediaSn}
|
||||
className="px-2 py-1 text-[10px] rounded bg-[rgba(6,182,212,0.1)] text-primary-cyan border border-primary-cyan/20 hover:bg-[rgba(6,182,212,0.2)] transition-colors disabled:opacity-50"
|
||||
>
|
||||
{downloadingId === f.aerialMediaSn ? '⏳' : '📥'}
|
||||
</button>
|
||||
</td>
|
||||
</tr>
|
||||
@ -274,15 +314,47 @@ export function MediaManagement() {
|
||||
<button onClick={toggleAll} className="px-3 py-1.5 text-[11px] font-semibold rounded bg-bg-3 border border-border text-text-2 hover:bg-bg-hover transition-colors font-korean">
|
||||
☑ 전체선택
|
||||
</button>
|
||||
<button className="px-3 py-1.5 text-[11px] font-semibold rounded bg-[rgba(6,182,212,0.1)] text-primary-cyan border border-primary-cyan/30 hover:bg-[rgba(6,182,212,0.2)] transition-colors font-korean">
|
||||
📥 선택 다운로드
|
||||
<button
|
||||
onClick={handleBulkDownload}
|
||||
disabled={bulkDownloading || selectedIds.size === 0}
|
||||
className="px-3 py-1.5 text-[11px] font-semibold rounded bg-[rgba(6,182,212,0.1)] text-primary-cyan border border-primary-cyan/30 hover:bg-[rgba(6,182,212,0.2)] transition-colors font-korean disabled:opacity-50"
|
||||
>
|
||||
{bulkDownloading ? '⏳ 다운로드 중...' : '📥 선택 다운로드'}
|
||||
</button>
|
||||
<button className="px-3 py-1.5 text-[11px] font-semibold rounded bg-[rgba(168,85,247,0.1)] text-primary-purple border border-primary-purple/30 hover:bg-[rgba(168,85,247,0.2)] transition-colors font-korean">
|
||||
🧩 유출유면적분석으로 →
|
||||
<button
|
||||
onClick={() => navigateToTab('prediction', 'analysis')}
|
||||
className="px-3 py-1.5 text-[11px] font-semibold rounded bg-[rgba(168,85,247,0.1)] text-primary-purple border border-primary-purple/30 hover:bg-[rgba(168,85,247,0.2)] transition-colors font-korean"
|
||||
>
|
||||
🔬 유출유확산예측으로 →
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 선택 다운로드 결과 팝업 */}
|
||||
{downloadResult && (
|
||||
<div className="fixed inset-0 z-[300] bg-black/60 backdrop-blur-sm flex items-center justify-center">
|
||||
<div className="bg-bg-1 border border-border rounded-md p-6 w-72 text-center">
|
||||
<div className="text-2xl mb-3">📥</div>
|
||||
<div className="text-sm font-bold font-korean mb-3">다운로드 완료</div>
|
||||
<div className="text-[13px] font-korean text-text-2 mb-1">
|
||||
총 <span className="text-primary-cyan font-bold">{downloadResult.total}</span>건 선택
|
||||
</div>
|
||||
<div className="text-[13px] font-korean text-text-2 mb-4">
|
||||
<span className="text-status-green font-bold">{downloadResult.success}</span>건 다운로드 성공
|
||||
{downloadResult.total - downloadResult.success > 0 && (
|
||||
<> / <span className="text-status-red font-bold">{downloadResult.total - downloadResult.success}</span>건 실패</>
|
||||
)}
|
||||
</div>
|
||||
<button
|
||||
onClick={() => setDownloadResult(null)}
|
||||
className="px-6 py-2 text-sm font-semibold rounded bg-[rgba(6,182,212,0.15)] text-primary-cyan border border-primary-cyan/30 hover:bg-[rgba(6,182,212,0.25)] transition-colors font-korean"
|
||||
>
|
||||
확인
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Upload Modal */}
|
||||
{showUpload && (
|
||||
<div className="fixed inset-0 z-[200] bg-black/60 backdrop-blur-sm flex items-center justify-center">
|
||||
|
||||
@ -1,212 +1,245 @@
|
||||
import { useState } from 'react'
|
||||
import { useState, useRef, useEffect, useCallback } from 'react';
|
||||
import { stitchImages } from '../services/aerialApi';
|
||||
import { analyzeImage } from '@tabs/prediction/services/predictionApi';
|
||||
import { setPendingImageAnalysis } from '@common/utils/imageAnalysisSignal';
|
||||
import { navigateToTab } from '@common/hooks/useSubMenu';
|
||||
|
||||
// ── Types & Mock Data ──
|
||||
|
||||
interface MosaicImage {
|
||||
id: string
|
||||
filename: string
|
||||
status: 'done' | 'processing' | 'waiting'
|
||||
hasOil: boolean
|
||||
}
|
||||
|
||||
const mosaicImages: MosaicImage[] = [
|
||||
{ id: 'T1', filename: '드론_001.jpg', status: 'done', hasOil: true },
|
||||
{ id: 'T2', filename: '드론_002.jpg', status: 'done', hasOil: true },
|
||||
{ id: 'T3', filename: '드론_003.jpg', status: 'done', hasOil: true },
|
||||
{ id: 'T4', filename: '드론_004.jpg', status: 'done', hasOil: true },
|
||||
{ id: 'T5', filename: '드론_005.jpg', status: 'processing', hasOil: false },
|
||||
{ id: 'T6', filename: '드론_006.jpg', status: 'waiting', hasOil: false },
|
||||
]
|
||||
|
||||
// ── Component ──
|
||||
const MAX_IMAGES = 6;
|
||||
|
||||
export function OilAreaAnalysis() {
|
||||
const [activeStep, setActiveStep] = useState(1)
|
||||
const [analyzing, setAnalyzing] = useState(false)
|
||||
const [analyzed, setAnalyzed] = useState(false)
|
||||
const [selectedFiles, setSelectedFiles] = useState<File[]>([]);
|
||||
const [previewUrls, setPreviewUrls] = useState<string[]>([]);
|
||||
const [stitchedBlob, setStitchedBlob] = useState<Blob | null>(null);
|
||||
const [stitchedPreviewUrl, setStitchedPreviewUrl] = useState<string | null>(null);
|
||||
const [isStitching, setIsStitching] = useState(false);
|
||||
const [isAnalyzing, setIsAnalyzing] = useState(false);
|
||||
const [error, setError] = useState<string | null>(null);
|
||||
const fileInputRef = useRef<HTMLInputElement>(null);
|
||||
|
||||
const handleAnalyze = () => {
|
||||
setAnalyzing(true)
|
||||
setTimeout(() => {
|
||||
setAnalyzing(false)
|
||||
setAnalyzed(true)
|
||||
}, 1500)
|
||||
}
|
||||
// Object URL 메모리 누수 방지 — 언마운트 시 전체 revoke
|
||||
useEffect(() => {
|
||||
return () => {
|
||||
previewUrls.forEach(url => URL.revokeObjectURL(url));
|
||||
if (stitchedPreviewUrl) URL.revokeObjectURL(stitchedPreviewUrl);
|
||||
};
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, []);
|
||||
|
||||
const stepCls = (idx: number) => {
|
||||
if (idx < activeStep) return 'border-status-green text-status-green bg-[rgba(34,197,94,0.05)]'
|
||||
if (idx === activeStep) return 'border-primary-cyan text-primary-cyan bg-[rgba(6,182,212,0.05)]'
|
||||
return 'border-border text-text-3 bg-bg-3'
|
||||
}
|
||||
const handleFileSelect = useCallback((e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
setError(null);
|
||||
const incoming = Array.from(e.target.files ?? []);
|
||||
if (incoming.length === 0) return;
|
||||
|
||||
setSelectedFiles(prev => {
|
||||
const merged = [...prev, ...incoming].slice(0, MAX_IMAGES);
|
||||
if (prev.length + incoming.length > MAX_IMAGES) {
|
||||
setError(`최대 ${MAX_IMAGES}장까지 선택할 수 있습니다.`);
|
||||
}
|
||||
return merged;
|
||||
});
|
||||
|
||||
// setSelectedFiles updater 밖에서 독립 호출 — updater 내부 side effect는
|
||||
// React Strict Mode의 이중 호출로 인해 URL이 중복 생성되는 버그를 유발함
|
||||
setPreviewUrls(prev => {
|
||||
const available = MAX_IMAGES - prev.length;
|
||||
const toAdd = incoming.slice(0, available);
|
||||
return [...prev, ...toAdd.map(f => URL.createObjectURL(f))];
|
||||
});
|
||||
|
||||
// input 초기화 (동일 파일 재선택 허용)
|
||||
e.target.value = '';
|
||||
}, []);
|
||||
|
||||
const handleRemoveFile = useCallback((idx: number) => {
|
||||
setSelectedFiles(prev => prev.filter((_, i) => i !== idx));
|
||||
setPreviewUrls(prev => {
|
||||
URL.revokeObjectURL(prev[idx]);
|
||||
return prev.filter((_, i) => i !== idx);
|
||||
});
|
||||
// 합성 결과 초기화 (선택 파일이 바뀌었으므로)
|
||||
setStitchedBlob(null);
|
||||
if (stitchedPreviewUrl) {
|
||||
URL.revokeObjectURL(stitchedPreviewUrl);
|
||||
setStitchedPreviewUrl(null);
|
||||
}
|
||||
setError(null);
|
||||
}, [stitchedPreviewUrl]);
|
||||
|
||||
const handleStitch = async () => {
|
||||
if (selectedFiles.length < 2) {
|
||||
setError('이미지를 2장 이상 선택해주세요.');
|
||||
return;
|
||||
}
|
||||
setError(null);
|
||||
setIsStitching(true);
|
||||
try {
|
||||
const blob = await stitchImages(selectedFiles);
|
||||
if (stitchedPreviewUrl) URL.revokeObjectURL(stitchedPreviewUrl);
|
||||
setStitchedBlob(blob);
|
||||
setStitchedPreviewUrl(URL.createObjectURL(blob));
|
||||
} catch (err) {
|
||||
const msg =
|
||||
err instanceof Error
|
||||
? err.message
|
||||
: (err as { message?: string }).message ?? '이미지 합성에 실패했습니다.';
|
||||
const status = err instanceof Error ? 0 : (err as { status?: number }).status ?? 0;
|
||||
setError(status === 504 ? '이미지 합성 서버 응답 시간이 초과되었습니다.' : msg);
|
||||
} finally {
|
||||
setIsStitching(false);
|
||||
}
|
||||
};
|
||||
|
||||
const handleAnalyze = async () => {
|
||||
if (!stitchedBlob) return;
|
||||
setError(null);
|
||||
setIsAnalyzing(true);
|
||||
try {
|
||||
const stitchedFile = new File([stitchedBlob], `stitch_${Date.now()}.jpg`, { type: 'image/jpeg' });
|
||||
const result = await analyzeImage(stitchedFile);
|
||||
setPendingImageAnalysis({ ...result, autoRun: true });
|
||||
navigateToTab('prediction', 'analysis');
|
||||
} catch (err) {
|
||||
const msg = err instanceof Error ? err.message : '분석에 실패했습니다.';
|
||||
setError(msg.includes('GPS') ? '이미지에 GPS 정보가 없습니다. GPS 정보가 포함된 이미지를 사용해주세요.' : msg);
|
||||
setIsAnalyzing(false);
|
||||
}
|
||||
};
|
||||
|
||||
const canStitch = selectedFiles.length >= 2 && !isStitching && !isAnalyzing;
|
||||
const canAnalyze = stitchedBlob !== null && !isStitching && !isAnalyzing;
|
||||
|
||||
return (
|
||||
<div className="flex gap-5 h-full overflow-hidden">
|
||||
{/* Left Panel */}
|
||||
<div className="w-[340px] min-w-[340px] flex flex-col overflow-y-auto scrollbar-thin">
|
||||
{/* ── Left Panel ── */}
|
||||
<div className="w-[280px] min-w-[280px] flex flex-col overflow-y-auto scrollbar-thin">
|
||||
<div className="text-sm font-bold mb-1 font-korean">🧩 유출유면적분석</div>
|
||||
<div className="text-[11px] text-text-3 mb-4 font-korean">단면 사진을 합성하여 유출유 확산 면적과 기름 양을 산정합니다.</div>
|
||||
|
||||
{/* Step Indicator */}
|
||||
<div className="flex gap-2 mb-3">
|
||||
{['① 사진 선택', '② 정합·합성', '③ 면적 산정'].map((label, i) => (
|
||||
<button
|
||||
key={i}
|
||||
onClick={() => setActiveStep(i)}
|
||||
className={`flex-1 py-2 rounded-sm border text-center text-[10px] font-semibold font-korean cursor-pointer transition-colors ${stepCls(i)}`}
|
||||
>
|
||||
{label}
|
||||
</button>
|
||||
))}
|
||||
<div className="text-[11px] text-text-3 mb-4 font-korean">
|
||||
드론 사진을 합성하여 유출유 확산 면적과 기름 양을 산정합니다.
|
||||
</div>
|
||||
|
||||
{/* Selected Images */}
|
||||
<div className="text-[11px] font-bold mb-2 font-korean">선택된 사진 (6장)</div>
|
||||
<div className="flex flex-col gap-1 mb-3.5">
|
||||
{['여수항_드론_001.jpg', '여수항_드론_002.jpg', '여수항_드론_003.jpg', '여수항_드론_004.jpg', '여수항_드론_005.jpg', '여수항_드론_006.jpg'].map((name, i) => (
|
||||
<div key={i} className="flex items-center gap-2 px-2 py-1.5 bg-bg-3 border border-border rounded-sm text-[11px] font-korean">
|
||||
<span>🛸</span>
|
||||
<span className="flex-1 truncate">{name}</span>
|
||||
<span className={`text-[9px] font-semibold ${
|
||||
i < 4 ? 'text-status-green' : i === 4 ? 'text-status-orange' : 'text-text-3'
|
||||
}`}>
|
||||
{i < 4 ? '✓ 정합' : i === 4 ? '⏳ 정합중' : '대기'}
|
||||
</span>
|
||||
{/* 이미지 선택 버튼 */}
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
accept="image/*"
|
||||
multiple
|
||||
className="hidden"
|
||||
onChange={handleFileSelect}
|
||||
/>
|
||||
<button
|
||||
onClick={() => fileInputRef.current?.click()}
|
||||
disabled={selectedFiles.length >= MAX_IMAGES || isStitching || isAnalyzing}
|
||||
className="w-full py-2 mb-3 border border-dashed border-border rounded-sm text-xs font-korean text-text-2
|
||||
hover:border-primary-cyan hover:text-primary-cyan transition-colors cursor-pointer
|
||||
disabled:opacity-40 disabled:cursor-not-allowed"
|
||||
>
|
||||
+ 이미지 선택 ({selectedFiles.length}/{MAX_IMAGES})
|
||||
</button>
|
||||
|
||||
{/* 선택된 이미지 목록 */}
|
||||
{selectedFiles.length > 0 && (
|
||||
<>
|
||||
<div className="text-[11px] font-bold mb-1.5 font-korean">선택된 이미지</div>
|
||||
<div className="flex flex-col gap-1 mb-3">
|
||||
{selectedFiles.map((file, i) => (
|
||||
<div
|
||||
key={`${file.name}-${i}`}
|
||||
className="flex items-center gap-2 px-2 py-1.5 bg-bg-3 border border-border rounded-sm text-[11px] font-korean"
|
||||
>
|
||||
<span className="text-primary-cyan">📷</span>
|
||||
<span className="flex-1 truncate text-text-1">{file.name}</span>
|
||||
<button
|
||||
onClick={() => handleRemoveFile(i)}
|
||||
disabled={isStitching || isAnalyzing}
|
||||
className="text-text-3 hover:text-status-red transition-colors cursor-pointer
|
||||
disabled:opacity-40 disabled:cursor-not-allowed ml-1 shrink-0"
|
||||
title="제거"
|
||||
>
|
||||
✕
|
||||
</button>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Analysis Parameters */}
|
||||
<div className="text-[11px] font-bold mb-2 font-korean">분석 파라미터</div>
|
||||
<div className="flex flex-col gap-1.5 mb-3.5">
|
||||
{[
|
||||
['촬영 고도', '120 m'],
|
||||
['GSD (지상해상도)', '3.2 cm/px'],
|
||||
['오버랩 비율', '80% / 70%'],
|
||||
['좌표계', 'EPSG:5186'],
|
||||
['유종 판별 기준', 'NDVI + NIR'],
|
||||
['유막 두께 추정', 'Bonn Agreement'],
|
||||
].map(([label, value], i) => (
|
||||
<div key={i} className="flex justify-between items-center text-[11px]">
|
||||
<span className="text-text-3 font-korean">{label}</span>
|
||||
<span className="font-mono font-semibold">{value}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
{/* 에러 메시지 */}
|
||||
{error && (
|
||||
<div className="mb-3 px-2.5 py-2 bg-[rgba(239,68,68,0.08)] border border-[rgba(239,68,68,0.3)] rounded-sm text-[11px] text-status-red font-korean">
|
||||
{error}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Action Buttons */}
|
||||
{/* 이미지 합성 버튼 */}
|
||||
<button
|
||||
onClick={handleStitch}
|
||||
disabled={!canStitch}
|
||||
className="w-full py-2.5 mb-2 rounded-sm text-[12px] font-bold font-korean cursor-pointer transition-colors
|
||||
border border-primary-cyan text-primary-cyan bg-[rgba(6,182,212,0.06)]
|
||||
hover:bg-[rgba(6,182,212,0.15)] disabled:opacity-40 disabled:cursor-not-allowed"
|
||||
>
|
||||
{isStitching ? '⏳ 합성 중...' : stitchedBlob ? '✅ 다시 합성' : '🔗 이미지 합성'}
|
||||
</button>
|
||||
|
||||
{/* 분석 시작 버튼 */}
|
||||
<button
|
||||
onClick={handleAnalyze}
|
||||
disabled={analyzing}
|
||||
className={`w-full py-3 rounded-sm text-[13px] font-bold font-korean cursor-pointer border-none mb-2 transition-colors ${
|
||||
analyzed
|
||||
? 'bg-[rgba(34,197,94,0.15)] text-status-green border border-status-green'
|
||||
: 'text-white'
|
||||
}`}
|
||||
style={!analyzed ? { background: 'linear-gradient(135deg, var(--cyan), var(--blue))' } : undefined}
|
||||
disabled={!canAnalyze}
|
||||
className="w-full py-3 rounded-sm text-[13px] font-bold font-korean cursor-pointer border-none transition-colors
|
||||
disabled:opacity-40 disabled:cursor-not-allowed text-white"
|
||||
style={canAnalyze ? { background: 'linear-gradient(135deg, var(--cyan), var(--blue))' } : { background: 'var(--bg-3)' }}
|
||||
>
|
||||
{analyzing ? '⏳ 분석중...' : analyzed ? '✅ 분석 완료!' : '🧩 면적분석 실행'}
|
||||
</button>
|
||||
<button className="w-full py-2.5 border border-border bg-bg-3 text-text-2 rounded-sm text-xs font-semibold font-korean cursor-pointer hover:bg-bg-hover transition-colors">
|
||||
📥 결과 다운로드 (GeoTIFF)
|
||||
{isAnalyzing ? '⏳ 분석 중...' : '🧩 분석 시작'}
|
||||
</button>
|
||||
</div>
|
||||
|
||||
{/* Right Panel */}
|
||||
{/* ── Right Panel ── */}
|
||||
<div className="flex-1 flex flex-col overflow-hidden">
|
||||
{/* Header */}
|
||||
<div className="flex justify-between items-center mb-2">
|
||||
<span className="text-xs font-bold font-korean">🗺 합성 영상 및 유막 탐지 결과</span>
|
||||
<div className="flex gap-1.5">
|
||||
<span className="text-[10px] px-2 py-0.5 rounded-full bg-[rgba(239,68,68,0.1)] text-status-red font-semibold font-korean">■ 유막 탐지</span>
|
||||
<span className="text-[10px] px-2 py-0.5 rounded-full bg-[rgba(6,182,212,0.1)] text-primary-cyan font-semibold font-korean">□ 원본 타일</span>
|
||||
<span className="text-[10px] px-2 py-0.5 rounded-full bg-[rgba(34,197,94,0.1)] text-status-green font-semibold font-korean">정합률 96.2%</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Image Grid 3×2 */}
|
||||
{/* 3×2 이미지 그리드 */}
|
||||
<div className="text-[11px] font-bold mb-2 font-korean">선택된 이미지 미리보기</div>
|
||||
<div className="grid grid-cols-3 gap-1.5 mb-3">
|
||||
{mosaicImages.map(img => (
|
||||
<div key={img.id} className="bg-bg-3 border border-border rounded-sm overflow-hidden cursor-pointer hover:border-border-light transition-colors">
|
||||
<div
|
||||
className="h-[100px] relative flex items-center justify-center overflow-hidden"
|
||||
style={{ background: 'linear-gradient(135deg, #0c1624, #1a1a2e)' }}
|
||||
>
|
||||
{img.hasOil && (
|
||||
<div
|
||||
className="absolute inset-0"
|
||||
style={{
|
||||
background: 'rgba(239,68,68,0.15)',
|
||||
border: '1px solid rgba(239,68,68,0.35)',
|
||||
clipPath: 'polygon(20% 30%,60% 15%,85% 40%,70% 80%,30% 75%,10% 50%)',
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
<div className="text-lg font-bold text-white/[0.08] font-mono">{img.id}</div>
|
||||
<div className={`absolute top-1.5 right-1.5 px-1.5 py-0.5 rounded-md text-[9px] font-bold font-korean ${
|
||||
img.status === 'done' && img.hasOil ? 'bg-[rgba(239,68,68,0.2)] text-status-red' :
|
||||
img.status === 'processing' ? 'bg-[rgba(249,115,22,0.2)] text-status-orange' :
|
||||
'bg-[rgba(100,116,139,0.2)] text-text-3'
|
||||
}`}>
|
||||
{img.status === 'done' && img.hasOil ? '유막' : img.status === 'processing' ? '정합중' : '대기'}
|
||||
{Array.from({ length: MAX_IMAGES }).map((_, i) => (
|
||||
<div
|
||||
key={i}
|
||||
className="bg-bg-3 border border-border rounded-sm overflow-hidden"
|
||||
style={{ height: '300px' }}
|
||||
>
|
||||
{previewUrls[i] ? (
|
||||
<img
|
||||
src={previewUrls[i]}
|
||||
alt={selectedFiles[i]?.name ?? ''}
|
||||
className="w-full h-full object-cover"
|
||||
/>
|
||||
) : (
|
||||
<div className="flex items-center justify-center h-full text-text-3 text-lg font-mono opacity-20">
|
||||
{i + 1}
|
||||
</div>
|
||||
</div>
|
||||
<div className="px-2 py-1.5 flex justify-between items-center text-[10px] font-korean text-text-2">
|
||||
<span>{img.filename}</span>
|
||||
<span className={
|
||||
img.status === 'done' ? 'text-status-green' :
|
||||
img.status === 'processing' ? 'text-status-orange' :
|
||||
'text-text-3'
|
||||
}>
|
||||
{img.status === 'done' ? '✓' : img.status === 'processing' ? '⏳' : '—'}
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
|
||||
{/* Merged Result Preview */}
|
||||
<div className="relative h-[140px] bg-bg-0 border border-border rounded-sm overflow-hidden mb-3">
|
||||
<div className="absolute inset-0" style={{ background: 'radial-gradient(ellipse at 40% 50%, rgba(10,25,40,0.7), rgba(8,14,26,0.95))' }}>
|
||||
<div className="absolute border border-dashed rounded flex items-center justify-center text-[10px] font-korean" style={{ top: '15%', left: '10%', width: '65%', height: '70%', borderColor: 'rgba(6,182,212,0.3)', color: 'rgba(6,182,212,0.5)' }}>
|
||||
합성 영역 (3×2 그리드)
|
||||
{/* 합성 결과 */}
|
||||
<div className="text-[11px] font-bold mb-2 font-korean">합성 결과</div>
|
||||
<div
|
||||
className="relative bg-bg-0 border border-border rounded-sm overflow-hidden flex items-center justify-center"
|
||||
style={{ minHeight: '160px', flex: '1 1 0' }}
|
||||
>
|
||||
{stitchedPreviewUrl ? (
|
||||
<img
|
||||
src={stitchedPreviewUrl}
|
||||
alt="합성 결과"
|
||||
className="max-w-full max-h-full object-contain"
|
||||
/>
|
||||
) : (
|
||||
<div className="text-[12px] text-text-3 font-korean text-center px-4">
|
||||
{isStitching
|
||||
? '⏳ 이미지를 합성하고 있습니다...'
|
||||
: '이미지를 선택하고 합성 버튼을 클릭하면\n합성 결과가 여기에 표시됩니다.'}
|
||||
</div>
|
||||
<div className="absolute" style={{ top: '22%', left: '18%', width: '35%', height: '40%', background: 'rgba(239,68,68,0.12)', border: '1.5px solid rgba(239,68,68,0.4)', borderRadius: '30% 50% 40% 60%' }} />
|
||||
<div className="absolute" style={{ top: '40%', left: '38%', width: '20%', height: '30%', background: 'rgba(239,68,68,0.08)', border: '1px solid rgba(239,68,68,0.3)', borderRadius: '50% 30% 60% 40%' }} />
|
||||
</div>
|
||||
<div className="absolute bottom-1.5 left-2.5 text-[9px] text-text-3 font-mono">34.7312°N, 127.6845°E</div>
|
||||
<div className="absolute bottom-1.5 right-2.5 text-[9px] text-text-3 font-mono">축척 ≈ 1:2,500</div>
|
||||
</div>
|
||||
|
||||
{/* Analysis Results */}
|
||||
<div className="p-4 bg-bg-3 border border-border rounded-md">
|
||||
<div className="text-xs font-bold mb-2.5 font-korean">📊 유출유 분석 결과</div>
|
||||
<div className="grid grid-cols-3 gap-2">
|
||||
{[
|
||||
{ value: '0.42 km²', label: '유막 면적', color: 'text-status-red' },
|
||||
{ value: '12.6 kL', label: '추정 유출량', color: 'text-status-orange' },
|
||||
{ value: '1.84 km²', label: '합성 영역 면적', color: 'text-primary-cyan' },
|
||||
].map((r, i) => (
|
||||
<div key={i} className="text-center py-2.5 px-2 bg-bg-0 border border-border rounded-sm">
|
||||
<div className={`text-lg font-bold font-mono ${r.color}`}>{r.value}</div>
|
||||
<div className="text-[9px] text-text-3 mt-0.5 font-korean">{r.label}</div>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
<div className="grid grid-cols-2 gap-1.5 mt-2.5 text-[11px]">
|
||||
{[
|
||||
['두꺼운 유막 (>1mm)', '0.08 km²', 'text-status-red'],
|
||||
['얇은 유막 (<1mm)', '0.34 km²', 'text-status-orange'],
|
||||
['무지개 빛깔', '0.12 km²', 'text-status-yellow'],
|
||||
['Bonn 코드', 'Code 3~4', 'text-text-1'],
|
||||
].map(([label, value, color], i) => (
|
||||
<div key={i} className="flex justify-between px-2 py-1 bg-bg-0 rounded">
|
||||
<span className="text-text-3 font-korean">{label}</span>
|
||||
<span className={`font-semibold font-mono ${color}`}>{value}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
@ -103,3 +103,30 @@ export async function createSatRequest(
|
||||
const response = await api.post<{ satReqSn: number }>('/aerial/sat-requests', input);
|
||||
return response.data;
|
||||
}
|
||||
|
||||
export async function downloadAerialMedia(sn: number, fileName: string): Promise<void> {
|
||||
const res = await api.get(`/aerial/media/${sn}/download`, { responseType: 'blob' });
|
||||
const url = URL.createObjectURL(res.data as Blob);
|
||||
const a = document.createElement('a');
|
||||
a.href = url;
|
||||
a.download = fileName;
|
||||
document.body.appendChild(a);
|
||||
a.click();
|
||||
document.body.removeChild(a);
|
||||
URL.revokeObjectURL(url);
|
||||
}
|
||||
|
||||
/**
|
||||
* 여러 이미지 파일을 /aerial/stitch 엔드포인트로 전송해 합성 JPEG Blob을 반환한다.
|
||||
* FastAPI /stitch → pic_gps.py 스티칭 파이프라인 프록시.
|
||||
*/
|
||||
export async function stitchImages(files: File[]): Promise<Blob> {
|
||||
const form = new FormData();
|
||||
files.forEach(f => form.append('files', f));
|
||||
const response = await api.post<Blob>('/aerial/stitch', form, {
|
||||
responseType: 'blob',
|
||||
timeout: 310_000,
|
||||
headers: { 'Content-Type': undefined }, // 기본 application/json 제거 → 브라우저가 multipart/form-data 자동 설정
|
||||
});
|
||||
return response.data;
|
||||
}
|
||||
|
||||
@ -10,8 +10,11 @@ export function LeftPanel({
|
||||
selectedAnalysis,
|
||||
enabledLayers,
|
||||
onToggleLayer,
|
||||
accidentTime,
|
||||
onAccidentTimeChange,
|
||||
incidentCoord,
|
||||
onCoordChange,
|
||||
isSelectingLocation,
|
||||
onMapSelectClick,
|
||||
onRunSimulation,
|
||||
isRunningSimulation,
|
||||
@ -25,6 +28,10 @@ export function LeftPanel({
|
||||
onOilTypeChange,
|
||||
spillAmount,
|
||||
onSpillAmountChange,
|
||||
incidentName,
|
||||
onIncidentNameChange,
|
||||
spillUnit,
|
||||
onSpillUnitChange,
|
||||
boomLines,
|
||||
onBoomLinesChange,
|
||||
oilTrajectory,
|
||||
@ -40,6 +47,7 @@ export function LeftPanel({
|
||||
onLayerOpacityChange,
|
||||
layerBrightness,
|
||||
onLayerBrightnessChange,
|
||||
onImageAnalysisResult,
|
||||
}: LeftPanelProps) {
|
||||
const [expandedSections, setExpandedSections] = useState<ExpandedSections>({
|
||||
predictionInput: true,
|
||||
@ -64,8 +72,11 @@ export function LeftPanel({
|
||||
<PredictionInputSection
|
||||
expanded={expandedSections.predictionInput}
|
||||
onToggle={() => toggleSection('predictionInput')}
|
||||
accidentTime={accidentTime}
|
||||
onAccidentTimeChange={onAccidentTimeChange}
|
||||
incidentCoord={incidentCoord}
|
||||
onCoordChange={onCoordChange}
|
||||
isSelectingLocation={isSelectingLocation}
|
||||
onMapSelectClick={onMapSelectClick}
|
||||
onRunSimulation={onRunSimulation}
|
||||
isRunningSimulation={isRunningSimulation}
|
||||
@ -79,6 +90,11 @@ export function LeftPanel({
|
||||
onOilTypeChange={onOilTypeChange}
|
||||
spillAmount={spillAmount}
|
||||
onSpillAmountChange={onSpillAmountChange}
|
||||
incidentName={incidentName}
|
||||
onIncidentNameChange={onIncidentNameChange}
|
||||
spillUnit={spillUnit}
|
||||
onSpillUnitChange={onSpillUnitChange}
|
||||
onImageAnalysisResult={onImageAnalysisResult}
|
||||
/>
|
||||
|
||||
{/* Incident Section */}
|
||||
@ -178,7 +194,7 @@ export function LeftPanel({
|
||||
boomLines={boomLines}
|
||||
onBoomLinesChange={onBoomLinesChange}
|
||||
oilTrajectory={oilTrajectory}
|
||||
incidentCoord={incidentCoord}
|
||||
incidentCoord={incidentCoord ?? { lat: 0, lon: 0 }}
|
||||
algorithmSettings={algorithmSettings}
|
||||
onAlgorithmSettingsChange={onAlgorithmSettingsChange}
|
||||
isDrawingBoom={isDrawingBoom}
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
import { useState, useEffect, useCallback } from 'react'
|
||||
import { useState, useEffect, useCallback, useMemo, useRef } from 'react'
|
||||
import { LeftPanel } from './LeftPanel'
|
||||
import { RightPanel } from './RightPanel'
|
||||
import { MapView } from '@common/components/map/MapView'
|
||||
@ -8,14 +8,17 @@ import { BoomDeploymentTheoryView } from './BoomDeploymentTheoryView'
|
||||
import { BacktrackModal } from './BacktrackModal'
|
||||
import { RecalcModal } from './RecalcModal'
|
||||
import { BacktrackReplayBar } from '@common/components/map/BacktrackReplayBar'
|
||||
import { useSubMenu, navigateToTab, setReportGenCategory } from '@common/hooks/useSubMenu'
|
||||
import { useSubMenu, navigateToTab, setReportGenCategory, setOilReportPayload, type OilReportPayload } from '@common/hooks/useSubMenu'
|
||||
import type { BoomLine, AlgorithmSettings, ContainmentResult, BoomLineCoord } from '@common/types/boomLine'
|
||||
import type { BacktrackPhase, BacktrackVessel, BacktrackConditions, ReplayShip, CollisionEvent } from '@common/types/backtrack'
|
||||
import { TOTAL_REPLAY_FRAMES } from '@common/types/backtrack'
|
||||
import { fetchBacktrackByAcdnt, createBacktrack, fetchPredictionDetail } from '../services/predictionApi'
|
||||
import type { PredictionDetail } from '../services/predictionApi'
|
||||
import { fetchBacktrackByAcdnt, createBacktrack, fetchPredictionDetail, fetchAnalysisTrajectory } from '../services/predictionApi'
|
||||
import type { CenterPoint, HydrDataStep, ImageAnalyzeResult, OilParticle, PredictionDetail, SimulationRunResponse, SimulationSummary, WindPoint } from '../services/predictionApi'
|
||||
import { useSimulationStatus } from '../hooks/useSimulationStatus'
|
||||
import SimulationLoadingOverlay from './SimulationLoadingOverlay'
|
||||
import { api } from '@common/services/api'
|
||||
import { generateAIBoomLines } from '@common/utils/geo'
|
||||
import { consumePendingImageAnalysis } from '@common/utils/imageAnalysisSignal'
|
||||
|
||||
export type PredictionModel = 'KOSPS' | 'POSEIDON' | 'OpenDrift'
|
||||
|
||||
@ -101,15 +104,24 @@ export const ALL_MODELS: PredictionModel[] = ['KOSPS', 'POSEIDON', 'OpenDrift']
|
||||
export function OilSpillView() {
|
||||
const { activeSubTab, setActiveSubTab } = useSubMenu('prediction')
|
||||
const [enabledLayers, setEnabledLayers] = useState<Set<string>>(new Set())
|
||||
const [incidentCoord, setIncidentCoord] = useState({ lon: 127.6845, lat: 34.7312 })
|
||||
const [incidentCoord, setIncidentCoord] = useState<{ lon: number; lat: number } | null>(null)
|
||||
const [flyToCoord, setFlyToCoord] = useState<{ lon: number; lat: number } | undefined>(undefined)
|
||||
const flyToTarget = null
|
||||
const fitBoundsTarget = null
|
||||
const [isSelectingLocation, setIsSelectingLocation] = useState(false)
|
||||
const [oilTrajectory, setOilTrajectory] = useState<Array<{ lat: number; lon: number; time: number; particle?: number; model?: PredictionModel }>>([])
|
||||
const [oilTrajectory, setOilTrajectory] = useState<OilParticle[]>([])
|
||||
const [centerPoints, setCenterPoints] = useState<CenterPoint[]>([])
|
||||
const [windData, setWindData] = useState<WindPoint[][]>([])
|
||||
const [hydrData, setHydrData] = useState<(HydrDataStep | null)[]>([])
|
||||
const [isRunningSimulation, setIsRunningSimulation] = useState(false)
|
||||
const [selectedModels, setSelectedModels] = useState<Set<PredictionModel>>(new Set(['KOSPS']))
|
||||
const [predictionTime, setPredictionTime] = useState(48)
|
||||
const [accidentTime, setAccidentTime] = useState<string>('')
|
||||
const [spillType, setSpillType] = useState('연속')
|
||||
const [oilType, setOilType] = useState('벙커C유')
|
||||
const [spillAmount, setSpillAmount] = useState(100)
|
||||
const [incidentName, setIncidentName] = useState('')
|
||||
const [spillUnit, setSpillUnit] = useState('kL')
|
||||
|
||||
// 민감자원
|
||||
const [sensitiveResources, setSensitiveResources] = useState<SensitiveResource[]>([])
|
||||
@ -132,7 +144,7 @@ export function OilSpillView() {
|
||||
|
||||
// 타임라인 플레이어 상태
|
||||
const [isPlaying, setIsPlaying] = useState(false)
|
||||
const [timelinePosition, setTimelinePosition] = useState(25) // 0~100%
|
||||
const [currentStep, setCurrentStep] = useState(0) // 현재 시간값 (시간 단위)
|
||||
const [playSpeed, setPlaySpeed] = useState(1)
|
||||
|
||||
// 역추적 상태
|
||||
@ -152,26 +164,17 @@ export function OilSpillView() {
|
||||
// 역추적 API 데이터
|
||||
const [backtrackConditions, setBacktrackConditions] = useState<BacktrackConditions>({
|
||||
estimatedSpillTime: '', analysisRange: '±12시간', searchRadius: '10 NM',
|
||||
spillLocation: { lat: 34.7312, lon: 127.6845 }, totalVessels: 0,
|
||||
spillLocation: { lat: 37.3883, lon: 126.6435 }, totalVessels: 0,
|
||||
})
|
||||
const [replayShips, setReplayShips] = useState<ReplayShip[]>([])
|
||||
const [collisionEvent, setCollisionEvent] = useState<CollisionEvent | null>(null)
|
||||
|
||||
// 재계산 상태
|
||||
const [recalcModalOpen, setRecalcModalOpen] = useState(false)
|
||||
const [currentExecSn, setCurrentExecSn] = useState<number | null>(null)
|
||||
const [simulationSummary, setSimulationSummary] = useState<SimulationSummary | null>(null)
|
||||
const { data: simStatus } = useSimulationStatus(currentExecSn)
|
||||
|
||||
// 분석 탭 초기 진입 시 기본 데모 자동 표시
|
||||
useEffect(() => {
|
||||
if (activeSubTab === 'analysis' && oilTrajectory.length === 0 && !selectedAnalysis) {
|
||||
const models = Array.from(selectedModels.size > 0 ? selectedModels : new Set<PredictionModel>(['KOSPS']))
|
||||
const demoTrajectory = generateDemoTrajectory(incidentCoord, models, predictionTime)
|
||||
setOilTrajectory(demoTrajectory)
|
||||
const demoBooms = generateAIBoomLines(demoTrajectory, incidentCoord, algorithmSettings)
|
||||
setBoomLines(demoBooms)
|
||||
setSensitiveResources(DEMO_SENSITIVE_RESOURCES)
|
||||
}
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [activeSubTab])
|
||||
|
||||
const handleToggleLayer = (layerId: string, enabled: boolean) => {
|
||||
setEnabledLayers(prev => {
|
||||
@ -204,7 +207,7 @@ export function OilSpillView() {
|
||||
estimatedSpillTime: bt.estSpilDtm ? new Date(bt.estSpilDtm).toLocaleString('ko-KR', { month: '2-digit', day: '2-digit', hour: '2-digit', minute: '2-digit' }) : '',
|
||||
analysisRange: bt.anlysRange || '±12시간',
|
||||
searchRadius: bt.srchRadiusNm ? `${bt.srchRadiusNm} NM` : '10 NM',
|
||||
spillLocation: { lat: bt.lat || incidentCoord.lat, lon: bt.lon || incidentCoord.lon },
|
||||
spillLocation: { lat: bt.lat || incidentCoord?.lat || 0, lon: bt.lon || incidentCoord?.lon || 0 },
|
||||
totalVessels: bt.totalVessels || 0,
|
||||
})
|
||||
setBacktrackPhase('results')
|
||||
@ -225,7 +228,7 @@ export function OilSpillView() {
|
||||
setBacktrackModalOpen(true)
|
||||
setBacktrackConditions(prev => ({
|
||||
...prev,
|
||||
spillLocation: incidentCoord,
|
||||
spillLocation: incidentCoord ?? prev.spillLocation,
|
||||
}))
|
||||
if (selectedAnalysis) {
|
||||
loadBacktrackData(selectedAnalysis.acdntSn)
|
||||
@ -236,6 +239,7 @@ export function OilSpillView() {
|
||||
}
|
||||
|
||||
const handleRunBacktrackAnalysis = async () => {
|
||||
if (!incidentCoord) return
|
||||
setBacktrackPhase('analyzing')
|
||||
try {
|
||||
if (selectedAnalysis) {
|
||||
@ -290,10 +294,6 @@ export function OilSpillView() {
|
||||
// 역추적 리플레이 애니메이션
|
||||
useEffect(() => {
|
||||
if (!isReplayPlaying) return
|
||||
if (replayFrame >= TOTAL_REPLAY_FRAMES) {
|
||||
setIsReplayPlaying(false)
|
||||
return
|
||||
}
|
||||
const interval = setInterval(() => {
|
||||
setReplayFrame(prev => {
|
||||
const next = prev + 1
|
||||
@ -305,13 +305,127 @@ export function OilSpillView() {
|
||||
})
|
||||
}, 50 / replaySpeed)
|
||||
return () => clearInterval(interval)
|
||||
}, [isReplayPlaying, replayFrame, replaySpeed])
|
||||
}, [isReplayPlaying, replaySpeed])
|
||||
|
||||
// flyTo 완료 후 재생 대기 플래그
|
||||
const pendingPlayRef = useRef(false)
|
||||
|
||||
// 항공 이미지 분석 완료 후 자동실행 플래그
|
||||
const pendingAutoRunRef = useRef(false)
|
||||
|
||||
// 마운트 시 이미지 분석 시그널 확인 (유출유면적분석 탭에서 이동한 경우)
|
||||
useEffect(() => {
|
||||
const pending = consumePendingImageAnalysis()
|
||||
if (!pending) return
|
||||
handleImageAnalysisResult({
|
||||
acdntSn: pending.acdntSn,
|
||||
lat: pending.lat,
|
||||
lon: pending.lon,
|
||||
oilType: pending.oilType,
|
||||
area: pending.area,
|
||||
volume: pending.volume,
|
||||
fileId: pending.fileId,
|
||||
occurredAt: pending.occurredAt,
|
||||
})
|
||||
if (pending.autoRun) pendingAutoRunRef.current = true
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [])
|
||||
|
||||
// incidentCoord 업데이트 후 시뮬레이션 자동실행
|
||||
useEffect(() => {
|
||||
if (pendingAutoRunRef.current && incidentCoord) {
|
||||
pendingAutoRunRef.current = false
|
||||
handleRunSimulation()
|
||||
}
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
}, [incidentCoord])
|
||||
|
||||
const handleFlyEnd = useCallback(() => {
|
||||
setFlyToCoord(undefined)
|
||||
if (pendingPlayRef.current) {
|
||||
pendingPlayRef.current = false
|
||||
setIsPlaying(true)
|
||||
}
|
||||
}, [])
|
||||
|
||||
// 시뮬레이션 폴링 결과 처리
|
||||
useEffect(() => {
|
||||
if (!simStatus) return;
|
||||
if (simStatus.status === 'DONE' && simStatus.trajectory) {
|
||||
// eslint-disable-next-line react-hooks/set-state-in-effect
|
||||
setOilTrajectory(simStatus.trajectory);
|
||||
setSimulationSummary(simStatus.summary ?? null);
|
||||
setCenterPoints(simStatus.centerPoints ?? []);
|
||||
setWindData(simStatus.windData ?? []);
|
||||
setHydrData(simStatus.hydrData ?? []);
|
||||
setIsRunningSimulation(false);
|
||||
setCurrentExecSn(null);
|
||||
// AI 방어선 자동 생성
|
||||
if (incidentCoord) {
|
||||
const booms = generateAIBoomLines(simStatus.trajectory, incidentCoord, algorithmSettings);
|
||||
setBoomLines(booms);
|
||||
}
|
||||
setSensitiveResources(DEMO_SENSITIVE_RESOURCES);
|
||||
// 새 시뮬레이션 완료 시 flyTo 없으므로 즉시 재생
|
||||
setCurrentStep(0);
|
||||
setIsPlaying(true);
|
||||
}
|
||||
if (simStatus.status === 'ERROR') {
|
||||
setIsRunningSimulation(false);
|
||||
setCurrentExecSn(null);
|
||||
}
|
||||
}, [simStatus, incidentCoord, algorithmSettings]);
|
||||
|
||||
// trajectory 변경 시 플레이어 스텝 초기화 (재생은 각 경로에서 별도 처리)
|
||||
useEffect(() => {
|
||||
if (oilTrajectory.length > 0) {
|
||||
// eslint-disable-next-line react-hooks/set-state-in-effect
|
||||
setCurrentStep(0);
|
||||
}
|
||||
}, [oilTrajectory.length]);
|
||||
|
||||
// 플레이어 재생 애니메이션 (1x = 1초/스텝, 2x = 0.5초/스텝, 4x = 0.25초/스텝)
|
||||
const timeSteps = useMemo(() => {
|
||||
if (oilTrajectory.length === 0) return [];
|
||||
const unique = [...new Set(oilTrajectory.map(p => p.time))].sort((a, b) => a - b);
|
||||
return unique;
|
||||
}, [oilTrajectory]);
|
||||
|
||||
const maxTime = timeSteps[timeSteps.length - 1] ?? predictionTime;
|
||||
|
||||
useEffect(() => {
|
||||
if (!isPlaying || timeSteps.length === 0) return;
|
||||
if (currentStep >= maxTime) {
|
||||
// eslint-disable-next-line react-hooks/set-state-in-effect
|
||||
setIsPlaying(false);
|
||||
return;
|
||||
}
|
||||
const ms = 1000 / playSpeed;
|
||||
const id = setInterval(() => {
|
||||
setCurrentStep(prev => {
|
||||
const idx = timeSteps.indexOf(prev);
|
||||
if (idx < 0 || idx >= timeSteps.length - 1) {
|
||||
setIsPlaying(false);
|
||||
return timeSteps[timeSteps.length - 1];
|
||||
}
|
||||
return timeSteps[idx + 1];
|
||||
});
|
||||
}, ms);
|
||||
return () => clearInterval(id);
|
||||
}, [isPlaying, currentStep, playSpeed, timeSteps, maxTime]);
|
||||
|
||||
// 분석 목록에서 사고명 클릭 시
|
||||
const handleSelectAnalysis = async (analysis: Analysis) => {
|
||||
setIsPlaying(false)
|
||||
setCurrentStep(0)
|
||||
setSelectedAnalysis(analysis)
|
||||
setCenterPoints([])
|
||||
if (analysis.occurredAt) {
|
||||
setAccidentTime(analysis.occurredAt.slice(0, 16))
|
||||
}
|
||||
if (analysis.lon != null && analysis.lat != null) {
|
||||
setIncidentCoord({ lon: analysis.lon, lat: analysis.lat })
|
||||
setFlyToCoord({ lon: analysis.lon, lat: analysis.lat })
|
||||
}
|
||||
// 유종 매핑
|
||||
const oilTypeMap: Record<string, string> = {
|
||||
@ -336,16 +450,49 @@ export function OilSpillView() {
|
||||
// 분석 화면으로 전환
|
||||
setActiveSubTab('analysis')
|
||||
|
||||
// 데모 궤적 자동 생성 (화면 진입 즉시 시각화)
|
||||
const coord = (analysis.lon != null && analysis.lat != null)
|
||||
? { lon: analysis.lon, lat: analysis.lat }
|
||||
: incidentCoord
|
||||
const demoModels = Array.from(models.size > 0 ? models : new Set<PredictionModel>(['KOSPS']))
|
||||
|
||||
// OpenDrift 완료된 경우 실제 궤적 로드, 없으면 데모로 fallback
|
||||
if (analysis.opendriftStatus === 'completed') {
|
||||
try {
|
||||
const { trajectory, summary, centerPoints: cp, windData: wd, hydrData: hd } = await fetchAnalysisTrajectory(analysis.acdntSn)
|
||||
if (trajectory && trajectory.length > 0) {
|
||||
setOilTrajectory(trajectory)
|
||||
if (summary) setSimulationSummary(summary)
|
||||
setCenterPoints(cp ?? [])
|
||||
setWindData(wd ?? [])
|
||||
setHydrData(hd ?? [])
|
||||
const booms = generateAIBoomLines(trajectory, coord, algorithmSettings)
|
||||
setBoomLines(booms)
|
||||
setSensitiveResources(DEMO_SENSITIVE_RESOURCES)
|
||||
// incidentCoord가 변경된 경우 flyTo 완료 후 재생, 그렇지 않으면 즉시 재생
|
||||
if (analysis.lon !== incidentCoord?.lon || analysis.lat !== incidentCoord?.lat) {
|
||||
pendingPlayRef.current = true
|
||||
} else {
|
||||
setIsPlaying(true)
|
||||
}
|
||||
return
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('[prediction] trajectory 로딩 실패, 데모로 fallback:', err)
|
||||
}
|
||||
}
|
||||
|
||||
// 데모 궤적 생성 (fallback)
|
||||
const demoTrajectory = generateDemoTrajectory(coord, demoModels, parseInt(analysis.duration) || 48)
|
||||
setOilTrajectory(demoTrajectory)
|
||||
const demoBooms = generateAIBoomLines(demoTrajectory, coord, algorithmSettings)
|
||||
setBoomLines(demoBooms)
|
||||
setSensitiveResources(DEMO_SENSITIVE_RESOURCES)
|
||||
// incidentCoord가 변경된 경우 flyTo 완료 후 재생, 그렇지 않으면 즉시 재생
|
||||
if (analysis.lon !== incidentCoord?.lon || analysis.lat !== incidentCoord?.lat) {
|
||||
pendingPlayRef.current = true
|
||||
} else {
|
||||
setIsPlaying(true)
|
||||
}
|
||||
}
|
||||
|
||||
const handleMapClick = (lon: number, lat: number) => {
|
||||
@ -357,57 +504,176 @@ export function OilSpillView() {
|
||||
}
|
||||
}
|
||||
|
||||
const handleImageAnalysisResult = useCallback((result: ImageAnalyzeResult) => {
|
||||
setIncidentCoord({ lat: result.lat, lon: result.lon })
|
||||
setFlyToCoord({ lat: result.lat, lon: result.lon })
|
||||
setAccidentTime(result.occurredAt.slice(0, 16))
|
||||
setOilType(result.oilType)
|
||||
setSpillAmount(parseFloat(result.volume.toFixed(4)))
|
||||
setSpillUnit('kL')
|
||||
setSelectedAnalysis({
|
||||
acdntSn: result.acdntSn,
|
||||
acdntNm: '',
|
||||
occurredAt: result.occurredAt,
|
||||
analysisDate: '',
|
||||
requestor: '',
|
||||
duration: '48',
|
||||
oilType: result.oilType,
|
||||
volume: result.volume,
|
||||
location: '',
|
||||
lat: result.lat,
|
||||
lon: result.lon,
|
||||
kospsStatus: 'pending',
|
||||
poseidonStatus: 'pending',
|
||||
opendriftStatus: 'pending',
|
||||
backtrackStatus: 'pending',
|
||||
analyst: '',
|
||||
officeName: '',
|
||||
})
|
||||
}, [])
|
||||
|
||||
const handleRunSimulation = async () => {
|
||||
if (selectedModels.size === 0) return
|
||||
setIsRunningSimulation(true)
|
||||
// incidentName이 있으면 직접 입력 모드 — 기존 selectedAnalysis.acdntSn 무시하고 새 사고 생성
|
||||
const isDirectInput = incidentName.trim().length > 0;
|
||||
const existingAcdntSn = isDirectInput
|
||||
? undefined
|
||||
: (selectedAnalysis?.acdntSn ?? analysisDetail?.acdnt?.acdntSn);
|
||||
|
||||
// 선택 모드인데 사고도 없으면 실행 불가, 직접 입력 모드인데 사고명 없으면 실행 불가
|
||||
if (!isDirectInput && !existingAcdntSn) {
|
||||
return;
|
||||
}
|
||||
if (!incidentCoord) {
|
||||
return;
|
||||
}
|
||||
|
||||
setIsRunningSimulation(true);
|
||||
setSimulationSummary(null);
|
||||
try {
|
||||
const models = Array.from(selectedModels)
|
||||
const results = await Promise.all(
|
||||
models.map(async (model) => {
|
||||
const { data } = await api.post<{ trajectory: Array<{ lat: number; lon: number; time: number; particle?: number }> }>('/simulation/run', {
|
||||
model,
|
||||
lat: incidentCoord.lat,
|
||||
lon: incidentCoord.lon,
|
||||
duration_hours: predictionTime,
|
||||
oil_type: oilType,
|
||||
spill_amount: spillAmount,
|
||||
spill_type: spillType,
|
||||
})
|
||||
return data.trajectory.map(p => ({ ...p, model }))
|
||||
})
|
||||
)
|
||||
const payload: Record<string, unknown> = {
|
||||
acdntSn: existingAcdntSn,
|
||||
lat: incidentCoord.lat,
|
||||
lon: incidentCoord.lon,
|
||||
runTime: predictionTime,
|
||||
matTy: oilType,
|
||||
matVol: spillAmount,
|
||||
spillTime: spillType === '연속' ? predictionTime : 0,
|
||||
startTime: accidentTime
|
||||
? `${accidentTime}:00`
|
||||
: analysisDetail?.acdnt?.occurredAt,
|
||||
};
|
||||
|
||||
setOilTrajectory(results.flat())
|
||||
// 직접 입력 모드: 백엔드에서 ACDNT + SPIL_DATA 생성에 필요한 필드 추가
|
||||
if (isDirectInput) {
|
||||
payload.acdntNm = incidentName.trim();
|
||||
payload.spillUnit = spillUnit;
|
||||
payload.spillTypeCd = spillType;
|
||||
}
|
||||
|
||||
const { data } = await api.post<SimulationRunResponse>('/simulation/run', payload);
|
||||
setCurrentExecSn(data.execSn);
|
||||
|
||||
// 직접 입력으로 신규 생성된 경우: selectedAnalysis 갱신 + incidentName 초기화
|
||||
if (data.acdntSn && isDirectInput) {
|
||||
setSelectedAnalysis({
|
||||
acdntSn: data.acdntSn,
|
||||
acdntNm: incidentName.trim(),
|
||||
occurredAt: accidentTime ? `${accidentTime}:00` : '',
|
||||
analysisDate: new Date().toISOString(),
|
||||
requestor: '',
|
||||
duration: String(predictionTime),
|
||||
oilType,
|
||||
volume: spillAmount,
|
||||
location: '',
|
||||
lat: incidentCoord.lat,
|
||||
lon: incidentCoord.lon,
|
||||
kospsStatus: 'pending',
|
||||
poseidonStatus: 'pending',
|
||||
opendriftStatus: 'pending',
|
||||
backtrackStatus: 'pending',
|
||||
analyst: '',
|
||||
officeName: '',
|
||||
} as Analysis);
|
||||
// 다음 실행 시 동일 사고 재생성 방지 — 이후에는 selectedAnalysis.acdntSn 사용
|
||||
setIncidentName('');
|
||||
}
|
||||
// setIsRunningSimulation(false)는 폴링 결과 useEffect에서 처리
|
||||
} catch {
|
||||
// 백엔드 미구현 — 클라이언트 데모 궤적 fallback
|
||||
console.info('[prediction] 서버 시뮬레이션 미구현, 데모 궤적 생성')
|
||||
const models = Array.from(selectedModels)
|
||||
const demoTrajectory = generateDemoTrajectory(incidentCoord, models, predictionTime)
|
||||
setOilTrajectory(demoTrajectory)
|
||||
|
||||
// AI 방어선 자동 생성
|
||||
const demoBooms = generateAIBoomLines(demoTrajectory, incidentCoord, algorithmSettings)
|
||||
setBoomLines(demoBooms)
|
||||
|
||||
// 민감자원 로드
|
||||
setSensitiveResources(DEMO_SENSITIVE_RESOURCES)
|
||||
} finally {
|
||||
setIsRunningSimulation(false)
|
||||
setIsRunningSimulation(false);
|
||||
// 503 등 에러 시 상태 복원 (에러 메시지 표시는 향후 토스트로 처리)
|
||||
}
|
||||
}
|
||||
|
||||
const handleOpenReport = () => {
|
||||
const OIL_TYPE_CODE: Record<string, string> = {
|
||||
'벙커C유': 'BUNKER_C', '경유': 'DIESEL', '원유': 'CRUDE_OIL', '윤활유': 'LUBE_OIL',
|
||||
};
|
||||
const accidentName =
|
||||
selectedAnalysis?.acdntNm ||
|
||||
analysisDetail?.acdnt?.acdntNm ||
|
||||
incidentName ||
|
||||
'(미입력)';
|
||||
const occurTime =
|
||||
selectedAnalysis?.occurredAt ||
|
||||
analysisDetail?.acdnt?.occurredAt ||
|
||||
accidentTime ||
|
||||
'';
|
||||
const wx = analysisDetail?.weather?.[0] ?? null;
|
||||
|
||||
const payload: OilReportPayload = {
|
||||
incident: {
|
||||
name: accidentName,
|
||||
occurTime,
|
||||
location: selectedAnalysis?.location || analysisDetail?.acdnt?.location || '',
|
||||
lat: incidentCoord?.lat ?? selectedAnalysis?.lat ?? null,
|
||||
lon: incidentCoord?.lon ?? selectedAnalysis?.lon ?? null,
|
||||
pollutant: OIL_TYPE_CODE[oilType] || oilType,
|
||||
spillAmount: `${spillAmount} ${spillUnit}`,
|
||||
shipName: analysisDetail?.vessels?.[0]?.vesselNm || '',
|
||||
},
|
||||
pollution: {
|
||||
spillAmount: `${spillAmount.toFixed(2)} ${spillUnit}`,
|
||||
weathered: simulationSummary ? `${simulationSummary.weatheredVolume.toFixed(2)} m³` : '—',
|
||||
seaRemain: simulationSummary ? `${simulationSummary.remainingVolume.toFixed(2)} m³` : '—',
|
||||
pollutionArea: simulationSummary ? `${simulationSummary.pollutionArea.toFixed(2)} km²` : '—',
|
||||
coastAttach: simulationSummary ? `${simulationSummary.beachedVolume.toFixed(2)} m³` : '—',
|
||||
coastLength: simulationSummary ? `${simulationSummary.pollutionCoastLength.toFixed(2)} km` : '—',
|
||||
oilType: OIL_TYPE_CODE[oilType] || oilType,
|
||||
},
|
||||
weather: wx
|
||||
? { windDir: wx.wind, windSpeed: wx.wind, waveHeight: wx.wave, temp: wx.temp }
|
||||
: null,
|
||||
spread: { kosps: '—', openDrift: '—', poseidon: '—' },
|
||||
coastal: {
|
||||
firstTime: (() => {
|
||||
const beachedTimes = oilTrajectory.filter(p => p.stranded === 1).map(p => p.time);
|
||||
if (beachedTimes.length === 0) return null;
|
||||
const d = new Date(Math.min(...beachedTimes) * 1000);
|
||||
return `${String(d.getHours()).padStart(2, '0')}:${String(d.getMinutes()).padStart(2, '0')}`;
|
||||
})(),
|
||||
},
|
||||
hasSimulation: simulationSummary !== null,
|
||||
};
|
||||
|
||||
setOilReportPayload(payload);
|
||||
setReportGenCategory(0);
|
||||
navigateToTab('reports', 'generate');
|
||||
};
|
||||
|
||||
return (
|
||||
<div className="flex flex-1 overflow-hidden">
|
||||
<div className="relative flex flex-1 overflow-hidden">
|
||||
{/* Left Sidebar */}
|
||||
{activeSubTab === 'analysis' && (
|
||||
<LeftPanel
|
||||
selectedAnalysis={selectedAnalysis}
|
||||
enabledLayers={enabledLayers}
|
||||
onToggleLayer={handleToggleLayer}
|
||||
accidentTime={accidentTime}
|
||||
onAccidentTimeChange={setAccidentTime}
|
||||
incidentCoord={incidentCoord}
|
||||
onCoordChange={setIncidentCoord}
|
||||
onMapSelectClick={() => setIsSelectingLocation(true)}
|
||||
isSelectingLocation={isSelectingLocation}
|
||||
onMapSelectClick={() => setIsSelectingLocation(prev => !prev)}
|
||||
onRunSimulation={handleRunSimulation}
|
||||
isRunningSimulation={isRunningSimulation}
|
||||
selectedModels={selectedModels}
|
||||
@ -420,6 +686,10 @@ export function OilSpillView() {
|
||||
onOilTypeChange={setOilType}
|
||||
spillAmount={spillAmount}
|
||||
onSpillAmountChange={setSpillAmount}
|
||||
incidentName={incidentName}
|
||||
onIncidentNameChange={setIncidentName}
|
||||
spillUnit={spillUnit}
|
||||
onSpillUnitChange={setSpillUnit}
|
||||
boomLines={boomLines}
|
||||
onBoomLinesChange={setBoomLines}
|
||||
oilTrajectory={oilTrajectory}
|
||||
@ -435,6 +705,7 @@ export function OilSpillView() {
|
||||
onLayerOpacityChange={setLayerOpacity}
|
||||
layerBrightness={layerBrightness}
|
||||
onLayerBrightnessChange={setLayerBrightness}
|
||||
onImageAnalysisResult={handleImageAnalysisResult}
|
||||
/>
|
||||
)}
|
||||
|
||||
@ -450,7 +721,8 @@ export function OilSpillView() {
|
||||
<>
|
||||
<MapView
|
||||
enabledLayers={enabledLayers}
|
||||
incidentCoord={incidentCoord}
|
||||
incidentCoord={incidentCoord ?? undefined}
|
||||
flyToIncident={flyToCoord}
|
||||
isSelectingLocation={isSelectingLocation || isDrawingBoom}
|
||||
onMapClick={handleMapClick}
|
||||
oilTrajectory={oilTrajectory}
|
||||
@ -461,10 +733,17 @@ export function OilSpillView() {
|
||||
layerOpacity={layerOpacity}
|
||||
layerBrightness={layerBrightness}
|
||||
sensitiveResources={sensitiveResources}
|
||||
backtrackReplay={isReplayActive && replayShips.length > 0 ? {
|
||||
centerPoints={centerPoints}
|
||||
windData={windData}
|
||||
hydrData={hydrData}
|
||||
flyToTarget={flyToTarget}
|
||||
fitBoundsTarget={fitBoundsTarget}
|
||||
onIncidentFlyEnd={handleFlyEnd}
|
||||
externalCurrentTime={oilTrajectory.length > 0 ? currentStep : undefined}
|
||||
backtrackReplay={isReplayActive && replayShips.length > 0 && incidentCoord ? {
|
||||
isActive: true,
|
||||
ships: replayShips,
|
||||
collisionEvent: collisionEvent || undefined,
|
||||
collisionEvent: collisionEvent ?? null,
|
||||
replayFrame,
|
||||
totalFrames: TOTAL_REPLAY_FRAMES,
|
||||
incidentCoord,
|
||||
@ -472,148 +751,166 @@ export function OilSpillView() {
|
||||
/>
|
||||
|
||||
{/* 타임라인 플레이어 (리플레이 비활성 시) */}
|
||||
{!isReplayActive && <div className="absolute bottom-0 left-0 right-0 h-[72px] flex items-center px-5 gap-4" style={{
|
||||
background: 'rgba(15,21,36,0.95)', backdropFilter: 'blur(16px)',
|
||||
borderTop: '1px solid var(--bd)',
|
||||
zIndex: 1100
|
||||
}}>
|
||||
{/* 컨트롤 버튼 */}
|
||||
<div className="flex gap-1 shrink-0">
|
||||
{[
|
||||
{ icon: '⏮', action: () => setTimelinePosition(0) },
|
||||
{ icon: '◀', action: () => setTimelinePosition(Math.max(0, timelinePosition - 100 / 12)) },
|
||||
].map((btn, i) => (
|
||||
<button key={i} onClick={btn.action} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '14px', transition: '0.2s'
|
||||
}}>{btn.icon}</button>
|
||||
))}
|
||||
<button onClick={() => setIsPlaying(!isPlaying)} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: isPlaying ? '1px solid var(--cyan)' : '1px solid var(--bd)',
|
||||
background: isPlaying ? 'var(--cyan)' : 'var(--bg3)',
|
||||
color: isPlaying ? 'var(--bg0)' : 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '14px', transition: '0.2s'
|
||||
}}>{isPlaying ? '⏸' : '▶'}</button>
|
||||
{[
|
||||
{ icon: '▶▶', action: () => setTimelinePosition(Math.min(100, timelinePosition + 100 / 12)) },
|
||||
{ icon: '⏭', action: () => setTimelinePosition(100) },
|
||||
].map((btn, i) => (
|
||||
<button key={i} onClick={btn.action} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '12px', transition: '0.2s'
|
||||
}}>{btn.icon}</button>
|
||||
))}
|
||||
<div className="w-2" />
|
||||
<button onClick={() => setPlaySpeed(playSpeed >= 4 ? 1 : playSpeed * 2)} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '11px', fontWeight: 600, fontFamily: 'var(--fM)', transition: '0.2s'
|
||||
}}>{playSpeed}×</button>
|
||||
</div>
|
||||
|
||||
{/* 타임라인 슬라이더 */}
|
||||
<div className="flex-1 flex flex-col gap-1.5">
|
||||
{/* 시간 라벨 */}
|
||||
<div className="flex justify-between px-1">
|
||||
{['0h', '6h', '12h', '18h', '24h', '36h', '48h', '60h', '72h'].map((label, i) => {
|
||||
const pos = [0, 8.33, 16.67, 25, 33.33, 50, 66.67, 83.33, 100][i]
|
||||
const isActive = Math.abs(timelinePosition - pos) < 5
|
||||
return (
|
||||
<span key={label} style={{
|
||||
fontSize: '10px', fontFamily: 'var(--fM)',
|
||||
color: isActive ? 'var(--cyan)' : 'var(--t3)',
|
||||
fontWeight: isActive ? 600 : 400, cursor: 'pointer'
|
||||
}} onClick={() => setTimelinePosition(pos)}>{label}</span>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
|
||||
{/* 슬라이더 트랙 */}
|
||||
<div className="relative h-6 flex items-center">
|
||||
{/* 트랙 레일 */}
|
||||
<div
|
||||
style={{ width: '100%', height: '4px', background: 'var(--bd)', borderRadius: '2px', position: 'relative', cursor: 'pointer' }}
|
||||
onClick={(e) => {
|
||||
const rect = e.currentTarget.getBoundingClientRect()
|
||||
setTimelinePosition(Math.max(0, Math.min(100, ((e.clientX - rect.left) / rect.width) * 100)))
|
||||
}}
|
||||
>
|
||||
{/* 진행 바 */}
|
||||
<div style={{
|
||||
position: 'absolute', top: 0, left: 0,
|
||||
width: `${timelinePosition}%`, height: '100%',
|
||||
background: 'linear-gradient(90deg, var(--cyan), var(--blue))',
|
||||
borderRadius: '2px', transition: 'width 0.15s'
|
||||
}} />
|
||||
{/* 주요 마커 */}
|
||||
{[0, 16.67, 33.33, 50, 66.67, 83.33, 100].map((pos) => (
|
||||
<div key={`mj-${pos}`} style={{
|
||||
position: 'absolute', width: '2px', height: '14px',
|
||||
background: 'var(--t3)', top: '-5px', left: `${pos}%`
|
||||
}} />
|
||||
{!isReplayActive && (() => {
|
||||
const progressPct = maxTime > 0 ? (currentStep / maxTime) * 100 : 0;
|
||||
// 동적 라벨: 스텝 수에 따라 균등 분배
|
||||
const visibleLabels: number[] = (() => {
|
||||
if (timeSteps.length === 0) return [0];
|
||||
if (timeSteps.length <= 8) return timeSteps;
|
||||
const interval = Math.ceil(timeSteps.length / 7);
|
||||
return timeSteps.filter((_, i) => i % interval === 0 || i === timeSteps.length - 1);
|
||||
})();
|
||||
return (
|
||||
<div className="absolute bottom-0 left-0 right-0 h-[72px] flex items-center px-5 gap-4" style={{
|
||||
background: 'rgba(15,21,36,0.95)', backdropFilter: 'blur(16px)',
|
||||
borderTop: '1px solid var(--bd)',
|
||||
zIndex: 1100
|
||||
}}>
|
||||
{/* 컨트롤 버튼 */}
|
||||
<div className="flex gap-1 shrink-0">
|
||||
{[
|
||||
{ icon: '⏮', action: () => { setCurrentStep(timeSteps[0] ?? 0); setIsPlaying(false); } },
|
||||
{ icon: '◀', action: () => { const idx = timeSteps.indexOf(currentStep); if (idx > 0) setCurrentStep(timeSteps[idx - 1]); } },
|
||||
].map((btn, i) => (
|
||||
<button key={i} onClick={btn.action} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '14px', transition: '0.2s'
|
||||
}}>{btn.icon}</button>
|
||||
))}
|
||||
{/* 보조 마커 */}
|
||||
{[8.33, 25].map((pos) => (
|
||||
<div key={`mn-${pos}`} style={{
|
||||
position: 'absolute', width: '2px', height: '10px',
|
||||
background: 'var(--bdL)', top: '-3px', left: `${pos}%`
|
||||
}} />
|
||||
))}
|
||||
{/* 방어선 설치 이벤트 마커 */}
|
||||
{boomLines.length > 0 && [
|
||||
{ pos: 4.2, label: '1차 방어선 설치 (+3h)' },
|
||||
{ pos: 8.3, label: '2차 방어선 설치 (+6h)' },
|
||||
{ pos: 12.5, label: '3차 방어선 설치 (+9h)' },
|
||||
].slice(0, boomLines.length).map((bm, i) => (
|
||||
<div key={`bm-${i}`} title={bm.label} style={{
|
||||
position: 'absolute', top: '-18px', left: `${bm.pos}%`,
|
||||
transform: 'translateX(-50%)', fontSize: '12px', cursor: 'pointer',
|
||||
filter: 'drop-shadow(0 0 4px rgba(245,158,11,0.5))'
|
||||
}}>🛡</div>
|
||||
<button onClick={() => {
|
||||
if (!isPlaying && currentStep >= maxTime) setCurrentStep(timeSteps[0] ?? 0);
|
||||
setIsPlaying(p => !p);
|
||||
}} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: isPlaying ? '1px solid var(--cyan)' : '1px solid var(--bd)',
|
||||
background: isPlaying ? 'var(--cyan)' : 'var(--bg3)',
|
||||
color: isPlaying ? 'var(--bg0)' : 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '14px', transition: '0.2s'
|
||||
}}>{isPlaying ? '⏸' : '▶'}</button>
|
||||
{[
|
||||
{ icon: '▶▶', action: () => { const idx = timeSteps.indexOf(currentStep); if (idx < timeSteps.length - 1) setCurrentStep(timeSteps[idx + 1]); } },
|
||||
{ icon: '⏭', action: () => { setCurrentStep(maxTime); setIsPlaying(false); } },
|
||||
].map((btn, i) => (
|
||||
<button key={i} onClick={btn.action} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '12px', transition: '0.2s'
|
||||
}}>{btn.icon}</button>
|
||||
))}
|
||||
<div className="w-2" />
|
||||
<button onClick={() => setPlaySpeed(playSpeed >= 4 ? 1 : playSpeed * 2)} style={{
|
||||
width: '34px', height: '34px', borderRadius: 'var(--rS, 4px)',
|
||||
border: '1px solid var(--bd)', background: 'var(--bg3)', color: 'var(--t2)',
|
||||
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
||||
cursor: 'pointer', fontSize: '11px', fontWeight: 600, fontFamily: 'var(--fM)', transition: '0.2s'
|
||||
}}>{playSpeed}×</button>
|
||||
</div>
|
||||
{/* 드래그 핸들 */}
|
||||
<div style={{
|
||||
position: 'absolute', left: `${timelinePosition}%`, top: '50%',
|
||||
transform: 'translate(-50%, -50%)',
|
||||
width: '16px', height: '16px',
|
||||
background: 'var(--cyan)', border: '3px solid var(--bg0)',
|
||||
borderRadius: '50%', cursor: 'grab',
|
||||
boxShadow: '0 0 10px rgba(6,182,212,0.4)', zIndex: 2,
|
||||
transition: 'left 0.15s'
|
||||
}} />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 시간 정보 */}
|
||||
<div style={{ display: 'flex', flexDirection: 'column', alignItems: 'flex-end', gap: '4px', flexShrink: 0, minWidth: '200px' }}>
|
||||
<div style={{ fontSize: '14px', fontWeight: 600, color: 'var(--cyan)', fontFamily: 'var(--fM)' }}>
|
||||
+{Math.round(timelinePosition * 72 / 100)}h — {(() => {
|
||||
const d = new Date(); d.setHours(d.getHours() + Math.round(timelinePosition * 72 / 100))
|
||||
return `${String(d.getMonth() + 1).padStart(2, '0')}/${String(d.getDate()).padStart(2, '0')} ${String(d.getHours()).padStart(2, '0')}:${String(d.getMinutes()).padStart(2, '0')} KST`
|
||||
})()}
|
||||
</div>
|
||||
<div style={{ display: 'flex', gap: '14px' }}>
|
||||
{[
|
||||
{ label: '풍화율', value: `${Math.min(99, Math.round(timelinePosition * 0.4))}%` },
|
||||
{ label: '면적', value: `${(timelinePosition * 0.08).toFixed(1)} km²` },
|
||||
{ label: '차단율', value: boomLines.length > 0 ? `${Math.min(95, 70 + Math.round(timelinePosition * 0.2))}%` : '—', color: 'var(--boom)' },
|
||||
].map((s, i) => (
|
||||
<div key={i} style={{ display: 'flex', alignItems: 'center', gap: '5px', fontSize: '11px' }}>
|
||||
<span className="text-text-3">{s.label}</span>
|
||||
<span style={{ color: s.color, fontWeight: 600, fontFamily: 'var(--fM)' }}>{s.value}</span>
|
||||
{/* 타임라인 슬라이더 */}
|
||||
<div className="flex-1 flex flex-col gap-1.5">
|
||||
{/* 동적 시간 라벨 */}
|
||||
<div className="relative h-4">
|
||||
{visibleLabels.map(t => {
|
||||
const pos = maxTime > 0 ? (t / maxTime) * 100 : 0;
|
||||
const isActive = t === currentStep;
|
||||
return (
|
||||
<span key={t} style={{
|
||||
position: 'absolute', left: `${pos}%`, transform: 'translateX(-50%)',
|
||||
fontSize: '10px', fontFamily: 'var(--fM)',
|
||||
color: isActive ? 'var(--cyan)' : 'var(--t3)',
|
||||
fontWeight: isActive ? 600 : 400, cursor: 'pointer', whiteSpace: 'nowrap'
|
||||
}} onClick={() => setCurrentStep(t)}>{t}h</span>
|
||||
)
|
||||
})}
|
||||
</div>
|
||||
))}
|
||||
|
||||
{/* 슬라이더 트랙 */}
|
||||
<div className="relative h-6 flex items-center">
|
||||
<div
|
||||
style={{ width: '100%', height: '4px', background: 'var(--bd)', borderRadius: '2px', position: 'relative', cursor: 'pointer' }}
|
||||
onClick={(e) => {
|
||||
if (timeSteps.length === 0) return;
|
||||
const rect = e.currentTarget.getBoundingClientRect();
|
||||
const pct = (e.clientX - rect.left) / rect.width;
|
||||
const targetTime = pct * maxTime;
|
||||
const closest = timeSteps.reduce((a, b) =>
|
||||
Math.abs(b - targetTime) < Math.abs(a - targetTime) ? b : a
|
||||
);
|
||||
setCurrentStep(closest);
|
||||
}}
|
||||
>
|
||||
{/* 진행 바 */}
|
||||
<div style={{
|
||||
position: 'absolute', top: 0, left: 0,
|
||||
width: `${progressPct}%`, height: '100%',
|
||||
background: 'linear-gradient(90deg, var(--cyan), var(--blue))',
|
||||
borderRadius: '2px', transition: 'width 0.15s'
|
||||
}} />
|
||||
{/* 스텝 마커 (각 타임스텝 위치에 틱 표시) */}
|
||||
{timeSteps.map(t => {
|
||||
const pos = maxTime > 0 ? (t / maxTime) * 100 : 0;
|
||||
return (
|
||||
<div key={`tick-${t}`} style={{
|
||||
position: 'absolute', width: '2px', height: '10px',
|
||||
background: t <= currentStep ? 'var(--cyan)' : 'var(--t3)',
|
||||
top: '-3px', left: `${pos}%`, opacity: 0.6
|
||||
}} />
|
||||
);
|
||||
})}
|
||||
{/* 방어선 설치 이벤트 마커 */}
|
||||
{boomLines.length > 0 && [
|
||||
{ pos: 4.2, label: '1차 방어선 설치 (+3h)' },
|
||||
{ pos: 8.3, label: '2차 방어선 설치 (+6h)' },
|
||||
{ pos: 12.5, label: '3차 방어선 설치 (+9h)' },
|
||||
].slice(0, boomLines.length).map((bm, i) => (
|
||||
<div key={`bm-${i}`} title={bm.label} style={{
|
||||
position: 'absolute', top: '-18px', left: `${bm.pos}%`,
|
||||
transform: 'translateX(-50%)', fontSize: '12px', cursor: 'pointer',
|
||||
filter: 'drop-shadow(0 0 4px rgba(245,158,11,0.5))'
|
||||
}}>🛡</div>
|
||||
))}
|
||||
</div>
|
||||
{/* 드래그 핸들 */}
|
||||
<div style={{
|
||||
position: 'absolute', left: `${progressPct}%`, top: '50%',
|
||||
transform: 'translate(-50%, -50%)',
|
||||
width: '16px', height: '16px',
|
||||
background: 'var(--cyan)', border: '3px solid var(--bg0)',
|
||||
borderRadius: '50%', cursor: 'grab',
|
||||
boxShadow: '0 0 10px rgba(6,182,212,0.4)', zIndex: 2,
|
||||
transition: 'left 0.15s'
|
||||
}} />
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 시간 정보 */}
|
||||
<div style={{ display: 'flex', flexDirection: 'column', alignItems: 'flex-end', gap: '4px', flexShrink: 0, minWidth: '200px' }}>
|
||||
<div style={{ fontSize: '14px', fontWeight: 600, color: 'var(--cyan)', fontFamily: 'var(--fM)' }}>
|
||||
+{currentStep}h — {(() => {
|
||||
const d = new Date(); d.setHours(d.getHours() + currentStep);
|
||||
return `${String(d.getMonth() + 1).padStart(2, '0')}/${String(d.getDate()).padStart(2, '0')} ${String(d.getHours()).padStart(2, '0')}:${String(d.getMinutes()).padStart(2, '0')} KST`;
|
||||
})()}
|
||||
</div>
|
||||
<div style={{ display: 'flex', gap: '14px' }}>
|
||||
{[
|
||||
{ label: '풍화율', value: `${Math.min(99, Math.round(progressPct * 0.4))}%` },
|
||||
{ label: '면적', value: `${(progressPct * 0.08).toFixed(1)} km²` },
|
||||
{ label: '차단율', value: boomLines.length > 0 ? `${Math.min(95, 70 + Math.round(progressPct * 0.2))}%` : '—', color: 'var(--boom)' },
|
||||
].map((s, i) => (
|
||||
<div key={i} style={{ display: 'flex', alignItems: 'center', gap: '5px', fontSize: '11px' }}>
|
||||
<span className="text-text-3">{s.label}</span>
|
||||
<span style={{ color: s.color, fontWeight: 600, fontFamily: 'var(--fM)' }}>{s.value}</span>
|
||||
</div>
|
||||
))}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</div>}
|
||||
);
|
||||
})()}
|
||||
|
||||
{/* 역추적 리플레이 바 */}
|
||||
{isReplayActive && (
|
||||
@ -627,7 +924,7 @@ export function OilSpillView() {
|
||||
onSpeedChange={setReplaySpeed}
|
||||
onClose={handleCloseReplay}
|
||||
replayShips={replayShips}
|
||||
collisionEvent={collisionEvent || undefined}
|
||||
collisionEvent={collisionEvent}
|
||||
/>
|
||||
)}
|
||||
</>
|
||||
@ -635,7 +932,15 @@ export function OilSpillView() {
|
||||
</div>
|
||||
|
||||
{/* Right Panel */}
|
||||
{activeSubTab === 'analysis' && <RightPanel onOpenBacktrack={handleOpenBacktrack} onOpenRecalc={() => setRecalcModalOpen(true)} onOpenReport={() => { setReportGenCategory(0); navigateToTab('reports', 'generate') }} detail={analysisDetail} />}
|
||||
{activeSubTab === 'analysis' && <RightPanel onOpenBacktrack={handleOpenBacktrack} onOpenRecalc={() => setRecalcModalOpen(true)} onOpenReport={handleOpenReport} detail={analysisDetail} summary={simulationSummary} />}
|
||||
|
||||
{/* 확산 예측 실행 중 로딩 오버레이 */}
|
||||
{isRunningSimulation && (
|
||||
<SimulationLoadingOverlay
|
||||
status={simStatus?.status === 'RUNNING' ? 'RUNNING' : 'PENDING'}
|
||||
progress={simStatus?.progress}
|
||||
/>
|
||||
)}
|
||||
|
||||
{/* 재계산 모달 */}
|
||||
<RecalcModal
|
||||
@ -645,7 +950,7 @@ export function OilSpillView() {
|
||||
spillAmount={spillAmount}
|
||||
spillType={spillType}
|
||||
predictionTime={predictionTime}
|
||||
incidentCoord={incidentCoord}
|
||||
incidentCoord={incidentCoord ?? { lat: 0, lon: 0 }}
|
||||
selectedModels={selectedModels}
|
||||
onSubmit={(params) => {
|
||||
setOilType(params.oilType)
|
||||
|
||||
@ -1,14 +1,19 @@
|
||||
import { useState } from 'react'
|
||||
import { useState, useRef } from 'react'
|
||||
import { decimalToDMS } from '@common/utils/coordinates'
|
||||
import { ComboBox } from '@common/components/ui/ComboBox'
|
||||
import { ALL_MODELS } from './OilSpillView'
|
||||
import type { PredictionModel } from './OilSpillView'
|
||||
import { analyzeImage } from '../services/predictionApi'
|
||||
import type { ImageAnalyzeResult } from '../services/predictionApi'
|
||||
|
||||
interface PredictionInputSectionProps {
|
||||
expanded: boolean
|
||||
onToggle: () => void
|
||||
incidentCoord: { lon: number; lat: number }
|
||||
accidentTime: string
|
||||
onAccidentTimeChange: (time: string) => void
|
||||
incidentCoord: { lon: number; lat: number } | null
|
||||
onCoordChange: (coord: { lon: number; lat: number }) => void
|
||||
isSelectingLocation: boolean
|
||||
onMapSelectClick: () => void
|
||||
onRunSimulation: () => void
|
||||
isRunningSimulation: boolean
|
||||
@ -22,13 +27,21 @@ interface PredictionInputSectionProps {
|
||||
onOilTypeChange: (type: string) => void
|
||||
spillAmount: number
|
||||
onSpillAmountChange: (amount: number) => void
|
||||
incidentName: string
|
||||
onIncidentNameChange: (name: string) => void
|
||||
spillUnit: string
|
||||
onSpillUnitChange: (unit: string) => void
|
||||
onImageAnalysisResult?: (result: ImageAnalyzeResult) => void
|
||||
}
|
||||
|
||||
const PredictionInputSection = ({
|
||||
expanded,
|
||||
onToggle,
|
||||
accidentTime,
|
||||
onAccidentTimeChange,
|
||||
incidentCoord,
|
||||
onCoordChange,
|
||||
isSelectingLocation,
|
||||
onMapSelectClick,
|
||||
onRunSimulation,
|
||||
isRunningSimulation,
|
||||
@ -42,26 +55,57 @@ const PredictionInputSection = ({
|
||||
onOilTypeChange,
|
||||
spillAmount,
|
||||
onSpillAmountChange,
|
||||
incidentName,
|
||||
onIncidentNameChange,
|
||||
spillUnit,
|
||||
onSpillUnitChange,
|
||||
onImageAnalysisResult,
|
||||
}: PredictionInputSectionProps) => {
|
||||
const [inputMode, setInputMode] = useState<'direct' | 'upload'>('direct')
|
||||
const [uploadedImage, setUploadedImage] = useState<string | null>(null)
|
||||
const [uploadedFileName, setUploadedFileName] = useState<string>('')
|
||||
const [uploadedFile, setUploadedFile] = useState<File | null>(null)
|
||||
const [isAnalyzing, setIsAnalyzing] = useState(false)
|
||||
const [analyzeError, setAnalyzeError] = useState<string | null>(null)
|
||||
const [analyzeResult, setAnalyzeResult] = useState<ImageAnalyzeResult | null>(null)
|
||||
const fileInputRef = useRef<HTMLInputElement>(null)
|
||||
|
||||
const handleImageUpload = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0]
|
||||
if (file) {
|
||||
setUploadedFileName(file.name)
|
||||
const reader = new FileReader()
|
||||
reader.onload = (event) => {
|
||||
setUploadedImage(event.target?.result as string)
|
||||
}
|
||||
reader.readAsDataURL(file)
|
||||
}
|
||||
const handleFileSelect = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const file = e.target.files?.[0] ?? null
|
||||
setUploadedFile(file)
|
||||
setAnalyzeError(null)
|
||||
setAnalyzeResult(null)
|
||||
}
|
||||
|
||||
const removeUploadedImage = () => {
|
||||
setUploadedImage(null)
|
||||
setUploadedFileName('')
|
||||
const handleRemoveFile = () => {
|
||||
setUploadedFile(null)
|
||||
setAnalyzeError(null)
|
||||
setAnalyzeResult(null)
|
||||
if (fileInputRef.current) fileInputRef.current.value = ''
|
||||
}
|
||||
|
||||
const handleAnalyze = async () => {
|
||||
if (!uploadedFile) return
|
||||
setIsAnalyzing(true)
|
||||
setAnalyzeError(null)
|
||||
try {
|
||||
const result = await analyzeImage(uploadedFile)
|
||||
setAnalyzeResult(result)
|
||||
onImageAnalysisResult?.(result)
|
||||
} catch (err: unknown) {
|
||||
if (err && typeof err === 'object' && 'response' in err) {
|
||||
const res = (err as { response?: { data?: { error?: string } } }).response
|
||||
if (res?.data?.error === 'GPS_NOT_FOUND') {
|
||||
setAnalyzeError('GPS 정보가 없는 이미지입니다')
|
||||
return
|
||||
}
|
||||
if (res?.data?.error === 'TIMEOUT') {
|
||||
setAnalyzeError('분석 서버 응답 없음 (시간 초과)')
|
||||
return
|
||||
}
|
||||
}
|
||||
setAnalyzeError('이미지 분석 중 오류가 발생했습니다')
|
||||
} finally {
|
||||
setIsAnalyzing(false)
|
||||
}
|
||||
}
|
||||
|
||||
return (
|
||||
@ -88,8 +132,7 @@ const PredictionInputSection = ({
|
||||
name="prdType"
|
||||
checked={inputMode === 'direct'}
|
||||
onChange={() => setInputMode('direct')}
|
||||
className="m-0 w-[11px] h-[11px]"
|
||||
className="accent-[var(--cyan)]"
|
||||
className="accent-[var(--cyan)] m-0 w-[11px] h-[11px]"
|
||||
/>
|
||||
직접 입력
|
||||
</label>
|
||||
@ -99,8 +142,7 @@ const PredictionInputSection = ({
|
||||
name="prdType"
|
||||
checked={inputMode === 'upload'}
|
||||
onChange={() => setInputMode('upload')}
|
||||
className="m-0 w-[11px] h-[11px]"
|
||||
className="accent-[var(--cyan)]"
|
||||
className="accent-[var(--cyan)] m-0 w-[11px] h-[11px]"
|
||||
/>
|
||||
이미지 업로드
|
||||
</label>
|
||||
@ -109,43 +151,23 @@ const PredictionInputSection = ({
|
||||
{/* Direct Input Mode */}
|
||||
{inputMode === 'direct' && (
|
||||
<>
|
||||
<input className="prd-i" placeholder="사고명 직접 입력" />
|
||||
<input className="prd-i" placeholder="또는 사고 리스트에서 선택" />
|
||||
<input
|
||||
className="prd-i"
|
||||
placeholder="사고명 직접 입력"
|
||||
value={incidentName}
|
||||
onChange={(e) => onIncidentNameChange(e.target.value)}
|
||||
/>
|
||||
<input className="prd-i" placeholder="또는 사고 리스트에서 선택" readOnly />
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* Image Upload Mode */}
|
||||
{inputMode === 'upload' && (
|
||||
<>
|
||||
<input className="prd-i" placeholder="여수 유조선 충돌" />
|
||||
<ComboBox
|
||||
className="prd-i"
|
||||
value=""
|
||||
onChange={() => {}}
|
||||
options={[
|
||||
{ value: '', label: '여수 유조선 충돌 (INC-0042)' },
|
||||
{ value: 'INC-0042', label: '여수 유조선 충돌 (INC-0042)' }
|
||||
]}
|
||||
placeholder="사고 선택"
|
||||
/>
|
||||
|
||||
{/* Upload Success Message */}
|
||||
{uploadedImage && (
|
||||
<div className="flex items-center gap-[6px] text-[10px] font-semibold text-[#22c55e] rounded"
|
||||
style={{
|
||||
padding: '6px 8px',
|
||||
background: 'rgba(34,197,94,0.1)',
|
||||
border: '1px solid rgba(34,197,94,0.3)',
|
||||
borderRadius: 'var(--rS)',
|
||||
}}>
|
||||
<span className="text-[12px]">✓</span>
|
||||
내 이미지가 업로드됨
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* File Upload Area */}
|
||||
{!uploadedImage ? (
|
||||
<label className="flex items-center justify-center text-[11px] text-text-3 cursor-pointer"
|
||||
{/* 파일 선택 영역 */}
|
||||
{!uploadedFile ? (
|
||||
<label
|
||||
className="flex items-center justify-center text-[11px] text-text-3 cursor-pointer"
|
||||
style={{
|
||||
padding: '20px',
|
||||
background: 'var(--bg0)',
|
||||
@ -160,66 +182,96 @@ const PredictionInputSection = ({
|
||||
onMouseLeave={(e) => {
|
||||
e.currentTarget.style.borderColor = 'var(--bd)'
|
||||
e.currentTarget.style.background = 'var(--bg0)'
|
||||
}}>
|
||||
}}
|
||||
>
|
||||
📁 이미지 파일을 선택하세요
|
||||
<input
|
||||
ref={fileInputRef}
|
||||
type="file"
|
||||
accept="image/*"
|
||||
onChange={handleImageUpload}
|
||||
onChange={handleFileSelect}
|
||||
className="hidden"
|
||||
/>
|
||||
</label>
|
||||
) : (
|
||||
<div className="flex items-center justify-between font-mono text-[10px] bg-bg-0 border border-border"
|
||||
style={{
|
||||
padding: '8px 10px',
|
||||
borderRadius: 'var(--rS)',
|
||||
}}>
|
||||
<span className="text-text-2">📄 {uploadedFileName || 'example_plot_0.gif'}</span>
|
||||
<div
|
||||
className="flex items-center justify-between font-mono text-[10px] bg-bg-0 border border-border"
|
||||
style={{ padding: '8px 10px', borderRadius: 'var(--rS)' }}
|
||||
>
|
||||
<span className="text-text-2">📄 {uploadedFile.name}</span>
|
||||
<button
|
||||
onClick={removeUploadedImage}
|
||||
onClick={handleRemoveFile}
|
||||
className="text-[10px] text-text-3 bg-transparent border-none cursor-pointer"
|
||||
style={{ padding: '2px 6px', transition: '0.15s' }}
|
||||
onMouseEnter={(e) => {
|
||||
e.currentTarget.style.color = 'var(--red)'
|
||||
}}
|
||||
onMouseLeave={(e) => {
|
||||
e.currentTarget.style.color = 'var(--t3)'
|
||||
}}
|
||||
onMouseEnter={(e) => { e.currentTarget.style.color = 'var(--red)' }}
|
||||
onMouseLeave={(e) => { e.currentTarget.style.color = 'var(--t3)' }}
|
||||
>
|
||||
✕
|
||||
</button>
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Dropdowns */}
|
||||
<div className="grid grid-cols-2 gap-1">
|
||||
<ComboBox
|
||||
className="prd-i"
|
||||
value=""
|
||||
onChange={() => {}}
|
||||
options={[
|
||||
{ value: '', label: '유출회사' },
|
||||
{ value: 'company1', label: '회사A' },
|
||||
{ value: 'company2', label: '회사B' }
|
||||
]}
|
||||
placeholder="유출회사"
|
||||
/>
|
||||
<ComboBox
|
||||
className="prd-i"
|
||||
value=""
|
||||
onChange={() => {}}
|
||||
options={[
|
||||
{ value: '', label: '예상시각' },
|
||||
{ value: '09:00', label: '09:00' },
|
||||
{ value: '12:00', label: '12:00' }
|
||||
]}
|
||||
placeholder="예상시각"
|
||||
/>
|
||||
</div>
|
||||
{/* 분석 실행 버튼 */}
|
||||
<button
|
||||
className="prd-btn pri"
|
||||
style={{ padding: '7px', fontSize: '11px' }}
|
||||
onClick={handleAnalyze}
|
||||
disabled={!uploadedFile || isAnalyzing}
|
||||
>
|
||||
{isAnalyzing ? '⏳ 분석 중...' : '🔍 이미지 분석 실행'}
|
||||
</button>
|
||||
|
||||
{/* 에러 메시지 */}
|
||||
{analyzeError && (
|
||||
<div
|
||||
className="text-[10px] font-semibold"
|
||||
style={{
|
||||
padding: '6px 8px',
|
||||
background: 'rgba(239,68,68,0.1)',
|
||||
border: '1px solid rgba(239,68,68,0.3)',
|
||||
borderRadius: 'var(--rS)',
|
||||
color: 'var(--red)',
|
||||
}}
|
||||
>
|
||||
⚠ {analyzeError}
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* 분석 완료 메시지 */}
|
||||
{analyzeResult && (
|
||||
<div
|
||||
className="text-[10px] font-semibold"
|
||||
style={{
|
||||
padding: '6px 8px',
|
||||
background: 'rgba(34,197,94,0.1)',
|
||||
border: '1px solid rgba(34,197,94,0.3)',
|
||||
borderRadius: 'var(--rS)',
|
||||
color: '#22c55e',
|
||||
lineHeight: 1.6,
|
||||
}}
|
||||
>
|
||||
✓ 분석 완료<br />
|
||||
<span className="font-normal text-text-3">
|
||||
위도 {analyzeResult.lat.toFixed(4)} / 경도 {analyzeResult.lon.toFixed(4)}<br />
|
||||
유종: {analyzeResult.oilType} / 면적: {analyzeResult.area.toFixed(1)} m²
|
||||
</span>
|
||||
</div>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* 사고 발생 시각 */}
|
||||
<div className="flex flex-col gap-0.5">
|
||||
<label className="text-[9px] text-text-3 font-korean">사고 발생 시각 (KST)</label>
|
||||
<input
|
||||
className="prd-i"
|
||||
type="datetime-local"
|
||||
value={accidentTime}
|
||||
onChange={(e) => onAccidentTimeChange(e.target.value)}
|
||||
style={{ colorScheme: 'dark' }}
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Coordinates + Map Button */}
|
||||
<div className="flex flex-col gap-1">
|
||||
<div className="grid items-center gap-1" style={{ gridTemplateColumns: '1fr 1fr auto' }}>
|
||||
@ -230,7 +282,7 @@ const PredictionInputSection = ({
|
||||
value={incidentCoord?.lat ?? ''}
|
||||
onChange={(e) => {
|
||||
const value = e.target.value === '' ? 0 : parseFloat(e.target.value)
|
||||
onCoordChange({ ...incidentCoord, lat: isNaN(value) ? 0 : value })
|
||||
onCoordChange({ lon: incidentCoord?.lon ?? 0, lat: isNaN(value) ? 0 : value })
|
||||
}}
|
||||
placeholder="위도°"
|
||||
/>
|
||||
@ -241,19 +293,21 @@ const PredictionInputSection = ({
|
||||
value={incidentCoord?.lon ?? ''}
|
||||
onChange={(e) => {
|
||||
const value = e.target.value === '' ? 0 : parseFloat(e.target.value)
|
||||
onCoordChange({ ...incidentCoord, lon: isNaN(value) ? 0 : value })
|
||||
onCoordChange({ lat: incidentCoord?.lat ?? 0, lon: isNaN(value) ? 0 : value })
|
||||
}}
|
||||
placeholder="경도°"
|
||||
/>
|
||||
<button className="prd-map-btn" onClick={onMapSelectClick}>📍 지도</button>
|
||||
<button
|
||||
className={`prd-map-btn${isSelectingLocation ? ' active' : ''}`}
|
||||
onClick={onMapSelectClick}
|
||||
>📍 지도</button>
|
||||
</div>
|
||||
{/* 도분초 표시 */}
|
||||
{incidentCoord && !isNaN(incidentCoord.lat) && !isNaN(incidentCoord.lon) && (
|
||||
<div className="text-[9px] text-text-3 font-mono border border-border bg-bg-0"
|
||||
style={{
|
||||
padding: '4px 8px',
|
||||
borderRadius: 'var(--rS)',
|
||||
}}>
|
||||
<div
|
||||
className="text-[9px] text-text-3 font-mono border border-border bg-bg-0"
|
||||
style={{ padding: '4px 8px', borderRadius: 'var(--rS)' }}
|
||||
>
|
||||
{decimalToDMS(incidentCoord.lat, true)} / {decimalToDMS(incidentCoord.lon, false)}
|
||||
</div>
|
||||
)}
|
||||
@ -299,8 +353,8 @@ const PredictionInputSection = ({
|
||||
/>
|
||||
<ComboBox
|
||||
className="prd-i"
|
||||
value="kL"
|
||||
onChange={() => {}}
|
||||
value={spillUnit}
|
||||
onChange={onSpillUnitChange}
|
||||
options={[
|
||||
{ value: 'kL', label: 'kL' },
|
||||
{ value: 'ton', label: 'Ton' },
|
||||
@ -321,19 +375,6 @@ const PredictionInputSection = ({
|
||||
/>
|
||||
</div>
|
||||
|
||||
{/* Image Analysis Note (Upload Mode Only) */}
|
||||
{inputMode === 'upload' && uploadedImage && (
|
||||
<div className="text-[9px] text-text-3 leading-[1.4]"
|
||||
style={{
|
||||
padding: '8px',
|
||||
background: 'rgba(59,130,246,0.08)',
|
||||
border: '1px solid rgba(59,130,246,0.2)',
|
||||
borderRadius: 'var(--rS)',
|
||||
}}>
|
||||
📊 이미지 내 확산경로를 분석하였습니다. 각 방제요소 가이드 참고하세요.
|
||||
</div>
|
||||
)}
|
||||
|
||||
{/* Divider */}
|
||||
<div className="h-px bg-border my-0.5" />
|
||||
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import { useState } from 'react'
|
||||
import type { PredictionDetail } from '../services/predictionApi'
|
||||
import type { PredictionDetail, SimulationSummary } from '../services/predictionApi'
|
||||
|
||||
export function RightPanel({ onOpenBacktrack, onOpenRecalc, onOpenReport, detail }: { onOpenBacktrack?: () => void; onOpenRecalc?: () => void; onOpenReport?: () => void; detail?: PredictionDetail | null }) {
|
||||
export function RightPanel({ onOpenBacktrack, onOpenRecalc, onOpenReport, detail, summary }: { onOpenBacktrack?: () => void; onOpenRecalc?: () => void; onOpenReport?: () => void; detail?: PredictionDetail | null; summary?: SimulationSummary | null }) {
|
||||
const vessel = detail?.vessels?.[0]
|
||||
const vessel2 = detail?.vessels?.[1]
|
||||
const spill = detail?.spill
|
||||
@ -44,11 +44,11 @@ export function RightPanel({ onOpenBacktrack, onOpenRecalc, onOpenReport, detail
|
||||
<Section title="오염 종합 상황" badge="위험" badgeColor="red">
|
||||
<div className="grid grid-cols-2 gap-0.5 text-[9px]">
|
||||
<StatBox label="유출량" value={spill?.volume != null ? spill.volume.toFixed(2) : '—'} unit={spill?.unit || 'kl'} color="var(--t1)" />
|
||||
<StatBox label="풍화량" value="0.43" unit="kl" color="var(--orange)" />
|
||||
<StatBox label="해상잔존" value="9.57" unit="kl" color="var(--blue)" />
|
||||
<StatBox label="연안부착" value="0.00" unit="kl" color="var(--red)" />
|
||||
<StatBox label="풍화량" value={summary ? summary.weatheredVolume.toFixed(2) : '—'} unit="m³" color="var(--orange)" />
|
||||
<StatBox label="해상잔존" value={summary ? summary.remainingVolume.toFixed(2) : '—'} unit="m³" color="var(--blue)" />
|
||||
<StatBox label="연안부착" value={summary ? summary.beachedVolume.toFixed(2) : '—'} unit="m³" color="var(--red)" />
|
||||
<div className="col-span-2">
|
||||
<StatBox label="오염해역면적" value="8.56" unit="㎢" color="var(--cyan)" />
|
||||
<StatBox label="오염해역면적" value={summary ? summary.pollutionArea.toFixed(2) : '—'} unit="km²" color="var(--cyan)" />
|
||||
</div>
|
||||
</div>
|
||||
</Section>
|
||||
|
||||
@ -0,0 +1,123 @@
|
||||
interface SimulationLoadingOverlayProps {
|
||||
status: 'PENDING' | 'RUNNING';
|
||||
progress?: number;
|
||||
}
|
||||
|
||||
const SimulationLoadingOverlay = ({ status, progress }: SimulationLoadingOverlayProps) => {
|
||||
const displayProgress = progress ?? 0;
|
||||
const statusText = status === 'PENDING' ? '모델 초기화 중...' : '입자 추적 계산 중...';
|
||||
|
||||
return (
|
||||
<div
|
||||
style={{
|
||||
position: 'absolute',
|
||||
inset: 0,
|
||||
zIndex: 50,
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
background: 'rgba(10, 14, 26, 0.75)',
|
||||
backdropFilter: 'blur(4px)',
|
||||
}}
|
||||
>
|
||||
<div
|
||||
style={{
|
||||
width: 320,
|
||||
background: 'var(--bg1)',
|
||||
border: '1px solid var(--bd)',
|
||||
borderRadius: 'var(--rM)',
|
||||
padding: '28px 24px',
|
||||
display: 'flex',
|
||||
flexDirection: 'column',
|
||||
gap: 16,
|
||||
}}
|
||||
>
|
||||
{/* 아이콘 + 제목 */}
|
||||
<div style={{ display: 'flex', alignItems: 'center', gap: 10 }}>
|
||||
<div
|
||||
style={{
|
||||
width: 36,
|
||||
height: 36,
|
||||
borderRadius: '50%',
|
||||
background: 'rgba(6, 182, 212, 0.12)',
|
||||
border: '1px solid rgba(6, 182, 212, 0.3)',
|
||||
display: 'flex',
|
||||
alignItems: 'center',
|
||||
justifyContent: 'center',
|
||||
flexShrink: 0,
|
||||
}}
|
||||
>
|
||||
<svg width="18" height="18" viewBox="0 0 24 24" fill="none">
|
||||
<path
|
||||
d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 14l-4-4 1.41-1.41L11 13.17l6.59-6.59L19 8l-8 8z"
|
||||
fill="var(--cyan)"
|
||||
opacity="0.8"
|
||||
/>
|
||||
</svg>
|
||||
</div>
|
||||
<div>
|
||||
<div style={{ color: 'var(--t1)', fontSize: 14, fontWeight: 600 }}>
|
||||
확산 예측 분석 중
|
||||
</div>
|
||||
<div style={{ color: 'var(--t3)', fontSize: 12, marginTop: 2 }}>
|
||||
{statusText}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 진행률 바 */}
|
||||
<div>
|
||||
<div
|
||||
style={{
|
||||
height: 6,
|
||||
background: 'rgba(255, 255, 255, 0.06)',
|
||||
borderRadius: 999,
|
||||
overflow: 'hidden',
|
||||
}}
|
||||
>
|
||||
<div
|
||||
style={{
|
||||
height: '100%',
|
||||
width: `${displayProgress}%`,
|
||||
background: 'linear-gradient(90deg, var(--cyan), var(--blue))',
|
||||
borderRadius: 999,
|
||||
transition: 'width 0.6s ease',
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
<div
|
||||
style={{
|
||||
display: 'flex',
|
||||
justifyContent: 'space-between',
|
||||
marginTop: 8,
|
||||
}}
|
||||
>
|
||||
<span style={{ color: 'var(--t3)', fontSize: 11 }}>
|
||||
{status === 'PENDING' ? '대기 중' : '분석 진행 중'}
|
||||
</span>
|
||||
<span style={{ color: 'var(--cyan)', fontSize: 12, fontWeight: 600 }}>
|
||||
{status === 'PENDING' ? '—' : `${displayProgress}%`}
|
||||
</span>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* 안내 문구 */}
|
||||
<div
|
||||
style={{
|
||||
color: 'var(--t3)',
|
||||
fontSize: 11,
|
||||
lineHeight: 1.6,
|
||||
borderTop: '1px solid var(--bdL)',
|
||||
paddingTop: 12,
|
||||
}}
|
||||
>
|
||||
OpenDrift 모델로 유류 확산을 시뮬레이션하고 있습니다.
|
||||
<br />
|
||||
완료되면 자동으로 결과가 표시됩니다.
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default SimulationLoadingOverlay;
|
||||
@ -1,13 +1,17 @@
|
||||
import type { PredictionModel } from './OilSpillView'
|
||||
import type { BoomLine, BoomLineCoord, AlgorithmSettings, ContainmentResult } from '@common/types/boomLine'
|
||||
import type { Analysis } from './AnalysisListTable'
|
||||
import type { ImageAnalyzeResult } from '../services/predictionApi'
|
||||
|
||||
export interface LeftPanelProps {
|
||||
selectedAnalysis?: Analysis | null
|
||||
enabledLayers: Set<string>
|
||||
onToggleLayer: (layerId: string, enabled: boolean) => void
|
||||
incidentCoord: { lon: number; lat: number }
|
||||
accidentTime: string
|
||||
onAccidentTimeChange: (time: string) => void
|
||||
incidentCoord: { lon: number; lat: number } | null
|
||||
onCoordChange: (coord: { lon: number; lat: number }) => void
|
||||
isSelectingLocation: boolean
|
||||
onMapSelectClick: () => void
|
||||
onRunSimulation: () => void
|
||||
isRunningSimulation: boolean
|
||||
@ -21,6 +25,10 @@ export interface LeftPanelProps {
|
||||
onOilTypeChange: (type: string) => void
|
||||
spillAmount: number
|
||||
onSpillAmountChange: (amount: number) => void
|
||||
incidentName: string
|
||||
onIncidentNameChange: (name: string) => void
|
||||
spillUnit: string
|
||||
onSpillUnitChange: (unit: string) => void
|
||||
// 오일펜스 배치 관련
|
||||
boomLines: BoomLine[]
|
||||
onBoomLinesChange: (lines: BoomLine[]) => void
|
||||
@ -38,6 +46,8 @@ export interface LeftPanelProps {
|
||||
onLayerOpacityChange: (val: number) => void
|
||||
layerBrightness: number
|
||||
onLayerBrightnessChange: (val: number) => void
|
||||
// 이미지 분석 결과 콜백
|
||||
onImageAnalysisResult?: (result: ImageAnalyzeResult) => void
|
||||
}
|
||||
|
||||
export interface ExpandedSections {
|
||||
|
||||
16
frontend/src/tabs/prediction/hooks/useSimulationStatus.ts
Normal file
16
frontend/src/tabs/prediction/hooks/useSimulationStatus.ts
Normal file
@ -0,0 +1,16 @@
|
||||
import { useQuery } from '@tanstack/react-query';
|
||||
import { api } from '@common/services/api';
|
||||
import type { SimulationStatusResponse } from '../services/predictionApi';
|
||||
|
||||
export const useSimulationStatus = (execSn: number | null) => {
|
||||
return useQuery<SimulationStatusResponse>({
|
||||
queryKey: ['simulationStatus', execSn],
|
||||
queryFn: () => api.get<SimulationStatusResponse>(`/simulation/status/${execSn}`).then(r => r.data),
|
||||
enabled: execSn !== null,
|
||||
refetchInterval: (query) => {
|
||||
const status = query.state.data?.status;
|
||||
if (status === 'DONE' || status === 'ERROR') return false;
|
||||
return 3000;
|
||||
},
|
||||
});
|
||||
};
|
||||
@ -115,3 +115,105 @@ export const createBacktrack = async (input: {
|
||||
const response = await api.post<{ backtrackSn: number }>('/prediction/backtrack', input);
|
||||
return response.data;
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// 확산 예측 시뮬레이션 (OpenDrift 연동)
|
||||
// ============================================================
|
||||
|
||||
export interface SimulationRunResponse {
|
||||
success: boolean;
|
||||
execSn: number;
|
||||
acdntSn: number | null;
|
||||
status: 'RUNNING';
|
||||
}
|
||||
|
||||
export interface WindPoint {
|
||||
lat: number;
|
||||
lon: number;
|
||||
wind_speed: number;
|
||||
wind_direction: number;
|
||||
}
|
||||
|
||||
export interface HydrGrid {
|
||||
lonInterval: number[];
|
||||
boundLonLat: { top: number; bottom: number; left: number; right: number };
|
||||
rows: number;
|
||||
cols: number;
|
||||
latInterval: number[];
|
||||
}
|
||||
|
||||
export interface HydrDataStep {
|
||||
value: [number[][], number[][]]; // [u_2d, v_2d]
|
||||
grid: HydrGrid;
|
||||
}
|
||||
|
||||
export interface CenterPoint {
|
||||
lat: number;
|
||||
lon: number;
|
||||
time: number;
|
||||
}
|
||||
|
||||
export interface OilParticle {
|
||||
lat: number;
|
||||
lon: number;
|
||||
time: number;
|
||||
particle?: number;
|
||||
stranded?: 0 | 1;
|
||||
}
|
||||
|
||||
export interface SimulationSummary {
|
||||
remainingVolume: number;
|
||||
weatheredVolume: number;
|
||||
pollutionArea: number;
|
||||
beachedVolume: number;
|
||||
pollutionCoastLength: number;
|
||||
}
|
||||
|
||||
export interface SimulationStatusResponse {
|
||||
status: 'PENDING' | 'RUNNING' | 'DONE' | 'ERROR';
|
||||
progress?: number;
|
||||
trajectory?: OilParticle[];
|
||||
summary?: SimulationSummary;
|
||||
centerPoints?: CenterPoint[];
|
||||
windData?: WindPoint[][];
|
||||
hydrData?: (HydrDataStep | null)[];
|
||||
error?: string;
|
||||
}
|
||||
|
||||
export interface TrajectoryResponse {
|
||||
trajectory: OilParticle[] | null;
|
||||
summary: SimulationSummary | null;
|
||||
centerPoints?: CenterPoint[];
|
||||
windData?: WindPoint[][];
|
||||
hydrData?: (HydrDataStep | null)[];
|
||||
}
|
||||
|
||||
export const fetchAnalysisTrajectory = async (acdntSn: number): Promise<TrajectoryResponse> => {
|
||||
const response = await api.get<TrajectoryResponse>(`/prediction/analyses/${acdntSn}/trajectory`);
|
||||
return response.data;
|
||||
};
|
||||
|
||||
// ============================================================
|
||||
// 이미지 업로드 분석
|
||||
// ============================================================
|
||||
|
||||
export interface ImageAnalyzeResult {
|
||||
acdntSn: number;
|
||||
lat: number;
|
||||
lon: number;
|
||||
oilType: string;
|
||||
area: number;
|
||||
volume: number;
|
||||
fileId: string;
|
||||
occurredAt: string;
|
||||
}
|
||||
|
||||
export const analyzeImage = async (file: File): Promise<ImageAnalyzeResult> => {
|
||||
const formData = new FormData();
|
||||
formData.append('image', file);
|
||||
const response = await api.post<ImageAnalyzeResult>('/prediction/image-analyze', formData, {
|
||||
headers: { 'Content-Type': 'multipart/form-data' },
|
||||
timeout: 330_000,
|
||||
});
|
||||
return response.data;
|
||||
};
|
||||
|
||||
@ -2,7 +2,7 @@ import { useState, useEffect } from 'react';
|
||||
import {
|
||||
createEmptyReport,
|
||||
} from './OilSpillReportTemplate';
|
||||
import { consumeReportGenCategory, consumeHnsReportPayload, type HnsReportPayload } from '@common/hooks/useSubMenu';
|
||||
import { consumeReportGenCategory, consumeHnsReportPayload, type HnsReportPayload, consumeOilReportPayload, type OilReportPayload } from '@common/hooks/useSubMenu';
|
||||
import { saveReport } from '../services/reportsApi';
|
||||
import {
|
||||
CATEGORIES,
|
||||
@ -32,6 +32,8 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
|
||||
// HNS 실 데이터 (없으면 sampleHnsData fallback)
|
||||
const [hnsPayload, setHnsPayload] = useState<HnsReportPayload | null>(null)
|
||||
// OIL 실 데이터 (없으면 sampleOilData fallback)
|
||||
const [oilPayload, setOilPayload] = useState<OilReportPayload | null>(null)
|
||||
|
||||
// 외부에서 카테고리 힌트가 변경되면 반영
|
||||
useEffect(() => {
|
||||
@ -44,6 +46,9 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
// HNS 데이터 소비
|
||||
const payload = consumeHnsReportPayload()
|
||||
if (payload) setHnsPayload(payload)
|
||||
// OIL 예측 데이터 소비
|
||||
const oilData = consumeOilReportPayload()
|
||||
if (oilData) setOilPayload(oilData)
|
||||
}, [])
|
||||
|
||||
const cat = CATEGORIES[activeCat]
|
||||
@ -65,8 +70,19 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
report.status = '완료'
|
||||
report.author = '시스템 자동생성'
|
||||
if (activeCat === 0) {
|
||||
report.incident.pollutant = sampleOilData.pollution.oilType
|
||||
report.incident.spillAmount = sampleOilData.pollution.spillAmount
|
||||
if (oilPayload) {
|
||||
report.incident.name = oilPayload.incident.name;
|
||||
report.incident.occurTime = oilPayload.incident.occurTime;
|
||||
report.incident.location = oilPayload.incident.location;
|
||||
report.incident.lat = String(oilPayload.incident.lat ?? '');
|
||||
report.incident.lon = String(oilPayload.incident.lon ?? '');
|
||||
report.incident.shipName = oilPayload.incident.shipName;
|
||||
report.incident.pollutant = oilPayload.pollution.oilType;
|
||||
report.incident.spillAmount = oilPayload.pollution.spillAmount;
|
||||
} else {
|
||||
report.incident.pollutant = sampleOilData.pollution.oilType;
|
||||
report.incident.spillAmount = sampleOilData.pollution.spillAmount;
|
||||
}
|
||||
}
|
||||
try {
|
||||
await saveReport(report)
|
||||
@ -82,6 +98,24 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
const sectionHTML = activeSections.map(sec => {
|
||||
let content = `<p style="font-size:12px;color:#666;">${sec.desc}</p>`;
|
||||
|
||||
// OIL 섹션에 실 데이터 삽입
|
||||
if (activeCat === 0 && oilPayload) {
|
||||
if (sec.id === 'oil-pollution') {
|
||||
const rows = [
|
||||
['유출량', oilPayload.pollution.spillAmount, '풍화량', oilPayload.pollution.weathered],
|
||||
['해상잔유량', oilPayload.pollution.seaRemain, '오염해역면적', oilPayload.pollution.pollutionArea],
|
||||
['연안부착량', oilPayload.pollution.coastAttach, '오염해안길이', oilPayload.pollution.coastLength],
|
||||
];
|
||||
const simBanner = !oilPayload.hasSimulation
|
||||
? '<p style="font-size:10px;color:#f97316;margin-bottom:8px;">시뮬레이션이 실행되지 않아 오염량은 입력값 기준으로 표시됩니다.</p>'
|
||||
: '';
|
||||
const trs = rows.map(r =>
|
||||
`<tr><td style="padding:6px 8px;border:1px solid #ddd;color:#888;">${r[0]}</td><td style="padding:6px 8px;border:1px solid #ddd;font-weight:bold;text-align:right;">${r[1]}</td><td style="padding:6px 8px;border:1px solid #ddd;color:#888;">${r[2]}</td><td style="padding:6px 8px;border:1px solid #ddd;font-weight:bold;text-align:right;">${r[3]}</td></tr>`
|
||||
).join('');
|
||||
content = `${simBanner}<table style="width:100%;border-collapse:collapse;font-size:12px;">${trs}</table>`;
|
||||
}
|
||||
}
|
||||
|
||||
// HNS 섹션에 실 데이터 삽입
|
||||
if (activeCat === 1 && hnsPayload) {
|
||||
if (sec.id === 'hns-atm') {
|
||||
@ -261,9 +295,9 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
</div>
|
||||
<div className="grid grid-cols-3 gap-3">
|
||||
{[
|
||||
{ label: 'KOSPS', value: sampleOilData.spread.kosps, color: '#06b6d4' },
|
||||
{ label: 'OpenDrift', value: sampleOilData.spread.openDrift, color: '#ef4444' },
|
||||
{ label: 'POSEIDON', value: sampleOilData.spread.poseidon, color: '#f97316' },
|
||||
{ label: 'KOSPS', value: oilPayload?.spread.kosps || sampleOilData.spread.kosps, color: '#06b6d4' },
|
||||
{ label: 'OpenDrift', value: oilPayload?.spread.openDrift || sampleOilData.spread.openDrift, color: '#ef4444' },
|
||||
{ label: 'POSEIDON', value: oilPayload?.spread.poseidon || sampleOilData.spread.poseidon, color: '#f97316' },
|
||||
].map((m, i) => (
|
||||
<div key={i} className="bg-bg-1 border border-border rounded-lg p-4 text-center">
|
||||
<p className="text-[10px] text-text-3 font-korean mb-1">{m.label}</p>
|
||||
@ -274,23 +308,30 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
</>
|
||||
)}
|
||||
{sec.id === 'oil-pollution' && (
|
||||
<table className="w-full table-fixed border-collapse">
|
||||
<colgroup><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /></colgroup>
|
||||
<tbody>
|
||||
{[
|
||||
['유출량', sampleOilData.pollution.spillAmount, '풍화량', sampleOilData.pollution.weathered],
|
||||
['해상잔유량', sampleOilData.pollution.seaRemain, '오염해역면적', sampleOilData.pollution.pollutionArea],
|
||||
['연안부착량', sampleOilData.pollution.coastAttach, '오염해안길이', sampleOilData.pollution.coastLength],
|
||||
].map((row, i) => (
|
||||
<tr key={i} className="border-b border-border">
|
||||
<td className="px-4 py-3 text-[11px] text-text-3 font-korean bg-[rgba(255,255,255,0.02)]">{row[0]}</td>
|
||||
<td className="px-4 py-3 text-[12px] text-text-1 font-mono font-semibold text-right">{row[1]}</td>
|
||||
<td className="px-4 py-3 text-[11px] text-text-3 font-korean bg-[rgba(255,255,255,0.02)]">{row[2]}</td>
|
||||
<td className="px-4 py-3 text-[12px] text-text-1 font-mono font-semibold text-right">{row[3]}</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
<>
|
||||
{oilPayload && !oilPayload.hasSimulation && (
|
||||
<div className="mb-3 px-3 py-2 rounded text-[10px] font-korean" style={{ background: 'rgba(249,115,22,0.08)', border: '1px solid rgba(249,115,22,0.3)', color: '#f97316' }}>
|
||||
시뮬레이션이 실행되지 않아 오염량은 입력값 기준으로 표시됩니다.
|
||||
</div>
|
||||
)}
|
||||
<table className="w-full table-fixed border-collapse">
|
||||
<colgroup><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /><col style={{ width: '25%' }} /></colgroup>
|
||||
<tbody>
|
||||
{[
|
||||
['유출량', oilPayload?.pollution.spillAmount || sampleOilData.pollution.spillAmount, '풍화량', oilPayload?.pollution.weathered || sampleOilData.pollution.weathered],
|
||||
['해상잔유량', oilPayload?.pollution.seaRemain || sampleOilData.pollution.seaRemain, '오염해역면적', oilPayload?.pollution.pollutionArea || sampleOilData.pollution.pollutionArea],
|
||||
['연안부착량', oilPayload?.pollution.coastAttach || sampleOilData.pollution.coastAttach, '오염해안길이', oilPayload?.pollution.coastLength || sampleOilData.pollution.coastLength],
|
||||
].map((row, i) => (
|
||||
<tr key={i} className="border-b border-border">
|
||||
<td className="px-4 py-3 text-[11px] text-text-3 font-korean bg-[rgba(255,255,255,0.02)]">{row[0]}</td>
|
||||
<td className="px-4 py-3 text-[12px] text-text-1 font-mono font-semibold text-right">{row[1]}</td>
|
||||
<td className="px-4 py-3 text-[11px] text-text-3 font-korean bg-[rgba(255,255,255,0.02)]">{row[2]}</td>
|
||||
<td className="px-4 py-3 text-[12px] text-text-1 font-mono font-semibold text-right">{row[3]}</td>
|
||||
</tr>
|
||||
))}
|
||||
</tbody>
|
||||
</table>
|
||||
</>
|
||||
)}
|
||||
{sec.id === 'oil-sensitive' && (
|
||||
<>
|
||||
@ -304,9 +345,9 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
)}
|
||||
{sec.id === 'oil-coastal' && (
|
||||
<p className="text-[12px] text-text-2 font-korean">
|
||||
최초 부착시간: <span className="font-semibold text-text-1">{sampleOilData.coastal.firstTime}</span>
|
||||
최초 부착시간: <span className="font-semibold text-text-1">{oilPayload?.coastal?.firstTime ?? sampleOilData.coastal.firstTime}</span>
|
||||
{' / '}
|
||||
부착 해안길이: <span className="font-semibold text-text-1">{sampleOilData.coastal.coastLength}</span>
|
||||
부착 해안길이: <span className="font-semibold text-text-1">{oilPayload?.pollution.coastLength || sampleOilData.coastal.coastLength}</span>
|
||||
</p>
|
||||
)}
|
||||
{sec.id === 'oil-defense' && (
|
||||
@ -318,11 +359,20 @@ function ReportGenerator({ onSave }: ReportGeneratorProps) {
|
||||
</div>
|
||||
)}
|
||||
{sec.id === 'oil-tide' && (
|
||||
<p className="text-[12px] text-text-2 font-korean">
|
||||
고조: <span className="font-semibold text-text-1">{sampleOilData.tide.highTide1}</span>
|
||||
{' / '}저조: <span className="font-semibold text-text-1">{sampleOilData.tide.lowTide}</span>
|
||||
{' / '}고조: <span className="font-semibold text-text-1">{sampleOilData.tide.highTide2}</span>
|
||||
</p>
|
||||
<>
|
||||
<p className="text-[12px] text-text-2 font-korean">
|
||||
고조: <span className="font-semibold text-text-1">{sampleOilData.tide.highTide1}</span>
|
||||
{' / '}저조: <span className="font-semibold text-text-1">{sampleOilData.tide.lowTide}</span>
|
||||
{' / '}고조: <span className="font-semibold text-text-1">{sampleOilData.tide.highTide2}</span>
|
||||
</p>
|
||||
{oilPayload?.weather && (
|
||||
<p className="text-[11px] text-text-3 font-korean mt-2">
|
||||
기상: 풍향/풍속 <span className="text-text-2 font-semibold">{oilPayload.weather.windDir}</span>
|
||||
{' / '}파고 <span className="text-text-2 font-semibold">{oilPayload.weather.waveHeight}</span>
|
||||
{' / '}기온 <span className="text-text-2 font-semibold">{oilPayload.weather.temp}</span>
|
||||
</p>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
|
||||
{/* ── HNS 대기확산 섹션들 ── */}
|
||||
|
||||
13
prediction/image/.dockerignore
Normal file
13
prediction/image/.dockerignore
Normal file
@ -0,0 +1,13 @@
|
||||
__pycache__/
|
||||
stitch/
|
||||
|
||||
mx15hdi/Detect/Mask_result/
|
||||
mx15hdi/Detect/result/
|
||||
|
||||
mx15hdi/Georeference/Mask_Tif/
|
||||
mx15hdi/Georeference/Tif/
|
||||
|
||||
mx15hdi/Metadata/CSV/
|
||||
mx15hdi/Metadata/Image/Original_Images/
|
||||
|
||||
mx15hdi/Polygon/Shp/
|
||||
376
prediction/image/DOCKER_USAGE.md
Normal file
376
prediction/image/DOCKER_USAGE.md
Normal file
@ -0,0 +1,376 @@
|
||||
# wing-image-analysis Docker 사용 가이드
|
||||
|
||||
드론 영상 기반 유류 오염 분석 FastAPI 서버를 Docker 컨테이너로 빌드하고 실행하는 방법을 설명한다.
|
||||
|
||||
---
|
||||
|
||||
## 목차
|
||||
|
||||
1. [사전 요구사항](#1-사전-요구사항)
|
||||
2. [빠른 시작](#2-빠른-시작)
|
||||
3. [빌드 명령어](#3-빌드-명령어)
|
||||
4. [실행 명령어](#4-실행-명령어)
|
||||
5. [환경변수 설정](#5-환경변수-설정)
|
||||
6. [볼륨 구조](#6-볼륨-구조)
|
||||
7. [API 엔드포인트 사용 예시](#7-api-엔드포인트-사용-예시)
|
||||
8. [로그 확인 및 디버깅](#8-로그-확인-및-디버깅)
|
||||
9. [컨테이너 관리](#9-컨테이너-관리)
|
||||
10. [주의사항](#10-주의사항)
|
||||
11. [CPU 전용 환경 실행](#11-cpu-전용-환경-실행)
|
||||
|
||||
---
|
||||
|
||||
## 1. 사전 요구사항
|
||||
|
||||
| 항목 | 최소 버전 | 확인 명령어 |
|
||||
|------|----------|-------------|
|
||||
| Docker Engine | 24.0 이상 | `docker --version` |
|
||||
| Docker Compose | 2.20 이상 | `docker compose version` |
|
||||
| NVIDIA 드라이버 | 525 이상 (CUDA 12.1 지원) | `nvidia-smi` |
|
||||
| nvidia-container-toolkit | 최신 | `nvidia-ctk --version` |
|
||||
|
||||
### nvidia-container-toolkit 설치 (Ubuntu 기준)
|
||||
|
||||
```bash
|
||||
# GPG 키 및 저장소 추가
|
||||
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey \
|
||||
| sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg
|
||||
|
||||
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
|
||||
| sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' \
|
||||
| sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
|
||||
|
||||
sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
|
||||
|
||||
# Docker 런타임 설정 및 재시작
|
||||
sudo nvidia-ctk runtime configure --runtime=docker
|
||||
sudo systemctl restart docker
|
||||
```
|
||||
|
||||
### GPU 동작 확인
|
||||
|
||||
```bash
|
||||
docker run --rm --gpus all nvidia/cuda:12.1-base-ubuntu22.04 nvidia-smi
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 2. 빠른 시작
|
||||
|
||||
```bash
|
||||
# 1. prediction/image/ 디렉토리로 이동
|
||||
cd prediction/image
|
||||
|
||||
# 2. 환경변수 파일 준비 (필요 시)
|
||||
cp .env.example .env
|
||||
|
||||
# 3. 빌드 + 실행 (백그라운드)
|
||||
docker compose up -d --build
|
||||
|
||||
# 4. 서버 상태 확인
|
||||
curl http://localhost:5001/docs
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 3. 빌드 명령어
|
||||
|
||||
### docker compose (권장)
|
||||
|
||||
```bash
|
||||
# 이미지 빌드만 수행 (실행 안 함)
|
||||
docker compose build
|
||||
|
||||
# 빌드 로그를 상세하게 출력
|
||||
docker compose build --progress=plain
|
||||
|
||||
# 캐시 없이 처음부터 빌드 (의존성 변경 시)
|
||||
docker compose build --no-cache
|
||||
```
|
||||
|
||||
### docker build (단독)
|
||||
|
||||
```bash
|
||||
# prediction/image/ 디렉토리에서 실행
|
||||
docker build -t wing-image-analysis:latest .
|
||||
|
||||
# 빌드 태그 지정
|
||||
docker build -t wing-image-analysis:1.0.0 .
|
||||
|
||||
# 캐시 없이 빌드
|
||||
docker build --no-cache -t wing-image-analysis:latest .
|
||||
```
|
||||
|
||||
> **참고**: 첫 빌드는 PyTorch base 이미지(약 8GB) + GDAL/Python 패키지 설치로 **30~60분** 소요될 수 있다.
|
||||
> 이후 빌드는 레이어 캐시로 수 분 내 완료된다.
|
||||
|
||||
---
|
||||
|
||||
## 4. 실행 명령어
|
||||
|
||||
### docker compose (권장)
|
||||
|
||||
```bash
|
||||
# 백그라운드 실행
|
||||
docker compose up -d
|
||||
|
||||
# 빌드 후 즉시 실행
|
||||
docker compose up -d --build
|
||||
|
||||
# 포그라운드 실행 (로그 바로 출력)
|
||||
docker compose up
|
||||
|
||||
# 중지
|
||||
docker compose down
|
||||
|
||||
# 중지 + 볼륨 삭제 (데이터 초기화 시)
|
||||
docker compose down -v
|
||||
```
|
||||
|
||||
### docker run (단독 — 테스트용)
|
||||
|
||||
```bash
|
||||
docker run --rm \
|
||||
--gpus all \
|
||||
-p 5001:5001 \
|
||||
--env-file .env \
|
||||
-v "$(pwd)/mx15hdi/Metadata/Image/Original_Images:/app/mx15hdi/Metadata/Image/Original_Images" \
|
||||
wing-image-analysis:latest
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 5. 환경변수 설정
|
||||
|
||||
`.env.example`을 복사하여 `.env`를 생성한다.
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
| 변수 | 설명 | 기본값 |
|
||||
|------|------|--------|
|
||||
| `API_HOST` | 서버 바인드 주소 | `0.0.0.0` |
|
||||
| `API_PORT` | 서버 포트 | `5001` |
|
||||
|
||||
---
|
||||
|
||||
## 6. 볼륨 구조
|
||||
|
||||
컨테이너 내부 경로와 호스트 경로의 매핑이다. 이미지/결과 데이터는 컨테이너 외부에 저장되어 컨테이너를 재시작해도 유지된다.
|
||||
|
||||
```
|
||||
호스트 (prediction/image/) 컨테이너 (/app/)
|
||||
─────────────────────────────────────────────────────────────────────
|
||||
mx15hdi/Metadata/Image/Original_Images/ → mx15hdi/Metadata/Image/Original_Images/ ← 원본 이미지 입력
|
||||
mx15hdi/Metadata/CSV/ → mx15hdi/Metadata/CSV/ ← 메타데이터 출력
|
||||
mx15hdi/Georeference/Tif/ → mx15hdi/Georeference/Tif/ ← GeoTIFF 출력
|
||||
mx15hdi/Georeference/Mask_Tif/ → mx15hdi/Georeference/Mask_Tif/ ← 마스크 GeoTIFF
|
||||
mx15hdi/Polygon/Shp/ → mx15hdi/Polygon/Shp/ ← Shapefile 출력
|
||||
mx15hdi/Detect/result/ → mx15hdi/Detect/result/ ← 블렌딩 결과
|
||||
mx15hdi/Detect/Mask_result/ → mx15hdi/Detect/Mask_result/ ← 마스크 결과
|
||||
starsafire/Metadata/Image/Original_Images → starsafire/Metadata/Image/Original_Images ← 열화상 입력
|
||||
starsafire/{기타}/ → starsafire/{기타}/ ← 열화상 출력
|
||||
stitch/ → stitch/ ← 스티칭 결과
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 7. API 엔드포인트 사용 예시
|
||||
|
||||
서버 기동 후 `http://localhost:5001/docs`에서 Swagger UI로 전체 API를 확인할 수 있다.
|
||||
|
||||
### 7.1 전체 분석 파이프라인 실행
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:5001/run-script/ \
|
||||
-F "files=@/path/to/drone_image.jpg" \
|
||||
-F "camTy=mx15hdi" \
|
||||
-F "fileId=20240310_001"
|
||||
```
|
||||
|
||||
**응답 예시**:
|
||||
```json
|
||||
{
|
||||
"meta": "drone_image.jpg,37,30,0,126,55,0,...",
|
||||
"data": [
|
||||
{
|
||||
"classId": 2,
|
||||
"area": 1234.56,
|
||||
"volume": 0.1234,
|
||||
"note": "갈색",
|
||||
"thickness": 0.0001,
|
||||
"wkt": "POLYGON((...))"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### 7.2 메타데이터 조회
|
||||
|
||||
```bash
|
||||
curl http://localhost:5001/get-metadata/mx15hdi/20240310_001
|
||||
```
|
||||
|
||||
### 7.3 원본 이미지 조회 (Base64)
|
||||
|
||||
```bash
|
||||
curl http://localhost:5001/get-original-image/mx15hdi/20240310_001
|
||||
```
|
||||
|
||||
### 7.4 GeoTIFF + 좌표 조회
|
||||
|
||||
```bash
|
||||
curl http://localhost:5001/get-image/mx15hdi/20240310_001
|
||||
```
|
||||
|
||||
### 7.5 이미지 스티칭
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:5001/stitch \
|
||||
-F "files=@photo1.jpg" \
|
||||
-F "files=@photo2.jpg" \
|
||||
-F "mode=drone"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 8. 로그 확인 및 디버깅
|
||||
|
||||
```bash
|
||||
# 실시간 로그 출력
|
||||
docker logs wing-image-analysis -f
|
||||
|
||||
# 최근 100줄만 출력
|
||||
docker logs wing-image-analysis --tail 100
|
||||
|
||||
# 컨테이너 내부 쉘 접속
|
||||
docker exec -it wing-image-analysis bash
|
||||
|
||||
# GPU 사용 현황 확인 (컨테이너 내부)
|
||||
docker exec wing-image-analysis nvidia-smi
|
||||
|
||||
# Python 패키지 목록 확인
|
||||
docker exec wing-image-analysis pip list
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 9. 컨테이너 관리
|
||||
|
||||
```bash
|
||||
# 상태 확인
|
||||
docker compose ps
|
||||
|
||||
# 재시작
|
||||
docker compose restart
|
||||
|
||||
# 중지 (볼륨 유지)
|
||||
docker compose down
|
||||
|
||||
# 이미지 삭제
|
||||
docker rmi wing-image-analysis:latest
|
||||
|
||||
# 사용하지 않는 리소스 정리
|
||||
docker system prune -f
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 10. 주의사항
|
||||
|
||||
### GPU 자동 감지
|
||||
- 서버 기동 시 `torch.cuda.is_available()`로 GPU 유무를 자동 감지한다.
|
||||
- GPU가 있으면 `cuda:0`, 없으면 `cpu`로 자동 폴백된다.
|
||||
- 환경변수 `DEVICE`로 device를 명시 지정할 수 있다 (예: `DEVICE=cpu`, `DEVICE=cuda:1`).
|
||||
|
||||
### 첫 기동 시간
|
||||
- AI 모델 로드: 약 **10~30초** 소요 (GPU 메모리에 로딩)
|
||||
- 준비 완료 후 로그에 `Application startup complete` 메시지가 출력된다.
|
||||
|
||||
### workers=1 고정
|
||||
- GPU 모델은 프로세스 간 공유가 불가하므로 uvicorn workers는 반드시 `1`로 유지해야 한다.
|
||||
- 병렬 처리는 내부 `ThreadPoolExecutor`(max_workers=4)로 처리된다.
|
||||
|
||||
### 포트 충돌
|
||||
- 기본 포트 `5001`이 다른 서비스와 충돌하면 `docker-compose.yml`의 `ports` 항목을 수정한다:
|
||||
```yaml
|
||||
ports:
|
||||
- "5002:5001" # 호스트 5002 → 컨테이너 5001
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 11. CPU 전용 환경 실행
|
||||
|
||||
GPU(NVIDIA)가 없는 환경에서는 CPU 전용 설정을 사용한다.
|
||||
|
||||
### 사전 요구사항 (CPU 모드)
|
||||
|
||||
| 항목 | 최소 버전 | 확인 명령어 |
|
||||
|------|----------|-------------|
|
||||
| Docker Engine | 24.0 이상 | `docker --version` |
|
||||
| Docker Compose | 2.20 이상 | `docker compose version` |
|
||||
| NVIDIA 드라이버 | **불필요** | — |
|
||||
|
||||
### 빠른 시작 (CPU)
|
||||
|
||||
```bash
|
||||
# prediction/image/ 디렉토리로 이동
|
||||
cd prediction/image
|
||||
|
||||
# 환경변수 파일 준비 (필요 시)
|
||||
cp .env.example .env
|
||||
|
||||
# CPU 이미지 빌드 + 실행
|
||||
docker compose -f docker-compose.cpu.yml up -d --build
|
||||
|
||||
# 서버 상태 확인
|
||||
curl http://localhost:5001/docs
|
||||
```
|
||||
|
||||
### 빌드 명령어 (CPU)
|
||||
|
||||
```bash
|
||||
# CPU 이미지만 빌드
|
||||
docker compose -f docker-compose.cpu.yml build
|
||||
|
||||
# 캐시 없이 빌드
|
||||
docker compose -f docker-compose.cpu.yml build --no-cache
|
||||
```
|
||||
|
||||
> **참고**: CPU 기반 PyTorch 이미지는 GPU 이미지(~8GB) 대비 약 70% 용량이 절감된다.
|
||||
> 단, CPU 추론은 GPU 대비 처리 속도가 느리므로 대용량 이미지 분석 시 시간이 더 소요된다.
|
||||
|
||||
### 실행 명령어 (CPU)
|
||||
|
||||
```bash
|
||||
# 백그라운드 실행
|
||||
docker compose -f docker-compose.cpu.yml up -d
|
||||
|
||||
# 포그라운드 실행 (로그 바로 출력)
|
||||
docker compose -f docker-compose.cpu.yml up
|
||||
|
||||
# 중지
|
||||
docker compose -f docker-compose.cpu.yml down
|
||||
```
|
||||
|
||||
### 로컬 직접 실행 (Docker 없이)
|
||||
|
||||
```bash
|
||||
# GPU 있으면 자동으로 cuda:0 사용, 없으면 cpu로 폴백
|
||||
python api.py
|
||||
|
||||
# device 강제 지정
|
||||
DEVICE=cpu python api.py
|
||||
DEVICE=cuda:1 python api.py
|
||||
```
|
||||
|
||||
### GPU/CPU 모드 확인
|
||||
|
||||
서버 기동 로그에서 사용 device를 확인할 수 있다:
|
||||
|
||||
```
|
||||
[Inference] 사용 device: cpu ← CPU 모드
|
||||
[Inference] 사용 device: cuda:0 ← GPU 모드
|
||||
```
|
||||
84
prediction/image/Dockerfile
Normal file
84
prediction/image/Dockerfile
Normal file
@ -0,0 +1,84 @@
|
||||
# ==============================================================================
|
||||
# wing-image-analysis — 드론 영상 유류 분석 FastAPI 서버
|
||||
#
|
||||
# Base: PyTorch 1.9.1 + CUDA 11.1 + cuDNN 8
|
||||
# (mmsegmentation 0.25.0 / mmcv-full 1.4.3 호환 환경)
|
||||
# GPU: NVIDIA GPU 필수 (MMSegmentation 추론)
|
||||
# Port: 5001
|
||||
# ==============================================================================
|
||||
FROM pytorch/pytorch:1.9.1-cuda11.1-cudnn8-devel
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 시스템 패키지: GDAL / PROJ / GEOS (rasterio, geopandas 빌드 의존성)
|
||||
# libpq-dev: psycopg2-binary 런타임 의존성
|
||||
# libspatialindex-dev: geopandas 공간 인덱스
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
gdal-bin \
|
||||
libgdal-dev \
|
||||
libproj-dev \
|
||||
libgeos-dev \
|
||||
libspatialindex-dev \
|
||||
gcc \
|
||||
g++ \
|
||||
git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# rasterio는 GDAL 헤더 버전을 맞춰 빌드해야 한다
|
||||
ENV GDAL_VERSION=3.4.1
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# mmcv-full 1.4.3 — CUDA 11.1 + PyTorch 1.9.0 pre-built 휠
|
||||
# (소스 컴파일 없이 수 초 내 설치)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN pip install --no-cache-dir \
|
||||
mmcv-full==1.4.3 \
|
||||
-f https://download.openmmlab.com/mmcv/dist/cu111/torch1.9.0/index.html
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Python 의존성 설치
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY requirements.txt .
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 로컬 mmsegmentation 설치 (mx15hdi/Detect/mmsegmentation/)
|
||||
# 번들 소스를 먼저 복사한 뒤 editable 설치한다
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY mx15hdi/Detect/mmsegmentation/ /tmp/mmsegmentation/
|
||||
RUN pip install --no-cache-dir -e /tmp/mmsegmentation/
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 소스 코드 전체 복사
|
||||
# 대용량 데이터 디렉토리(Original_Images, result 등)는
|
||||
# docker-compose.yml의 볼륨 마운트로 외부에서 주입된다
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY . .
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# .dockerignore로 제외된 런타임 출력 디렉토리를 빈 폴더로 생성
|
||||
# (볼륨 마운트 전에도 경로가 존재해야 한다)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN mkdir -p \
|
||||
/app/stitch \
|
||||
/app/mx15hdi/Detect/Mask_result \
|
||||
/app/mx15hdi/Detect/result \
|
||||
/app/mx15hdi/Georeference/Mask_Tif \
|
||||
/app/mx15hdi/Georeference/Tif \
|
||||
/app/mx15hdi/Metadata/CSV \
|
||||
/app/mx15hdi/Metadata/Image/Original_Images \
|
||||
/app/mx15hdi/Polygon/Shp
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 런타임 설정
|
||||
# ------------------------------------------------------------------------------
|
||||
EXPOSE 5001
|
||||
|
||||
# workers=1: GPU 모델을 프로세스 하나에서만 로드 (메모리 공유 불가)
|
||||
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "5001", "--workers", "1"]
|
||||
112
prediction/image/Dockerfile.cpu
Normal file
112
prediction/image/Dockerfile.cpu
Normal file
@ -0,0 +1,112 @@
|
||||
# ==============================================================================
|
||||
# wing-image-analysis — 드론 영상 유류 분석 FastAPI 서버 (CPU 전용)
|
||||
#
|
||||
# Base: python:3.9-slim + PyTorch 1.9.0 CPU 빌드
|
||||
# (mmsegmentation 0.25.0 / mmcv-full 1.4.3 호환 환경)
|
||||
# python:3.9 필수 — numpy 1.26.4, geopandas 0.14.4가 Python >=3.9 요구
|
||||
# GPU: 불필요 (CPU 추론)
|
||||
# Port: 5001
|
||||
# ==============================================================================
|
||||
FROM python:3.9-slim
|
||||
|
||||
ENV DEBIAN_FRONTEND=noninteractive \
|
||||
PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
DEVICE=cpu
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 시스템 패키지: GDAL / PROJ / GEOS (rasterio, geopandas 빌드 의존성)
|
||||
# libspatialindex-dev: geopandas 공간 인덱스
|
||||
# opencv-contrib-python-headless 런타임 SO 의존성 (python:3.9-slim에 미포함):
|
||||
# libgl1 — libGL.so.1
|
||||
# libglib2.0-0 — libgthread-2.0.so.0, libgobject-2.0.so.0, libglib-2.0.so.0
|
||||
# libsm6 — libSM.so.6
|
||||
# libxext6 — libXext.so.6
|
||||
# libxrender1 — libXrender.so.1
|
||||
# libgomp1 — libgomp.so.1 (OpenMP, numpy/opencv 병렬 처리)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
gdal-bin \
|
||||
libgdal-dev \
|
||||
libproj-dev \
|
||||
libgeos-dev \
|
||||
libspatialindex-dev \
|
||||
libgl1 \
|
||||
libglib2.0-0 \
|
||||
libsm6 \
|
||||
libxext6 \
|
||||
libxrender1 \
|
||||
libgomp1 \
|
||||
gcc \
|
||||
g++ \
|
||||
git \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# rasterio는 GDAL 헤더 버전을 맞춰 빌드해야 한다
|
||||
ENV GDAL_VERSION=3.4.1
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# GDAL Python 바인딩 (osgeo 모듈) — 시스템 GDAL 버전과 일치해야 한다
|
||||
# python:3.9-slim은 conda 없이 pip 환경이므로 명시적 설치 필요
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN pip install --no-cache-dir GDAL=="$(gdal-config --version)"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# PyTorch 1.9.0 CPU 버전 설치
|
||||
# (mmsegmentation 0.25.0 / mmcv-full 1.4.3 호환)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN pip install --no-cache-dir \
|
||||
torch==1.9.0+cpu \
|
||||
torchvision==0.10.0+cpu \
|
||||
-f https://download.pytorch.org/whl/torch_stable.html
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# mmcv-full 1.4.3 CPU 휠 (CUDA ops 없는 경량 빌드, 추론에 충분)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN pip install --no-cache-dir \
|
||||
mmcv-full==1.4.3 \
|
||||
-f https://download.openmmlab.com/mmcv/dist/cpu/torch1.9.0/index.html
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Python 의존성 설치
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY requirements.txt .
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 로컬 mmsegmentation 설치 (mx15hdi/Detect/mmsegmentation/)
|
||||
# 번들 소스를 먼저 복사한 뒤 editable 설치한다
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY mx15hdi/Detect/mmsegmentation/ /tmp/mmsegmentation/
|
||||
RUN pip install --no-cache-dir -e /tmp/mmsegmentation/
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 소스 코드 전체 복사
|
||||
# 대용량 데이터 디렉토리(Original_Images, result 등)는
|
||||
# docker-compose.cpu.yml의 볼륨 마운트로 외부에서 주입된다
|
||||
# ------------------------------------------------------------------------------
|
||||
COPY . .
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# .dockerignore로 제외된 런타임 출력 디렉토리를 빈 폴더로 생성
|
||||
# (볼륨 마운트 전에도 경로가 존재해야 한다)
|
||||
# ------------------------------------------------------------------------------
|
||||
RUN mkdir -p \
|
||||
/app/stitch \
|
||||
/app/mx15hdi/Detect/Mask_result \
|
||||
/app/mx15hdi/Detect/result \
|
||||
/app/mx15hdi/Georeference/Mask_Tif \
|
||||
/app/mx15hdi/Georeference/Tif \
|
||||
/app/mx15hdi/Metadata/CSV \
|
||||
/app/mx15hdi/Metadata/Image/Original_Images \
|
||||
/app/mx15hdi/Polygon/Shp
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# 런타임 설정
|
||||
# ------------------------------------------------------------------------------
|
||||
EXPOSE 5001
|
||||
|
||||
# workers=1: 모델을 프로세스 하나에서만 로드 (메모리 공유 불가)
|
||||
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "5001", "--workers", "1"]
|
||||
340
prediction/image/api.py
Normal file
340
prediction/image/api.py
Normal file
@ -0,0 +1,340 @@
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
from contextlib import asynccontextmanager
|
||||
import asyncio
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
from fastapi import FastAPI, HTTPException, File, UploadFile, Form
|
||||
from fastapi.responses import Response, FileResponse
|
||||
import subprocess
|
||||
import rasterio
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
from PIL.ExifTags import TAGS
|
||||
import io
|
||||
import base64
|
||||
from pyproj import Transformer
|
||||
from extract_data import get_metadata as get_meta
|
||||
from extract_data import get_oil_type as get_oil
|
||||
import time
|
||||
|
||||
from typing import List, Optional
|
||||
import shutil
|
||||
from datetime import datetime
|
||||
from collections import Counter
|
||||
|
||||
# mx15hdi 파이프라인 모듈 임포트를 위한 sys.path 설정
|
||||
_BASE_DIR = Path(__file__).parent
|
||||
sys.path.insert(0, str(_BASE_DIR / 'mx15hdi' / 'Detect'))
|
||||
sys.path.insert(0, str(_BASE_DIR / 'mx15hdi' / 'Metadata' / 'Scripts'))
|
||||
sys.path.insert(0, str(_BASE_DIR / 'mx15hdi' / 'Georeference' / 'Scripts'))
|
||||
sys.path.insert(0, str(_BASE_DIR / 'mx15hdi' / 'Polygon' / 'Scripts'))
|
||||
|
||||
from Inference import load_model, run_inference
|
||||
from Export_Metadata_mx15hdi import run_metadata_export
|
||||
from Create_Georeferenced_Images_nadir import run_georeference
|
||||
from Oilshape import run_oilshape
|
||||
|
||||
# AI 모델 (서버 시작 시 1회 로드)
|
||||
_model = None
|
||||
# CPU/GPU 바운드 작업용 스레드 풀
|
||||
_executor = ThreadPoolExecutor(max_workers=4)
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(app: FastAPI):
|
||||
"""서버 시작 시 AI 모델을 1회 로드하고, 종료 시 해제한다."""
|
||||
global _model
|
||||
print("AI 모델 로딩 중 (epoch_165.pth)...")
|
||||
_model = load_model()
|
||||
print("AI 모델 로드 완료")
|
||||
yield
|
||||
_model = None
|
||||
|
||||
|
||||
app = FastAPI(lifespan=lifespan)
|
||||
|
||||
|
||||
def check_gps_info(image_path: str):
|
||||
# Pillow로 이미지 열기
|
||||
image = Image.open(image_path)
|
||||
|
||||
# EXIF 데이터 추출
|
||||
exifdata = image.getexif()
|
||||
|
||||
if not exifdata:
|
||||
print("EXIF 정보를 찾을 수 없습니다.")
|
||||
return False
|
||||
|
||||
# GPS 정보 추출
|
||||
gps_ifd = exifdata.get_ifd(0x8825) # GPS IFD 태그
|
||||
if not gps_ifd:
|
||||
print("GPS 정보를 찾을 수 없습니다.")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def check_camera_info(image_file):
|
||||
# Pillow로 이미지 열기
|
||||
image = Image.open(image_file)
|
||||
|
||||
# EXIF 데이터 추출
|
||||
exifdata = image.getexif()
|
||||
|
||||
if not exifdata:
|
||||
print("EXIF 정보를 찾을 수 없습니다.")
|
||||
return False
|
||||
|
||||
for tag_id, value in exifdata.items():
|
||||
tag_name = TAGS.get(tag_id, tag_id)
|
||||
if tag_name == "Model":
|
||||
return value.strip() if isinstance(value, str) else value
|
||||
|
||||
|
||||
async def _run_mx15hdi_pipeline(file_id: str):
|
||||
"""
|
||||
mx15hdi 파이프라인을 in-process로 실행한다.
|
||||
- Step 1 (AI 추론) + Step 2 (메타데이터 추출) 병렬 실행
|
||||
- Step 3 (지리참조) → Step 4 (폴리곤 추출) 순차 실행
|
||||
- 중간 파일 I/O 없이 numpy 배열을 메모리로 전달
|
||||
"""
|
||||
loop = asyncio.get_event_loop()
|
||||
|
||||
# Step 1 + Step 2 병렬 실행 — inference_cache 캡처
|
||||
inference_cache, _ = await asyncio.gather(
|
||||
loop.run_in_executor(_executor, run_inference, _model, file_id),
|
||||
loop.run_in_executor(_executor, run_metadata_export, file_id),
|
||||
)
|
||||
|
||||
# Step 3: Georeference — inference_cache 메모리로 전달, georef_cache 반환
|
||||
georef_cache = await loop.run_in_executor(
|
||||
_executor, run_georeference, file_id, inference_cache
|
||||
)
|
||||
|
||||
# Step 4: Polygon 추출 — georef_cache 메모리로 전달 (Mask_Tif 디스크 읽기 없음)
|
||||
await loop.run_in_executor(_executor, run_oilshape, file_id, georef_cache)
|
||||
|
||||
|
||||
# 전체 과정을 구동하는 api
|
||||
@app.post("/run-script/")
|
||||
async def run_script(
|
||||
# pollId: int = Form(...),
|
||||
camTy: str = Form(...),
|
||||
fileId: str = Form(...),
|
||||
image: UploadFile = File(...)
|
||||
):
|
||||
try:
|
||||
print("start")
|
||||
start_time = time.perf_counter()
|
||||
|
||||
if camTy not in ["mx15hdi", "starsafire"]:
|
||||
raise HTTPException(status_code=400, detail="string1 must be 'mx15hdi' or 'starsafire'")
|
||||
|
||||
# 저장할 이미지 경로 설정
|
||||
upload_dir = os.path.join(camTy, "Metadata/Image/Original_Images", fileId)
|
||||
os.makedirs(upload_dir, exist_ok=True)
|
||||
|
||||
# 이미지 파일 저장
|
||||
image_path = os.path.join(upload_dir, image.filename)
|
||||
with open(image_path, "wb") as f:
|
||||
f.write(await image.read())
|
||||
|
||||
gps_flage = check_gps_info(image_path)
|
||||
if not gps_flage:
|
||||
return {"detail": "GPS Infomation Not Found"}
|
||||
|
||||
if camTy == "mx15hdi":
|
||||
# in-process 파이프라인 실행 (모델 재로딩 없음, Step1+2 병렬)
|
||||
await _run_mx15hdi_pipeline(fileId)
|
||||
else:
|
||||
# starsafire: 기존 subprocess 방식 유지
|
||||
script_dir = os.path.join(os.getcwd(), camTy, "Main")
|
||||
script_file = "Combine_module.py"
|
||||
script_path = os.path.join(script_dir, script_file)
|
||||
|
||||
if not os.path.exists(script_path):
|
||||
raise HTTPException(status_code=404, detail="Script not found")
|
||||
|
||||
result = subprocess.run(
|
||||
["python", script_file, fileId],
|
||||
cwd=script_dir,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=300
|
||||
)
|
||||
print(f"Subprocess stdout: {result.stdout}")
|
||||
print(f"Subprocess stderr: {result.stderr}")
|
||||
|
||||
meta_string = get_meta(camTy, fileId)
|
||||
oil_data = get_oil(camTy, fileId)
|
||||
end_time = time.perf_counter()
|
||||
print(f"Run time: {end_time - start_time:.4f} sec")
|
||||
|
||||
return {
|
||||
"meta": meta_string,
|
||||
"data": oil_data
|
||||
}
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
raise HTTPException(status_code=500, detail="Script execution timed out")
|
||||
except Exception as e:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@app.get("/get-metadata/{camTy}/{fileId}")
|
||||
async def get_metadata(camTy: str, fileId: str):
|
||||
try:
|
||||
meta_string = get_meta(camTy, fileId)
|
||||
oil_data = get_oil(camTy, fileId)
|
||||
|
||||
return {
|
||||
"meta": meta_string,
|
||||
"data": oil_data
|
||||
}
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
raise HTTPException(status_code=500, detail="Script execution timed out")
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@app.get("/get-original-image/{camTy}/{fileId}")
|
||||
async def get_original_image(camTy: str, fileId: str):
|
||||
try:
|
||||
image_path = os.path.join(camTy, "Metadata/Image/Original_Images", fileId)
|
||||
files = os.listdir(image_path)
|
||||
target_file = [f for f in files if f.endswith(".png") or f.endswith(".jpg")]
|
||||
image_file = os.path.join(image_path, target_file[0])
|
||||
|
||||
with open(image_file, "rb") as origin_image:
|
||||
base64_string = base64.b64encode(origin_image.read()).decode("utf-8")
|
||||
print(base64_string[:100])
|
||||
|
||||
return base64_string
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@app.get("/get-image/{camTy}/{fileId}")
|
||||
async def get_image(camTy: str, fileId: str):
|
||||
try:
|
||||
tif_file_path = os.path.join(camTy, "Georeference/Tif", fileId)
|
||||
files = os.listdir(tif_file_path)
|
||||
target_file = [f for f in files if f.endswith(".tif")]
|
||||
tif_file = os.path.join(tif_file_path, target_file[0])
|
||||
|
||||
with rasterio.open(tif_file) as dataset:
|
||||
crs = dataset.crs
|
||||
|
||||
bounds = dataset.bounds
|
||||
|
||||
if crs != "EPSG:4326":
|
||||
transformer = Transformer.from_crs(crs, "EPSG:4326", always_xy=True)
|
||||
minx, miny = transformer.transform(bounds.left, bounds.bottom)
|
||||
maxx, maxy = transformer.transform(bounds.right, bounds.top)
|
||||
|
||||
print(minx, miny, maxx, maxy)
|
||||
|
||||
data = dataset.read()
|
||||
if data.shape[0] == 1:
|
||||
image_data = data[0]
|
||||
else:
|
||||
image_data = np.moveaxis(data, 0, -1)
|
||||
|
||||
image = Image.fromarray(image_data)
|
||||
buffer = io.BytesIO()
|
||||
image.save(buffer, format="PNG")
|
||||
|
||||
base64_string = base64.b64encode(buffer.getvalue()).decode("utf-8")
|
||||
|
||||
print(base64_string[:100])
|
||||
return {
|
||||
"minLon": minx,
|
||||
"minLat": miny,
|
||||
"maxLon": maxx,
|
||||
"maxLat": maxy,
|
||||
"image": base64_string
|
||||
}
|
||||
except Exception as e:
|
||||
raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
BASE_DIR = Path(__file__).parent
|
||||
PIC_GPS_SCRIPT = BASE_DIR / "pic_gps.py"
|
||||
|
||||
@app.post("/stitch")
|
||||
async def stitch(
|
||||
files: List[UploadFile] = File(..., description="합성할 이미지 파일들 (2장 이상)"),
|
||||
fileId: str = Form(...)
|
||||
):
|
||||
if len(files) < 2:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="최소 2장 이상의 이미지가 필요합니다."
|
||||
)
|
||||
|
||||
try:
|
||||
today = datetime.now().strftime("%Y%m%d")
|
||||
upload_dir = BASE_DIR / "stitch" / fileId
|
||||
upload_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
model_list = []
|
||||
for idx, file in enumerate(files):
|
||||
|
||||
model = check_camera_info(file.file)
|
||||
model_list.append(model)
|
||||
|
||||
original_filename = file.filename or f"image_{idx}.jpg"
|
||||
filename = f"{model}_{idx:03d}_{original_filename}"
|
||||
file_path = upload_dir / filename
|
||||
|
||||
output_filename = f"stitched_{fileId}.jpg"
|
||||
output_path = upload_dir / output_filename
|
||||
|
||||
# 파일 저장
|
||||
with open(file_path, "wb") as buffer:
|
||||
shutil.copyfileobj(file.file, buffer)
|
||||
|
||||
model_counter = Counter(model_list)
|
||||
most_common_model = model_counter.most_common(1)
|
||||
|
||||
cmd = [
|
||||
"python",
|
||||
str(PIC_GPS_SCRIPT),
|
||||
"--mode", "drone",
|
||||
"--input", str(upload_dir),
|
||||
"--out", str(output_path),
|
||||
"--model", most_common_model[0][0],
|
||||
"--enhance"
|
||||
]
|
||||
|
||||
print(cmd)
|
||||
|
||||
result = subprocess.run(
|
||||
cmd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=300
|
||||
)
|
||||
|
||||
print(f"Subprocess stdout: {result.stdout}")
|
||||
if result.returncode != 0:
|
||||
print(f"Subprocess stderr: {result.stderr}")
|
||||
raise HTTPException(status_code=500, detail=f"Script failed: {result.stderr}")
|
||||
|
||||
return FileResponse(
|
||||
path=str(output_path),
|
||||
media_type="image/jpeg",
|
||||
filename=output_filename
|
||||
)
|
||||
except HTTPException:
|
||||
raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import uvicorn
|
||||
uvicorn.run(app, host="0.0.0.0", port=5001)
|
||||
46
prediction/image/docker-compose.cpu.yml
Normal file
46
prediction/image/docker-compose.cpu.yml
Normal file
@ -0,0 +1,46 @@
|
||||
version: "3.9"
|
||||
|
||||
# CPU 전용 docker-compose 설정
|
||||
# GPU(nvidia-container-toolkit) 없이도 실행 가능
|
||||
# 실행: docker compose -f docker-compose.cpu.yml up -d --build
|
||||
|
||||
services:
|
||||
image-analysis:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.cpu
|
||||
image: wing-image-analysis:cpu
|
||||
container_name: wing-image-analysis
|
||||
ports:
|
||||
- "5001:5001"
|
||||
environment:
|
||||
- DEVICE=cpu
|
||||
|
||||
volumes:
|
||||
# ── mx15hdi (EO 드론 카메라) ────────────────────────────────────────
|
||||
# 입력: 업로드된 원본 이미지
|
||||
- ./mx15hdi/Metadata/Image/Original_Images:/app/mx15hdi/Metadata/Image/Original_Images
|
||||
# 출력: 메타데이터 CSV
|
||||
- ./mx15hdi/Metadata/CSV:/app/mx15hdi/Metadata/CSV
|
||||
# 출력: 지리참조 GeoTIFF (컬러 / 마스크)
|
||||
- ./mx15hdi/Georeference/Tif:/app/mx15hdi/Georeference/Tif
|
||||
- ./mx15hdi/Georeference/Mask_Tif:/app/mx15hdi/Georeference/Mask_Tif
|
||||
# 출력: 유류 폴리곤 Shapefile
|
||||
- ./mx15hdi/Polygon/Shp:/app/mx15hdi/Polygon/Shp
|
||||
# 출력: 블렌딩 추론 결과 / 마스크 이미지
|
||||
- ./mx15hdi/Detect/result:/app/mx15hdi/Detect/result
|
||||
- ./mx15hdi/Detect/Mask_result:/app/mx15hdi/Detect/Mask_result
|
||||
# ── starsafire (열화상 카메라) ──────────────────────────────────────
|
||||
- ./starsafire/Metadata/Image/Original_Images:/app/starsafire/Metadata/Image/Original_Images
|
||||
- ./starsafire/Metadata/CSV:/app/starsafire/Metadata/CSV
|
||||
- ./starsafire/Georeference/Tif:/app/starsafire/Georeference/Tif
|
||||
- ./starsafire/Georeference/Mask_Tif:/app/starsafire/Georeference/Mask_Tif
|
||||
- ./starsafire/Polygon/Shp:/app/starsafire/Polygon/Shp
|
||||
- ./starsafire/Detect/result:/app/starsafire/Detect/result
|
||||
- ./starsafire/Detect/Mask_result:/app/starsafire/Detect/Mask_result
|
||||
# ── 스티칭 결과 ─────────────────────────────────────────────────────
|
||||
- ./stitch:/app/stitch
|
||||
|
||||
# GPU deploy 섹션 없음 — CPU 전용 실행
|
||||
|
||||
restart: unless-stopped
|
||||
47
prediction/image/docker-compose.yml
Normal file
47
prediction/image/docker-compose.yml
Normal file
@ -0,0 +1,47 @@
|
||||
version: "3.9"
|
||||
|
||||
services:
|
||||
image-analysis:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
image: wing-image-analysis:latest
|
||||
container_name: wing-image-analysis
|
||||
ports:
|
||||
- "5001:5001"
|
||||
|
||||
volumes:
|
||||
# ── mx15hdi (EO 드론 카메라) ────────────────────────────────────────
|
||||
# 입력: 업로드된 원본 이미지
|
||||
- ./mx15hdi/Metadata/Image/Original_Images:/app/mx15hdi/Metadata/Image/Original_Images
|
||||
# 출력: 메타데이터 CSV
|
||||
- ./mx15hdi/Metadata/CSV:/app/mx15hdi/Metadata/CSV
|
||||
# 출력: 지리참조 GeoTIFF (컬러 / 마스크)
|
||||
- ./mx15hdi/Georeference/Tif:/app/mx15hdi/Georeference/Tif
|
||||
- ./mx15hdi/Georeference/Mask_Tif:/app/mx15hdi/Georeference/Mask_Tif
|
||||
# 출력: 유류 폴리곤 Shapefile
|
||||
- ./mx15hdi/Polygon/Shp:/app/mx15hdi/Polygon/Shp
|
||||
# 출력: 블렌딩 추론 결과 / 마스크 이미지
|
||||
- ./mx15hdi/Detect/result:/app/mx15hdi/Detect/result
|
||||
- ./mx15hdi/Detect/Mask_result:/app/mx15hdi/Detect/Mask_result
|
||||
# ── starsafire (열화상 카메라) ──────────────────────────────────────
|
||||
- ./starsafire/Metadata/Image/Original_Images:/app/starsafire/Metadata/Image/Original_Images
|
||||
- ./starsafire/Metadata/CSV:/app/starsafire/Metadata/CSV
|
||||
- ./starsafire/Georeference/Tif:/app/starsafire/Georeference/Tif
|
||||
- ./starsafire/Georeference/Mask_Tif:/app/starsafire/Georeference/Mask_Tif
|
||||
- ./starsafire/Polygon/Shp:/app/starsafire/Polygon/Shp
|
||||
- ./starsafire/Detect/result:/app/starsafire/Detect/result
|
||||
- ./starsafire/Detect/Mask_result:/app/starsafire/Detect/Mask_result
|
||||
# ── 스티칭 결과 ─────────────────────────────────────────────────────
|
||||
- ./stitch:/app/stitch
|
||||
|
||||
# NVIDIA GPU 할당 (nvidia-container-toolkit 필수)
|
||||
deploy:
|
||||
resources:
|
||||
reservations:
|
||||
devices:
|
||||
- driver: nvidia
|
||||
count: 1
|
||||
capabilities: [gpu]
|
||||
|
||||
restart: unless-stopped
|
||||
97
prediction/image/extract_data.py
Normal file
97
prediction/image/extract_data.py
Normal file
@ -0,0 +1,97 @@
|
||||
import csv
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
import geopandas as gpd
|
||||
import json
|
||||
|
||||
def get_metadata(camTy: str, fileId: str):
|
||||
|
||||
# CSV 파일 경로 설정
|
||||
# base_dir = "mx15hdi" if pollId == "1" else "starsafire"
|
||||
if camTy == "mx15hdi":
|
||||
csv_path = f"{camTy}/Metadata/CSV/{fileId}/mx15hdi_interpolation.csv"
|
||||
elif camTy == "starsafire":
|
||||
csv_path = f"{camTy}/Metadata/CSV/{fileId}/Metadata_Extracted.csv"
|
||||
|
||||
try:
|
||||
# CSV 파일 읽기
|
||||
with open(csv_path, 'r', newline='', encoding='utf-8-sig') as csvfile:
|
||||
reader = csv.reader(csvfile)
|
||||
next(reader, None)
|
||||
row = next(reader, None)
|
||||
return ','.join(row)
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"CSV file not found: {csv_path}")
|
||||
raise
|
||||
except ValueError as e:
|
||||
print(f"Value error: {str(e)}")
|
||||
raise
|
||||
except Exception as e:
|
||||
print(f"Error processing CSV: {e}")
|
||||
raise
|
||||
|
||||
|
||||
def get_oil_type(camTy: str, fileId: str):
|
||||
# Shapefile 경로 설정
|
||||
path = f"{camTy}/Polygon/Shp/{fileId}"
|
||||
shp_file = list(Path(path).glob("*.shp"))
|
||||
if not shp_file:
|
||||
return []
|
||||
shp_path = f"{camTy}/Polygon/Shp/{fileId}/{shp_file[0].name}"
|
||||
print(shp_path)
|
||||
# if camTy == "mx15hdi":
|
||||
# fileSub = f"{Path(fileName).stem}_gsd"
|
||||
# elif camTy == "starsafire":
|
||||
# fileSub = f"{Path(fileName).stem}"
|
||||
|
||||
# shp_path = f"{camTy}/Polygon/Shp/{fileId}/{fileSub}.shp"
|
||||
|
||||
# 두께 정보
|
||||
class_thickness_mm = {
|
||||
1: 1.0, # Black oil (Emulsion)
|
||||
2: 0.1, # Brown oil (Crude)
|
||||
3: 0.0003, # Rainbow oil (Slick)
|
||||
4: 0.0001 # Silver oil (Slick)
|
||||
}
|
||||
# 알고리즘 정보
|
||||
algorithm = {
|
||||
1: "검정",
|
||||
2: "갈색",
|
||||
3: "무지개",
|
||||
4: "은색"
|
||||
}
|
||||
|
||||
try:
|
||||
# Shapefile 읽기
|
||||
gdf = gpd.read_file(shp_path)
|
||||
if gdf.crs != "epsg:4326":
|
||||
gdf = gdf.to_crs("epsg:4326")
|
||||
|
||||
# 데이터 준비
|
||||
data = []
|
||||
for _, row in gdf.iterrows():
|
||||
class_id = row.get('class_id', None)
|
||||
area_m2 = row.get('area_m2', None)
|
||||
volume_m3 = row.get('volume_m3', None)
|
||||
note = row.get('note', None)
|
||||
thickness_m = class_thickness_mm.get(class_id, 0) / 1000.0
|
||||
geom_wkt = row.geometry.wkt if row.geometry else None
|
||||
result = {
|
||||
"classId": algorithm.get(class_id, 0),
|
||||
"area": area_m2,
|
||||
"volume": volume_m3,
|
||||
"note": note,
|
||||
"thickness": thickness_m,
|
||||
"wkt": geom_wkt
|
||||
}
|
||||
data.append(result)
|
||||
|
||||
return data
|
||||
|
||||
except FileNotFoundError:
|
||||
print(f"Shapefile not found: {shp_path}")
|
||||
raise
|
||||
except Exception as e:
|
||||
print(f"Error processing shapefile or database: {str(e)}")
|
||||
raise
|
||||
238
prediction/image/image_plan.md
Normal file
238
prediction/image/image_plan.md
Normal file
@ -0,0 +1,238 @@
|
||||
# 이미지 업로드 유류 분석 기능 구현 계획
|
||||
|
||||
## Context
|
||||
|
||||
드론/항공 촬영 이미지를 업로드하면 AI 세그멘테이션으로 유류 확산 정보(위치·유종·면적·부피)를 자동 추출하고, 결과를 DB에 저장한 뒤 예측정보 입력 폼에 자동 채워주는 기능이다.
|
||||
이미지 분석 서버(`prediction/image/api.py`, FastAPI, 포트 5001)는 이미 구현되어 있으며, 프론트↔백엔드↔이미지 분석 서버 연동 및 결과 자동 채우기를 구현한다.
|
||||
|
||||
---
|
||||
|
||||
## 전체 흐름
|
||||
|
||||
```
|
||||
[프론트] 이미지 선택 → 분석 요청 버튼
|
||||
↓ POST /api/prediction/image-analyze (multipart: image)
|
||||
[백엔드]
|
||||
├─ fileId = UUID 생성
|
||||
├─ camTy = "mx15hdi" (하드코딩, 추후 이미지 EXIF 카메라 정보로 자동 판별 예정)
|
||||
├─ 이미지 분석 서버로 전달 POST http://IMAGE_API_URL/run-script/
|
||||
├─ 응답 파싱: meta(위경도 DMS→십진수 변환), data[0].classId→유종
|
||||
├─ ACDNT INSERT (lat/lon/임시사고명)
|
||||
├─ SPIL_DATA INSERT (유종/면적/img_rslt_data JSONB)
|
||||
└─ 응답: { acdntSn, lat, lon, oilType, area, volume }
|
||||
↓
|
||||
[프론트] 폼 자동 채우기 (좌표·유종·유출량)
|
||||
→ 사용자가 나머지 입력 후 "확산예측 실행"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 구현 단계
|
||||
|
||||
### Step 1 — DB 마이그레이션 (`database/migration/017_spil_img_rslt.sql`)
|
||||
|
||||
`SPIL_DATA` 테이블에 이미지 분석 결과 컬럼 추가.
|
||||
|
||||
```sql
|
||||
ALTER TABLE wing.spil_data
|
||||
ADD COLUMN IF NOT EXISTS img_rslt_data JSONB;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Step 2 — 백엔드: 이미지 분석 엔드포인트
|
||||
|
||||
**파일**: `backend/src/prediction/predictionRouter.ts` (라우트 등록)
|
||||
**신규 파일**: `backend/src/prediction/imageAnalyzeService.ts`
|
||||
|
||||
#### 엔드포인트
|
||||
|
||||
```
|
||||
POST /api/prediction/image-analyze
|
||||
Content-Type: multipart/form-data
|
||||
Body: image (file)
|
||||
```
|
||||
|
||||
#### `imageAnalyzeService.ts` 핵심 로직
|
||||
|
||||
```typescript
|
||||
// 1. fileId 생성 (crypto.randomUUID)
|
||||
|
||||
// 2. 이미지 분석 서버 호출
|
||||
// camTy는 현재 "mx15hdi"로 하드코딩한다.
|
||||
// TODO: 추후 이미지 EXIF에서 카메라 모델명을 읽어 camTy를 자동 판별하는 로직을
|
||||
// 이미지 분석 서버(api.py)에 추가할 예정이다. (check_camera_info 함수 활용)
|
||||
// FormData: { camTy: 'mx15hdi', fileId, image }
|
||||
// → POST ${IMAGE_API_URL}/run-script/
|
||||
// 응답: { meta: string, data: OilPolygon[] }
|
||||
|
||||
// 3. meta 문자열 파싱 (mx15hdi CSV 컬럼 순서 사용)
|
||||
// [Filename, Tlat_d, Tlat_m, Tlat_s, Tlon_d, Tlon_m, Tlon_s, ...]
|
||||
// DMS → 십진수: d + m/60 + s/3600
|
||||
|
||||
// 4. 유종 매핑 (data[0].classId → UI 유종명)
|
||||
// classId → oilType: { '검정': '벙커C유', '갈색': '벙커C유', '무지개': '경유', '은색': '등유' }
|
||||
|
||||
// 5. ACDNT INSERT (임시 사고명 = "이미지분석_YYYY-MM-DD HH:mm", lat, lon, occurredAt = 촬영시각)
|
||||
// 6. SPIL_DATA INSERT (acdntSn, matTyCd, matVol=data[0].volume, imgRsltData=JSON.stringify(response))
|
||||
|
||||
// 7. 반환
|
||||
interface ImageAnalyzeResult {
|
||||
acdntSn: number;
|
||||
lat: number;
|
||||
lon: number;
|
||||
oilType: string; // UI 유종명 (벙커C유 등)
|
||||
area: number; // m²
|
||||
volume: number; // m³
|
||||
fileId: string;
|
||||
}
|
||||
```
|
||||
|
||||
#### 환경변수 추가 (`backend/.env`)
|
||||
|
||||
```
|
||||
IMAGE_API_URL=http://localhost:5001
|
||||
```
|
||||
|
||||
#### 에러 처리
|
||||
|
||||
| 조건 | 응답 |
|
||||
|------|------|
|
||||
| 이미지에 GPS EXIF 없음 | 422 `{ error: 'GPS_NOT_FOUND' }` |
|
||||
| 이미지 서버 타임아웃(300s) | 504 |
|
||||
|
||||
---
|
||||
|
||||
### Step 3 — 프론트엔드: API 서비스
|
||||
|
||||
**파일**: `frontend/src/tabs/prediction/services/predictionApi.ts`
|
||||
|
||||
```typescript
|
||||
interface ImageAnalyzeResult {
|
||||
acdntSn: number;
|
||||
lat: number;
|
||||
lon: number;
|
||||
oilType: string;
|
||||
area: number;
|
||||
volume: number;
|
||||
fileId: string;
|
||||
}
|
||||
|
||||
export const analyzeImage = async (
|
||||
file: File
|
||||
): Promise<ImageAnalyzeResult> => {
|
||||
const formData = new FormData();
|
||||
formData.append('image', file);
|
||||
const { data } = await api.post<ImageAnalyzeResult>(
|
||||
'/prediction/image-analyze',
|
||||
formData,
|
||||
{ headers: { 'Content-Type': 'multipart/form-data' }, timeout: 330000 }
|
||||
);
|
||||
return data;
|
||||
};
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Step 4 — 프론트엔드: Props 타입 확장
|
||||
|
||||
**파일**: `frontend/src/tabs/prediction/components/leftPanelTypes.ts`
|
||||
|
||||
```typescript
|
||||
// 기존 Props에 추가
|
||||
onImageAnalysisResult?: (result: ImageAnalyzeResult) => void;
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### Step 5 — 프론트엔드: PredictionInputSection 수정
|
||||
|
||||
**파일**: `frontend/src/tabs/prediction/components/PredictionInputSection.tsx`
|
||||
|
||||
#### 변경 사항
|
||||
|
||||
1. **"이미지 분석 실행" 버튼** (이미지 선택 후 활성화)
|
||||
- 클릭 시 `analyzeImage(file)` 호출 (camTy는 백엔드에서 처리)
|
||||
- 로딩 스피너 표시 (분석 소요시간 수십 초~수 분)
|
||||
|
||||
2. **분석 결과 표시** (성공 시)
|
||||
- "분석 완료: 위도 XX.XXXX / 경도 XXX.XXXX / 유종: OO" 요약 메시지
|
||||
|
||||
3. **`onImageAnalysisResult` 콜백 호출**
|
||||
- 분석 성공 시 부모로 결과 전달
|
||||
|
||||
4. **에러 처리**
|
||||
- GPS_NOT_FOUND: "GPS 정보가 없는 이미지입니다" 메시지 표시
|
||||
- 타임아웃: "분석 서버 응답 없음" 메시지 표시
|
||||
|
||||
5. **로컬 상태 교체**: `uploadedImage` (Base64 DataURL) 제거, `uploadedFile: File | null`로 교체
|
||||
|
||||
---
|
||||
|
||||
### Step 6 — 프론트엔드: OilSpillView 결과 처리
|
||||
|
||||
**파일**: `frontend/src/tabs/prediction/components/OilSpillView.tsx`
|
||||
|
||||
```typescript
|
||||
const handleImageAnalysisResult = useCallback((result: ImageAnalyzeResult) => {
|
||||
// 1. 사고 좌표 자동 채우기
|
||||
setIncidentCoord({ lat: result.lat, lon: result.lon })
|
||||
setFlyToCoord({ lat: result.lat, lon: result.lon })
|
||||
|
||||
// 2. 유종/유출량 자동 채우기
|
||||
setOilType(result.oilType)
|
||||
setSpillAmount(parseFloat(result.volume.toFixed(4)))
|
||||
setSpillUnit('m³')
|
||||
|
||||
// 3. 분석 선택 상태 갱신 (acdntSn 연결 — 시뮬레이션 실행 시 기존 사고 사용)
|
||||
setSelectedAnalysis({
|
||||
acdntSn: result.acdntSn,
|
||||
acdntNm: '',
|
||||
// ... 나머지 기본값
|
||||
})
|
||||
}, [])
|
||||
```
|
||||
|
||||
`LeftPanel`에 `onImageAnalysisResult={handleImageAnalysisResult}` 전달.
|
||||
|
||||
---
|
||||
|
||||
## 수정 대상 파일 요약
|
||||
|
||||
| 파일 | 변경 유형 |
|
||||
|------|---------|
|
||||
| `database/migration/017_spil_img_rslt.sql` | **신규** — SPIL_DATA 컬럼 추가 |
|
||||
| `backend/src/prediction/imageAnalyzeService.ts` | **신규** — 이미지 분석 서비스 |
|
||||
| `backend/src/prediction/predictionRouter.ts` | **수정** — 라우트 추가 |
|
||||
| `backend/.env` | **수정** — IMAGE_API_URL 추가 |
|
||||
| `frontend/src/tabs/prediction/services/predictionApi.ts` | **수정** — analyzeImage 함수 추가 |
|
||||
| `frontend/src/tabs/prediction/components/leftPanelTypes.ts` | **수정** — Props 타입 추가 |
|
||||
| `frontend/src/tabs/prediction/components/PredictionInputSection.tsx` | **수정** — 분석 실행 UI |
|
||||
| `frontend/src/tabs/prediction/components/OilSpillView.tsx` | **수정** — 결과 처리 핸들러 |
|
||||
|
||||
---
|
||||
|
||||
## 검증 방법
|
||||
|
||||
1. **이미지 분석 서버 직접 테스트**
|
||||
```bash
|
||||
curl -X POST http://localhost:5001/run-script/ \
|
||||
-F "camTy=mx15hdi" -F "fileId=test001" -F "image=@drone_image.jpg"
|
||||
```
|
||||
|
||||
2. **백엔드 엔드포인트 테스트**
|
||||
```bash
|
||||
curl -X POST http://localhost:3001/api/prediction/image-analyze \
|
||||
-F "image=@drone_image.jpg" \
|
||||
-H "Cookie: <auth_cookie>"
|
||||
```
|
||||
- 응답: `{ acdntSn, lat, lon, oilType, area, volume, fileId }`
|
||||
- DB 확인: ACDNT, SPIL_DATA 레코드 생성 여부
|
||||
|
||||
3. **프론트엔드 E2E 테스트**
|
||||
- 이미지 업로드 모드 선택 → GPS EXIF 있는 이미지 업로드 → "이미지 분석 실행" 클릭
|
||||
- 로딩 표시 → 완료 시: 지도 이동, 유종/좌표 폼 자동 채워짐 확인
|
||||
- 나머지 필드(예상시각·유출시간 등) 직접 입력 후 "확산예측 실행" → 정상 시뮬레이션 확인
|
||||
|
||||
4. **에러 케이스 확인**
|
||||
- GPS 없는 이미지 → "GPS 정보가 없는 이미지입니다" 메시지
|
||||
131
prediction/image/mx15hdi/Detect/Inference.py
Normal file
131
prediction/image/mx15hdi/Detect/Inference.py
Normal file
@ -0,0 +1,131 @@
|
||||
import os, mmcv, cv2, json
|
||||
import numpy as np
|
||||
import torch
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
from mmseg.apis import init_segmentor, inference_segmentor
|
||||
from shapely.geometry import Polygon, mapping
|
||||
import sys
|
||||
|
||||
_DETECT_DIR = Path(__file__).parent # mx15hdi/Detect/
|
||||
_MX15HDI_DIR = _DETECT_DIR.parent # mx15hdi/
|
||||
|
||||
|
||||
def load_model():
|
||||
"""서버 시작 시 1회 호출. 로드된 모델 객체를 반환한다."""
|
||||
# 우선순위: 환경변수 DEVICE > GPU 자동감지 > CPU 폴백
|
||||
env_device = os.environ.get('DEVICE', '').strip()
|
||||
if env_device:
|
||||
device = env_device
|
||||
elif torch.cuda.is_available():
|
||||
device = 'cuda:0'
|
||||
else:
|
||||
device = 'cpu'
|
||||
print(f'[Inference] 사용 device: {device}')
|
||||
|
||||
config = str(_DETECT_DIR / 'V7_SPECIAL.py')
|
||||
checkpoint = str(_DETECT_DIR / 'epoch_165.pth')
|
||||
model = init_segmentor(config, checkpoint, device=device)
|
||||
model.PALETTE = [
|
||||
[0, 0, 0], # background
|
||||
[0, 0, 204], # black
|
||||
[180, 180, 180], # brown
|
||||
[255, 255, 0], # rainbow
|
||||
[178, 102, 255] # silver
|
||||
]
|
||||
return model
|
||||
|
||||
|
||||
def blend_images(original_img, color_mask, alpha=0.6):
|
||||
"""
|
||||
Blend original image and color mask with alpha transparency.
|
||||
Inputs: numpy arrays HWC uint8
|
||||
"""
|
||||
blended = cv2.addWeighted(original_img, 1 - alpha, color_mask, alpha, 0)
|
||||
return blended
|
||||
|
||||
|
||||
def run_inference(model, file_id: str, write_files: bool = False) -> dict:
|
||||
"""
|
||||
사전 로드된 모델로 file_id 폴더 내 이미지를 세그멘테이션한다.
|
||||
|
||||
Args:
|
||||
model: load_model()로 로드된 모델 객체.
|
||||
file_id: 처리할 이미지 폴더명.
|
||||
write_files: True이면 Detect/result/ 와 Detect/Mask_result/ 에 중간 파일 저장.
|
||||
False이면 디스크 쓰기 생략 (기본값).
|
||||
|
||||
Returns:
|
||||
inference_cache: {image_filename: {'blended': ndarray, 'mask': ndarray, 'ext': str}}
|
||||
이 값을 run_georeference()에 전달하면 중간 파일 읽기를 생략할 수 있다.
|
||||
"""
|
||||
img_path = str(_MX15HDI_DIR / 'Metadata' / 'Image' / 'Original_Images' / file_id)
|
||||
output_folder = str(_DETECT_DIR / 'result' / file_id)
|
||||
mask_folder = str(_DETECT_DIR / 'Mask_result' / file_id)
|
||||
|
||||
if not os.path.exists(img_path):
|
||||
raise FileNotFoundError(f"이미지 폴더가 존재하지 않습니다: {img_path}")
|
||||
|
||||
if write_files:
|
||||
os.makedirs(output_folder, exist_ok=True)
|
||||
os.makedirs(mask_folder, exist_ok=True)
|
||||
|
||||
image_files = [
|
||||
f for f in os.listdir(img_path)
|
||||
if f.lower().endswith(('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff'))
|
||||
]
|
||||
|
||||
# palette_array는 이미지마다 동일하므로 루프 외부에서 1회 생성
|
||||
palette_array = np.array(model.PALETTE, dtype=np.uint8)
|
||||
|
||||
inference_cache = {}
|
||||
|
||||
for image_file in tqdm(image_files, desc="Processing images"):
|
||||
image_path = os.path.join(img_path, image_file)
|
||||
image_name, image_ext = os.path.splitext(image_file)
|
||||
image_ext = image_ext.lower()
|
||||
|
||||
# 이미지를 1회만 읽어 inference와 blending 모두에 재사용
|
||||
img_bgr = cv2.imread(image_path)
|
||||
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
|
||||
|
||||
# 이미 로드된 배열을 inference_segmentor에 전달 (경로 전달 시 내부에서 재읽기 발생)
|
||||
result = inference_segmentor(model, img_bgr)
|
||||
seg_map = result[0]
|
||||
|
||||
# Create color mask from palette
|
||||
color_mask = palette_array[seg_map]
|
||||
|
||||
# blended image
|
||||
blended = blend_images(img_rgb, color_mask, alpha=0.6)
|
||||
blended_bgr = cv2.cvtColor(blended, cv2.COLOR_RGB2BGR)
|
||||
|
||||
# mask — numpy 슬라이싱으로 cv2.cvtColor 호출 1회 제거
|
||||
mask_bgr = color_mask[:, :, ::-1].copy()
|
||||
|
||||
# 결과를 메모리 캐시에 저장 (georeference 단계에서 재사용)
|
||||
# mask는 palette 원본(RGB) 그대로 저장 — Oilshape의 class_colors가 RGB 기준이므로 BGR로 저장 시 색상 매칭 실패
|
||||
inference_cache[image_file] = {
|
||||
'blended': blended_bgr,
|
||||
'mask': color_mask,
|
||||
'ext': image_ext,
|
||||
}
|
||||
|
||||
if write_files:
|
||||
cv2.imwrite(
|
||||
os.path.join(output_folder, f"{image_name}{image_ext}"),
|
||||
blended_bgr,
|
||||
[cv2.IMWRITE_JPEG_QUALITY, 85]
|
||||
)
|
||||
cv2.imwrite(os.path.join(mask_folder, f"{image_name}{image_ext}"), mask_bgr)
|
||||
|
||||
return inference_cache
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
if len(sys.argv) < 2:
|
||||
raise ValueError("파라미터가 제공되지 않았습니다. 폴더 이름을 명령줄 인자로 입력해주세요.")
|
||||
_model = load_model()
|
||||
# CLI 단독 실행 시에는 중간 파일도 디스크에 저장
|
||||
run_inference(_model, sys.argv[1], write_files=True)
|
||||
196
prediction/image/mx15hdi/Detect/V7_SPECIAL.py
Normal file
196
prediction/image/mx15hdi/Detect/V7_SPECIAL.py
Normal file
@ -0,0 +1,196 @@
|
||||
norm_cfg = dict(type='SyncBN', requires_grad=True)
|
||||
model = dict(
|
||||
type='EncoderDecoder',
|
||||
pretrained='open-mmlab://resnet101_v1c',
|
||||
backbone=dict(
|
||||
type='ResNetV1c',
|
||||
depth=101,
|
||||
num_stages=4,
|
||||
out_indices=(0, 1, 2, 3),
|
||||
dilations=(1, 1, 2, 4),
|
||||
strides=(1, 2, 1, 1),
|
||||
norm_cfg=dict(type='SyncBN', requires_grad=True),
|
||||
norm_eval=False,
|
||||
style='pytorch',
|
||||
contract_dilation=True),
|
||||
decode_head=dict(
|
||||
type='DAHead',
|
||||
in_channels=2048,
|
||||
in_index=3,
|
||||
channels=512,
|
||||
pam_channels=64,
|
||||
dropout_ratio=0.1,
|
||||
num_classes=5,
|
||||
norm_cfg=dict(type='SyncBN', requires_grad=True),
|
||||
align_corners=False,
|
||||
loss_decode=dict(
|
||||
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
|
||||
auxiliary_head=dict(
|
||||
type='FCNHead',
|
||||
in_channels=1024,
|
||||
in_index=2,
|
||||
channels=256,
|
||||
num_convs=1,
|
||||
concat_input=False,
|
||||
dropout_ratio=0.1,
|
||||
num_classes=5,
|
||||
norm_cfg=dict(type='SyncBN', requires_grad=True),
|
||||
align_corners=False,
|
||||
loss_decode=dict(
|
||||
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
|
||||
train_cfg=dict(),
|
||||
test_cfg=dict(mode='whole'))
|
||||
dataset_type = 'CustomDataset'
|
||||
data_root = 'data/my_dataset_v7'
|
||||
img_norm_cfg = dict(
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(512, 512)),
|
||||
dict(type='RandomCrop', crop_size=(512, 512), cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', flip_ratio=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(
|
||||
type='Normalize',
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True),
|
||||
dict(type='Pad', size=(512, 512), pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(512, 512),
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(
|
||||
type='Normalize',
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type='CustomDataset',
|
||||
data_root='data/my_dataset_v7',
|
||||
img_dir='img_dir/train',
|
||||
ann_dir='ann_dir/train',
|
||||
pipeline=[
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(512, 512)),
|
||||
dict(type='RandomCrop', crop_size=(512, 512), cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', flip_ratio=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(
|
||||
type='Normalize',
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True),
|
||||
dict(type='Pad', size=(512, 512), pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
||||
]),
|
||||
val=dict(
|
||||
type='CustomDataset',
|
||||
data_root='data/my_dataset_v7',
|
||||
img_dir='img_dir/val',
|
||||
ann_dir='ann_dir/val',
|
||||
pipeline=[
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(512, 512),
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(
|
||||
type='Normalize',
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
]),
|
||||
test=dict(
|
||||
type='CustomDataset',
|
||||
data_root='data/my_dataset_v7',
|
||||
img_dir='img_dir/test',
|
||||
ann_dir='ann_dir/test',
|
||||
pipeline=[
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(512, 512),
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(
|
||||
type='Normalize',
|
||||
mean=[119.54541993, 107.13545011, 96.71320316],
|
||||
std=[60.3273945, 56.33692515, 55.71005772],
|
||||
to_rgb=True),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
],
|
||||
split=None,
|
||||
img_suffix='.png',
|
||||
seg_map_suffix='.png'))
|
||||
dist_params = dict(backend='nccl')
|
||||
log_level = 'INFO'
|
||||
load_from = None
|
||||
resume_from = None
|
||||
#workflow = [('train', 1), ('val', 1)]
|
||||
workflow = [('test', 1)]
|
||||
cudnn_benchmark = True
|
||||
optimizer = dict(
|
||||
type='AdamW',
|
||||
lr=3e-05,
|
||||
betas=(0.9, 0.999),
|
||||
weight_decay=0.01,
|
||||
paramwise_cfg=dict(
|
||||
custom_keys=dict(
|
||||
pos_block=dict(decay_mult=0.0),
|
||||
norm=dict(decay_mult=0.0),
|
||||
head=dict(lr_mult=10.0))))
|
||||
optimizer_config = dict()
|
||||
lr_config = dict(
|
||||
policy='poly',
|
||||
warmup='linear',
|
||||
warmup_iters=1500,
|
||||
warmup_ratio=1e-06,
|
||||
power=1.0,
|
||||
min_lr=0.0,
|
||||
by_epoch=False)
|
||||
runner = dict(type='EpochBasedRunner', max_epochs=200)
|
||||
checkpoint_config = dict(by_epoch=True, interval=1)
|
||||
evaluation = dict(by_epoch=True, interval=1, metric='mIoU')
|
||||
log_config = dict(
|
||||
interval=1000,
|
||||
hooks=[
|
||||
dict(type='TextLoggerHook'),
|
||||
dict(
|
||||
type='WandbLoggerHook',
|
||||
init_kwargs=dict(project='Oil_Spill', name='V7_SPECIAL'))
|
||||
])
|
||||
auto_resume = False
|
||||
work_dir = 'work_dirs/V7_SPECIAL'
|
||||
gpu_ids = [0]
|
||||
@ -0,0 +1,161 @@
|
||||
version: 2.1
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
docker:
|
||||
- image: cimg/python:3.7.4
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install dependencies
|
||||
command: |
|
||||
sudo apt-add-repository ppa:brightbox/ruby-ng -y
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ruby2.7
|
||||
- run:
|
||||
name: Install pre-commit hook
|
||||
command: |
|
||||
pip install pre-commit
|
||||
pre-commit install
|
||||
- run:
|
||||
name: Linting
|
||||
command: pre-commit run --all-files
|
||||
- run:
|
||||
name: Check docstring coverage
|
||||
command: |
|
||||
pip install interrogate
|
||||
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 50 mmseg
|
||||
|
||||
build_cpu:
|
||||
parameters:
|
||||
# The python version must match available image tags in
|
||||
# https://circleci.com/developer/images/image/cimg/python
|
||||
python:
|
||||
type: string
|
||||
default: "3.7.4"
|
||||
torch:
|
||||
type: string
|
||||
torchvision:
|
||||
type: string
|
||||
docker:
|
||||
- image: cimg/python:<< parameters.python >>
|
||||
resource_class: large
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install Libraries
|
||||
command: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx libjpeg-dev zlib1g-dev libtinfo-dev libncurses5
|
||||
- run:
|
||||
name: Configure Python & pip
|
||||
command: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install wheel
|
||||
- run:
|
||||
name: Install PyTorch
|
||||
command: |
|
||||
python -V
|
||||
python -m pip install torch==<< parameters.torch >>+cpu torchvision==<< parameters.torchvision >>+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- run:
|
||||
name: Install mmseg dependencies
|
||||
command: |
|
||||
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch<< parameters.torch >>/index.html
|
||||
python -m pip install mmdet
|
||||
python -m pip install -r requirements.txt
|
||||
- run:
|
||||
name: Build and install
|
||||
command: |
|
||||
python -m pip install -e .
|
||||
- run:
|
||||
name: Run unittests
|
||||
command: |
|
||||
python -m pip install timm
|
||||
python -m coverage run --branch --source mmseg -m pytest tests/
|
||||
python -m coverage xml
|
||||
python -m coverage report -m
|
||||
|
||||
build_cu101:
|
||||
machine:
|
||||
image: ubuntu-1604-cuda-10.1:201909-23
|
||||
resource_class: gpu.nvidia.small
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: Install Libraries
|
||||
command: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 libgl1-mesa-glx
|
||||
- run:
|
||||
name: Configure Python & pip
|
||||
command: |
|
||||
pyenv global 3.7.0
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install wheel
|
||||
- run:
|
||||
name: Install PyTorch
|
||||
command: |
|
||||
python -V
|
||||
python -m pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- run:
|
||||
name: Install mmseg dependencies
|
||||
# python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch${{matrix.torch_version}}/index.html
|
||||
command: |
|
||||
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html
|
||||
python -m pip install mmdet
|
||||
python -m pip install -r requirements.txt
|
||||
- run:
|
||||
name: Build and install
|
||||
command: |
|
||||
python setup.py check -m -s
|
||||
TORCH_CUDA_ARCH_LIST=7.0 python -m pip install -e .
|
||||
- run:
|
||||
name: Run unittests
|
||||
command: |
|
||||
python -m pip install timm
|
||||
python -m pytest tests/
|
||||
|
||||
workflows:
|
||||
unit_tests:
|
||||
jobs:
|
||||
- lint
|
||||
- build_cpu:
|
||||
name: build_cpu_th1.6
|
||||
torch: 1.6.0
|
||||
torchvision: 0.7.0
|
||||
requires:
|
||||
- lint
|
||||
- build_cpu:
|
||||
name: build_cpu_th1.7
|
||||
torch: 1.7.0
|
||||
torchvision: 0.8.1
|
||||
requires:
|
||||
- lint
|
||||
- build_cpu:
|
||||
name: build_cpu_th1.8_py3.9
|
||||
torch: 1.8.0
|
||||
torchvision: 0.9.0
|
||||
python: "3.9.0"
|
||||
requires:
|
||||
- lint
|
||||
- build_cpu:
|
||||
name: build_cpu_th1.9_py3.8
|
||||
torch: 1.9.0
|
||||
torchvision: 0.10.0
|
||||
python: "3.8.0"
|
||||
requires:
|
||||
- lint
|
||||
- build_cpu:
|
||||
name: build_cpu_th1.9_py3.9
|
||||
torch: 1.9.0
|
||||
torchvision: 0.10.0
|
||||
python: "3.9.0"
|
||||
requires:
|
||||
- lint
|
||||
- build_cu101:
|
||||
requires:
|
||||
- build_cpu_th1.6
|
||||
- build_cpu_th1.7
|
||||
- build_cpu_th1.8_py3.9
|
||||
- build_cpu_th1.9_py3.8
|
||||
- build_cpu_th1.9_py3.9
|
||||
@ -0,0 +1,133 @@
|
||||
# yapf: disable
|
||||
# Inference Speed is tested on NVIDIA V100
|
||||
hrnet = [
|
||||
dict(
|
||||
config='configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py',
|
||||
checkpoint='fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=33.0),
|
||||
),
|
||||
dict(
|
||||
config='configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py',
|
||||
checkpoint='fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=76.31),
|
||||
),
|
||||
dict(
|
||||
config='configs/hrnet/fcn_hr48_512x512_160k_ade20k.py',
|
||||
checkpoint='fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth',
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=42.02),
|
||||
),
|
||||
dict(
|
||||
config='configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py',
|
||||
checkpoint='fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=80.65),
|
||||
),
|
||||
]
|
||||
pspnet = [
|
||||
dict(
|
||||
config='configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py',
|
||||
checkpoint='pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=78.55),
|
||||
),
|
||||
dict(
|
||||
config='configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py',
|
||||
checkpoint='pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=79.76),
|
||||
),
|
||||
dict(
|
||||
config='configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py',
|
||||
checkpoint='pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=44.39),
|
||||
),
|
||||
dict(
|
||||
config='configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py',
|
||||
checkpoint='pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=42.48),
|
||||
),
|
||||
]
|
||||
resnest = [
|
||||
dict(
|
||||
config='configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py',
|
||||
checkpoint='pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=45.44),
|
||||
),
|
||||
dict(
|
||||
config='configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py',
|
||||
checkpoint='pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=78.57),
|
||||
),
|
||||
]
|
||||
fastscnn = [
|
||||
dict(
|
||||
config='configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py',
|
||||
checkpoint='fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth',
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=70.96),
|
||||
)
|
||||
]
|
||||
deeplabv3plus = [
|
||||
dict(
|
||||
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py', # noqa
|
||||
checkpoint='deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=80.98),
|
||||
),
|
||||
dict(
|
||||
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py', # noqa
|
||||
checkpoint='deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=80.97),
|
||||
),
|
||||
dict(
|
||||
config='configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py', # noqa
|
||||
checkpoint='deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=80.09),
|
||||
),
|
||||
dict(
|
||||
config='configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py', # noqa
|
||||
checkpoint='deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=79.83),
|
||||
),
|
||||
]
|
||||
vit = [
|
||||
dict(
|
||||
config='configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py',
|
||||
checkpoint='upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth',
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=47.73),
|
||||
),
|
||||
dict(
|
||||
config='configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py',
|
||||
checkpoint='upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth',
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=43.52),
|
||||
),
|
||||
]
|
||||
fp16 = [
|
||||
dict(
|
||||
config='configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py', # noqa
|
||||
checkpoint='deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes_20200717_230920-f1104f4b.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=80.46),
|
||||
)
|
||||
]
|
||||
swin = [
|
||||
dict(
|
||||
config='configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py', # noqa
|
||||
checkpoint='upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', # noqa
|
||||
eval='mIoU',
|
||||
metric=dict(mIoU=44.41),
|
||||
)
|
||||
]
|
||||
# yapf: enable
|
||||
@ -0,0 +1,19 @@
|
||||
configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py
|
||||
configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py
|
||||
configs/hrnet/fcn_hr48_512x512_160k_ade20k.py
|
||||
configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py
|
||||
configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py
|
||||
configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py
|
||||
configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py
|
||||
configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py
|
||||
configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py
|
||||
configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py
|
||||
configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py
|
||||
configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py
|
||||
configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py
|
||||
configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py
|
||||
configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py
|
||||
configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py
|
||||
configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py
|
||||
configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py
|
||||
configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py
|
||||
@ -0,0 +1,41 @@
|
||||
PARTITION=$1
|
||||
CHECKPOINT_DIR=$2
|
||||
|
||||
echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr18s_512x512_160k_ade20k_20200614_214413-870f65ac.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x512_160k_ade20k --cfg-options dist_params.port=28171 &
|
||||
echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr18s_512x1024_160k_cityscapes --cfg-options dist_params.port=28172 &
|
||||
echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py $CHECKPOINT_DIR/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x512_160k_ade20k --cfg-options dist_params.port=28173 &
|
||||
echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py $CHECKPOINT_DIR/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fcn_hr48_512x1024_160k_cityscapes --cfg-options dist_params.port=28174 &
|
||||
echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r50-d8_512x1024_80k_cityscapes_20200606_112131-2376f12b.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28175 &
|
||||
echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_r101-d8_512x1024_80k_cityscapes_20200606_112211-e1e1100f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28176 &
|
||||
echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r101-d8_512x512_160k_ade20k_20200615_100650-967c316f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28177 &
|
||||
echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_r50-d8_512x512_160k_ade20k_20200615_184358-1890b0bd.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_r50-d8_512x512_160k_ade20k --cfg-options dist_params.port=28178 &
|
||||
echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py $CHECKPOINT_DIR/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x512_160k_ade20k --cfg-options dist_params.port=28179 &
|
||||
echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/pspnet_s101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28180 &
|
||||
echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py $CHECKPOINT_DIR/fast_scnn_8x4_160k_lr0.12_cityscapes-0cec9937.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/fast_scnn_lr0.12_8x4_160k_cityscapes --cfg-options dist_params.port=28181 &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_769x769_80k_cityscapes_20200607_000405-a7573d20.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28182 &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_cityscapes_20200606_114143-068fcfe9.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28183 &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_512x1024_80k_cityscapes_20200606_114049-f9fb496d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_512x1024_80k_cityscapes --cfg-options dist_params.port=28184 &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r50-d8_769x769_80k_cityscapes_20200606_210233-0e9dfdc4.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r50-d8_769x769_80k_cityscapes --cfg-options dist_params.port=28185 &
|
||||
echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_vit-b16_ln_mln_512x512_160k_ade20k-f444c077.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_vit-b16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28186 &
|
||||
echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py $CHECKPOINT_DIR/upernet_deit-s16_ln_mln_512x512_160k_ade20k-c0cd652f.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_deit-s16_ln_mln_512x512_160k_ade20k --cfg-options dist_params.port=28187 &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py $CHECKPOINT_DIR/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes-cc58bc8d.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes --cfg-options dist_params.port=28188 &
|
||||
echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 tools/slurm_test.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py $CHECKPOINT_DIR/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth --eval mIoU --work-dir work_dirs/benchmark_evaluation/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K --cfg-options dist_params.port=28189 &
|
||||
@ -0,0 +1,149 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import hashlib
|
||||
import logging
|
||||
import os
|
||||
import os.path as osp
|
||||
import warnings
|
||||
from argparse import ArgumentParser
|
||||
|
||||
import requests
|
||||
from mmcv import Config
|
||||
|
||||
from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
|
||||
from mmseg.utils import get_root_logger
|
||||
|
||||
# ignore warnings when segmentors inference
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
|
||||
def download_checkpoint(checkpoint_name, model_name, config_name, collect_dir):
|
||||
"""Download checkpoint and check if hash code is true."""
|
||||
url = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{checkpoint_name}' # noqa
|
||||
|
||||
r = requests.get(url)
|
||||
assert r.status_code != 403, f'{url} Access denied.'
|
||||
|
||||
with open(osp.join(collect_dir, checkpoint_name), 'wb') as code:
|
||||
code.write(r.content)
|
||||
|
||||
true_hash_code = osp.splitext(checkpoint_name)[0].split('-')[1]
|
||||
|
||||
# check hash code
|
||||
with open(osp.join(collect_dir, checkpoint_name), 'rb') as fp:
|
||||
sha256_cal = hashlib.sha256()
|
||||
sha256_cal.update(fp.read())
|
||||
cur_hash_code = sha256_cal.hexdigest()[:8]
|
||||
|
||||
assert true_hash_code == cur_hash_code, f'{url} download failed, '
|
||||
'incomplete downloaded file or url invalid.'
|
||||
|
||||
if cur_hash_code != true_hash_code:
|
||||
os.remove(osp.join(collect_dir, checkpoint_name))
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = ArgumentParser()
|
||||
parser.add_argument('config', help='test config file path')
|
||||
parser.add_argument('checkpoint_root', help='Checkpoint file root path')
|
||||
parser.add_argument(
|
||||
'-i', '--img', default='demo/demo.png', help='Image file')
|
||||
parser.add_argument('-a', '--aug', action='store_true', help='aug test')
|
||||
parser.add_argument('-m', '--model-name', help='model name to inference')
|
||||
parser.add_argument(
|
||||
'-s', '--show', action='store_true', help='show results')
|
||||
parser.add_argument(
|
||||
'-d', '--device', default='cuda:0', help='Device used for inference')
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def inference_model(config_name, checkpoint, args, logger=None):
|
||||
cfg = Config.fromfile(config_name)
|
||||
if args.aug:
|
||||
if 'flip' in cfg.data.test.pipeline[
|
||||
1] and 'img_scale' in cfg.data.test.pipeline[1]:
|
||||
cfg.data.test.pipeline[1].img_ratios = [
|
||||
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
|
||||
]
|
||||
cfg.data.test.pipeline[1].flip = True
|
||||
else:
|
||||
if logger is not None:
|
||||
logger.error(f'{config_name}: unable to start aug test')
|
||||
else:
|
||||
print(f'{config_name}: unable to start aug test', flush=True)
|
||||
|
||||
model = init_segmentor(cfg, checkpoint, device=args.device)
|
||||
# test a single image
|
||||
result = inference_segmentor(model, args.img)
|
||||
|
||||
# show the results
|
||||
if args.show:
|
||||
show_result_pyplot(model, args.img, result)
|
||||
return result
|
||||
|
||||
|
||||
# Sample test whether the inference code is correct
|
||||
def main(args):
|
||||
config = Config.fromfile(args.config)
|
||||
|
||||
if not os.path.exists(args.checkpoint_root):
|
||||
os.makedirs(args.checkpoint_root, 0o775)
|
||||
|
||||
# test single model
|
||||
if args.model_name:
|
||||
if args.model_name in config:
|
||||
model_infos = config[args.model_name]
|
||||
if not isinstance(model_infos, list):
|
||||
model_infos = [model_infos]
|
||||
for model_info in model_infos:
|
||||
config_name = model_info['config'].strip()
|
||||
print(f'processing: {config_name}', flush=True)
|
||||
checkpoint = osp.join(args.checkpoint_root,
|
||||
model_info['checkpoint'].strip())
|
||||
try:
|
||||
# build the model from a config file and a checkpoint file
|
||||
inference_model(config_name, checkpoint, args)
|
||||
except Exception:
|
||||
print(f'{config_name} test failed!')
|
||||
continue
|
||||
return
|
||||
else:
|
||||
raise RuntimeError('model name input error.')
|
||||
|
||||
# test all model
|
||||
logger = get_root_logger(
|
||||
log_file='benchmark_inference_image.log', log_level=logging.ERROR)
|
||||
|
||||
for model_name in config:
|
||||
model_infos = config[model_name]
|
||||
|
||||
if not isinstance(model_infos, list):
|
||||
model_infos = [model_infos]
|
||||
for model_info in model_infos:
|
||||
print('processing: ', model_info['config'], flush=True)
|
||||
config_path = model_info['config'].strip()
|
||||
config_name = osp.splitext(osp.basename(config_path))[0]
|
||||
checkpoint_name = model_info['checkpoint'].strip()
|
||||
checkpoint = osp.join(args.checkpoint_root, checkpoint_name)
|
||||
|
||||
# ensure checkpoint exists
|
||||
try:
|
||||
if not osp.exists(checkpoint):
|
||||
download_checkpoint(checkpoint_name, model_name,
|
||||
config_name.rstrip('.py'),
|
||||
args.checkpoint_root)
|
||||
except Exception:
|
||||
logger.error(f'{checkpoint_name} download error')
|
||||
continue
|
||||
|
||||
# test model inference with checkpoint
|
||||
try:
|
||||
# build the model from a config file and a checkpoint file
|
||||
inference_model(config_path, checkpoint, args, logger)
|
||||
except Exception as e:
|
||||
logger.error(f'{config_path} " : {repr(e)}')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = parse_args()
|
||||
main(args)
|
||||
@ -0,0 +1,40 @@
|
||||
PARTITION=$1
|
||||
|
||||
echo 'configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x512_160k_ade20k configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24727 --work-dir work_dirs/hrnet/fcn_hr18s_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr18s_512x1024_160k_cityscapes configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24728 --work-dir work_dirs/hrnet/fcn_hr18s_512x1024_160k_cityscapes >/dev/null &
|
||||
echo 'configs/hrnet/fcn_hr48_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x512_160k_ade20k configs/hrnet/fcn_hr48_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24729 --work-dir work_dirs/hrnet/fcn_hr48_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fcn_hr48_512x1024_160k_cityscapes configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24730 --work-dir work_dirs/hrnet/fcn_hr48_512x1024_160k_cityscapes >/dev/null &
|
||||
echo 'configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24731 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes >/dev/null &
|
||||
echo 'configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x1024_80k_cityscapes configs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24732 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x1024_80k_cityscapes >/dev/null &
|
||||
echo 'configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r101-d8_512x512_160k_ade20k configs/pspnet/pspnet_r101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24733 --work-dir work_dirs/pspnet/pspnet_r101-d8_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_r50-d8_512x512_160k_ade20k configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24734 --work-dir work_dirs/pspnet/pspnet_r50-d8_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x512_160k_ade20k configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24735 --work-dir work_dirs/resnest/pspnet_s101-d8_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION pspnet_s101-d8_512x1024_80k_cityscapes configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24736 --work-dir work_dirs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes >/dev/null &
|
||||
echo 'configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION fast_scnn_lr0.12_8x4_160k_cityscapes configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24737 --work-dir work_dirs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes >/dev/null &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24738 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_769x769_80k_cityscapes >/dev/null &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24739 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes >/dev/null &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_512x1024_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24740 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes >/dev/null &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r50-d8_769x769_80k_cityscapes configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24741 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes >/dev/null &
|
||||
echo 'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py' &
|
||||
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_vit-b16_ln_mln_512x512_160k_ade20k configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24742 --work-dir work_dirs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py' &
|
||||
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_deit-s16_ln_mln_512x512_160k_ade20k configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24743 --work-dir work_dirs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k >/dev/null &
|
||||
echo 'configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py' &
|
||||
GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes configs/deeplabv3plus/deeplabv3plus_r101-d8_fp16_512x1024_80k_cityscapes.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24744 --work-dir work_dirs/deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_fp16_cityscapes >/dev/null &
|
||||
echo 'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py' &
|
||||
GPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh $PARTITION upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py --cfg-options checkpoint_config.max_keep_ckpts=1 dist_params.port=24745 --work-dir work_dirs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K >/dev/null &
|
||||
@ -0,0 +1,101 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import logging
|
||||
import os
|
||||
from argparse import ArgumentParser
|
||||
|
||||
import requests
|
||||
import yaml as yml
|
||||
|
||||
from mmseg.utils import get_root_logger
|
||||
|
||||
|
||||
def check_url(url):
|
||||
"""Check url response status.
|
||||
|
||||
Args:
|
||||
url (str): url needed to check.
|
||||
|
||||
Returns:
|
||||
int, bool: status code and check flag.
|
||||
"""
|
||||
flag = True
|
||||
r = requests.head(url)
|
||||
status_code = r.status_code
|
||||
if status_code == 403 or status_code == 404:
|
||||
flag = False
|
||||
|
||||
return status_code, flag
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = ArgumentParser('url valid check.')
|
||||
parser.add_argument(
|
||||
'-m',
|
||||
'--model-name',
|
||||
type=str,
|
||||
help='Select the model needed to check')
|
||||
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
model_name = args.model_name
|
||||
|
||||
# yml path generate.
|
||||
# If model_name is not set, script will check all of the models.
|
||||
if model_name is not None:
|
||||
yml_list = [(model_name, f'configs/{model_name}/{model_name}.yml')]
|
||||
else:
|
||||
# check all
|
||||
yml_list = [(x, f'configs/{x}/{x}.yml') for x in os.listdir('configs/')
|
||||
if x != '_base_']
|
||||
|
||||
logger = get_root_logger(log_file='url_check.log', log_level=logging.ERROR)
|
||||
|
||||
for model_name, yml_path in yml_list:
|
||||
# Default yaml loader unsafe.
|
||||
model_infos = yml.load(
|
||||
open(yml_path, 'r'), Loader=yml.CLoader)['Models']
|
||||
for model_info in model_infos:
|
||||
config_name = model_info['Name']
|
||||
checkpoint_url = model_info['Weights']
|
||||
# checkpoint url check
|
||||
status_code, flag = check_url(checkpoint_url)
|
||||
if flag:
|
||||
logger.info(f'checkpoint | {config_name} | {checkpoint_url} | '
|
||||
f'{status_code} valid')
|
||||
else:
|
||||
logger.error(
|
||||
f'checkpoint | {config_name} | {checkpoint_url} | '
|
||||
f'{status_code} | error')
|
||||
# log_json check
|
||||
checkpoint_name = checkpoint_url.split('/')[-1]
|
||||
model_time = '-'.join(checkpoint_name.split('-')[:-1]).replace(
|
||||
f'{config_name}_', '')
|
||||
# two style of log_json name
|
||||
# use '_' to link model_time (will be deprecated)
|
||||
log_json_url_1 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}_{model_time}.log.json' # noqa
|
||||
status_code_1, flag_1 = check_url(log_json_url_1)
|
||||
# use '-' to link model_time
|
||||
log_json_url_2 = f'https://download.openmmlab.com/mmsegmentation/v0.5/{model_name}/{config_name}/{config_name}-{model_time}.log.json' # noqa
|
||||
status_code_2, flag_2 = check_url(log_json_url_2)
|
||||
if flag_1 or flag_2:
|
||||
if flag_1:
|
||||
logger.info(
|
||||
f'log.json | {config_name} | {log_json_url_1} | '
|
||||
f'{status_code_1} | valid')
|
||||
else:
|
||||
logger.info(
|
||||
f'log.json | {config_name} | {log_json_url_2} | '
|
||||
f'{status_code_2} | valid')
|
||||
else:
|
||||
logger.error(
|
||||
f'log.json | {config_name} | {log_json_url_1} & '
|
||||
f'{log_json_url_2} | {status_code_1} & {status_code_2} | '
|
||||
'error')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
@ -0,0 +1,91 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import glob
|
||||
import os.path as osp
|
||||
|
||||
import mmcv
|
||||
from mmcv import Config
|
||||
|
||||
|
||||
def parse_args():
    """Build and parse command-line options for gathering eval results."""
    arg_parser = argparse.ArgumentParser(
        description='Gather benchmarked model evaluation results')
    # Positional arguments: the model-info config and the results root.
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    # Optional output location for the gathered/compared metrics.
    arg_parser.add_argument(
        '--out',
        type=str,
        default='benchmark_evaluation_info.json',
        help='output path of gathered metrics and compared '
        'results to be stored')
    return arg_parser.parse_args()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    args = parse_args()

    # Root folder containing one sub-directory of eval jsons per config.
    root_path = args.root
    # Output path for the gathered/compared metrics (may be falsy to skip).
    metrics_out = args.out
    result_dict = {}

    # The config maps model keys to a model-info dict or a list of them.
    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            # Normalize a single entry to a one-element list.
            model_infos = [model_infos]
        for model_info in model_infos:
            # Previously recorded metrics to compare the new run against.
            previous_metrics = model_info['metric']
            config = model_info['config'].strip()
            fname, _ = osp.splitext(osp.basename(config))

            # Load benchmark evaluation json
            metric_json_dir = osp.join(root_path, fname)
            if not osp.exists(metric_json_dir):
                print(f'{metric_json_dir} not existed.')
                continue

            json_list = glob.glob(osp.join(metric_json_dir, '*.json'))
            if len(json_list) == 0:
                print(f'There is no eval json in {metric_json_dir}.')
                continue

            # Lexicographically last json — presumably the latest
            # timestamp-named file; TODO confirm naming convention.
            log_json_path = list(sorted(json_list))[-1]
            metric = mmcv.load(log_json_path)
            # NOTE(review): 'config' in the loaded json looks like a string
            # path, so this is a substring check; with the {} default the
            # test is always False — verify against the eval json schema.
            if config not in metric.get('config', {}):
                print(f'{config} not included in {log_json_path}')
                continue

            # Compare between new benchmark results and previous metrics
            differential_results = dict()
            new_metrics = dict()
            for record_metric_key in previous_metrics:
                if record_metric_key not in metric['metric']:
                    raise KeyError('record_metric_key not exist, please '
                                   'check your config')
                old_metric = previous_metrics[record_metric_key]
                # Scale the ratio by 100 to match the stored metric format
                # (presumably percentages — confirm against configs).
                new_metric = round(metric['metric'][record_metric_key] * 100,
                                   2)

                differential = new_metric - old_metric
                flag = '+' if differential > 0 else '-'
                differential_results[
                    record_metric_key] = f'{flag}{abs(differential):.2f}'
                new_metrics[record_metric_key] = new_metric

            result_dict[config] = dict(
                differential=differential_results,
                previous=previous_metrics,
                new=new_metrics)

    if metrics_out:
        mmcv.dump(result_dict, metrics_out, indent=4)
    print('===================================')
    for config_name, metrics in result_dict.items():
        print(config_name, metrics)
    print('===================================')
|
||||
@ -0,0 +1,100 @@
|
||||
import argparse
|
||||
import glob
|
||||
import os.path as osp
|
||||
|
||||
import mmcv
|
||||
from gather_models import get_final_results
|
||||
from mmcv import Config
|
||||
|
||||
|
||||
def parse_args():
    """Build and parse command-line options for gathering train results."""
    arg_parser = argparse.ArgumentParser(
        description='Gather benchmarked models train results')
    # Positional arguments: the model-info config and the work-dir root.
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument(
        'root',
        type=str,
        help='root path of benchmarked models to be gathered')
    # Optional output location for the gathered metrics.
    arg_parser.add_argument(
        '--out',
        type=str,
        default='benchmark_train_info.json',
        help='output path of gathered metrics to be stored')
    return arg_parser.parse_args()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    args = parse_args()

    # Root folder holding work_dirs/<model>/<config> training outputs.
    root_path = args.root
    # Output json for the gathered/compared metrics.
    metrics_out = args.out

    evaluation_cfg = Config.fromfile(args.config)

    result_dict = {}
    for model_key in evaluation_cfg:
        model_infos = evaluation_cfg[model_key]
        if not isinstance(model_infos, list):
            # Normalize a single entry to a one-element list.
            model_infos = [model_infos]
        for model_info in model_infos:
            config = model_info['config']

            # benchmark train dir
            model_name = osp.split(osp.dirname(config))[1]
            config_name = osp.splitext(osp.basename(config))[0]
            exp_dir = osp.join(root_path, model_name, config_name)
            if not osp.exists(exp_dir):
                print(f'{config} hasn\'t {exp_dir}')
                continue

            # parse config
            cfg = mmcv.Config.fromfile(config)
            total_iters = cfg.runner.max_iters
            exp_metric = cfg.evaluation.metric
            # BUGFIX: the original assigned `exp_metrics = [exp_metric]`,
            # creating a never-used variable instead of normalizing
            # `exp_metric` itself to a list.
            if not isinstance(exp_metric, list):
                exp_metric = [exp_metric]

            # determine whether total_iters ckpt exists
            ckpt_path = f'iter_{total_iters}.pth'
            if not osp.exists(osp.join(exp_dir, ckpt_path)):
                print(f'{config} hasn\'t {ckpt_path}')
                continue

            # only the last log json counts
            log_json_path = list(
                sorted(glob.glob(osp.join(exp_dir, '*.log.json'))))[-1]

            # extract metric value
            model_performance = get_final_results(log_json_path, total_iters)
            if model_performance is None:
                print(f'log file error: {log_json_path}')
                continue

            # Compare the freshly trained metrics against those recorded in
            # the model-info config; only mIoU is compared here.
            differential_results = dict()
            old_results = dict()
            new_results = dict()
            for metric_key in model_performance:
                if metric_key in ['mIoU']:
                    # Scale ratio to percentage to match recorded metrics.
                    metric = round(model_performance[metric_key] * 100, 2)
                    old_metric = model_info['metric'][metric_key]
                    old_results[metric_key] = old_metric
                    new_results[metric_key] = metric
                    differential = metric - old_metric
                    flag = '+' if differential > 0 else '-'
                    differential_results[
                        metric_key] = f'{flag}{abs(differential):.2f}'
            result_dict[config] = dict(
                differential_results=differential_results,
                old_results=old_results,
                new_results=new_results,
            )

    # 4 save or print results
    if metrics_out:
        mmcv.dump(result_dict, metrics_out, indent=4)
    print('===================================')
    for config_name, metrics in result_dict.items():
        print(config_name, metrics)
    print('===================================')
|
||||
@ -0,0 +1,211 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import glob
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import os.path as osp
|
||||
import shutil
|
||||
|
||||
import mmcv
|
||||
import torch
|
||||
|
||||
# build schedule look-up table to automatically find the final model
RESULTS_LUT = ['mIoU', 'mAcc', 'aAcc']


def calculate_file_sha256(file_path):
    """Return the hex SHA-256 digest of the file at ``file_path``."""
    with open(file_path, 'rb') as fp:
        sha256_cal = hashlib.sha256()
        sha256_cal.update(fp.read())
    return sha256_cal.hexdigest()


def process_checkpoint(in_file, out_file):
    """Publish a training checkpoint.

    Loads ``in_file``, removes the optimizer state to shrink the file,
    saves it to ``out_file`` and renames the result to
    ``<stem>-<first 8 sha256 hex chars>.pth``.

    Args:
        in_file (str): Path of the raw training checkpoint.
        out_file (str): Target path of the published checkpoint.

    Returns:
        str: Final file name without directory prefix and extension.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)
    sha = calculate_file_sha256(out_file)
    # BUGFIX: the original used `out_file.rstrip('.pth')`, but str.rstrip
    # strips *characters* ('.', 'p', 't', 'h'), so a stem such as
    # 'depth.pth' was mangled to 'de'.  Strip the extension instead.
    final_file = osp.splitext(out_file)[0] + '-{}.pth'.format(sha[:8])
    os.rename(out_file, final_file)

    # Remove directory prefix and '.pth' suffix from the returned name.
    final_file_name = osp.split(final_file)[1]
    final_file_name = osp.splitext(final_file_name)[0]

    return final_file_name


def get_final_iter(config):
    """Parse the training-iteration count from a config file name.

    Expects the second-to-last ``_``-separated token to look like '160k'.
    """
    iter_num = config.split('_')[-2]
    assert iter_num.endswith('k')
    return int(iter_num[:-1]) * 1000


def get_final_results(log_json_path, iter_num):
    """Extract final evaluation metrics from a json-lines training log.

    Scans the log for the validation record written at ``iter_num`` (or 50
    iterations before it) and returns the metrics listed in ``RESULTS_LUT``.
    Returns None (implicitly) if no such record is found.
    """
    result_dict = dict()
    last_iter = 0
    with open(log_json_path, 'r') as f:
        for line in f.readlines():
            log_line = json.loads(line)
            if 'mode' not in log_line.keys():
                # Skip the leading env-info line that has no 'mode' key.
                continue

            # When evaluation, the 'iter' of new log json is the evaluation
            # steps on single gpu.
            flag1 = ('aAcc' in log_line) or (log_line['mode'] == 'val')
            flag2 = (last_iter == iter_num - 50) or (last_iter == iter_num)
            if flag1 and flag2:
                result_dict.update({
                    key: log_line[key]
                    for key in RESULTS_LUT if key in log_line
                })
                return result_dict

            last_iter = log_line['iter']
|
||||
|
||||
|
||||
def parse_args():
    """Build and parse command-line options for gathering models."""
    cli = argparse.ArgumentParser(description='Gather benchmarked models')
    # Optionally restrict gathering to a single config.
    cli.add_argument(
        '-f', '--config-name', type=str, help='Process the selected config.')
    cli.add_argument(
        '-w',
        '--work-dir',
        default='work_dirs/',
        type=str,
        help='Ckpt storage root folder of benchmarked models to be gathered.')
    cli.add_argument(
        '-c',
        '--collect-dir',
        default='work_dirs/gather',
        type=str,
        help='Ckpt collect root folder of gathered models.')
    # When set, configs and plain-text logs are published alongside ckpts.
    cli.add_argument(
        '--all', action='store_true', help='whether include .py and .log')
    return cli.parse_args()
|
||||
|
||||
|
||||
def main():
    """Collect finished experiments from ``--work-dir``, publish their
    checkpoints (optimizer stripped, sha256-stamped) into ``--collect-dir``
    and dump a ``model_infos.json`` summary.
    """
    args = parse_args()
    work_dir = args.work_dir
    collect_dir = args.collect_dir
    selected_config_name = args.config_name
    mmcv.mkdir_or_exist(collect_dir)

    # find all models in the root directory to be gathered
    raw_configs = list(mmcv.scandir('./configs', '.py', recursive=True))

    # filter configs that is not trained in the experiments dir
    used_configs = []
    for raw_config in raw_configs:
        config_name = osp.splitext(osp.basename(raw_config))[0]
        if osp.exists(osp.join(work_dir, config_name)):
            if (selected_config_name is None
                    or selected_config_name == config_name):
                used_configs.append(raw_config)
    print(f'Find {len(used_configs)} models to be gathered')

    # find final_ckpt and log file for trained each config
    # and parse the best performance
    model_infos = []
    for used_config in used_configs:
        config_name = osp.splitext(osp.basename(used_config))[0]
        exp_dir = osp.join(work_dir, config_name)
        # check whether the exps is finished
        final_iter = get_final_iter(used_config)
        final_model = 'iter_{}.pth'.format(final_iter)
        model_path = osp.join(exp_dir, final_model)

        # skip if the model is still training
        if not osp.exists(model_path):
            print(f'{used_config} train not finished yet')
            continue

        # get logs: take the first log json that yields final results
        log_json_paths = glob.glob(osp.join(exp_dir, '*.log.json'))
        log_json_path = log_json_paths[0]
        model_performance = None
        for idx, _log_json_path in enumerate(log_json_paths):
            model_performance = get_final_results(_log_json_path, final_iter)
            if model_performance is not None:
                log_json_path = _log_json_path
                break

        if model_performance is None:
            print(f'{used_config} model_performance is None')
            continue

        # Timestamp stem of the chosen log file, e.g. '20210101_120000'.
        model_time = osp.split(log_json_path)[-1].split('.')[0]
        model_infos.append(
            dict(
                config_name=config_name,
                results=model_performance,
                iters=final_iter,
                model_time=model_time,
                log_json_path=osp.split(log_json_path)[-1]))

    # publish model for each checkpoint
    publish_model_infos = []
    for model in model_infos:
        config_name = model['config_name']
        model_publish_dir = osp.join(collect_dir, config_name)

        publish_model_path = osp.join(model_publish_dir,
                                      config_name + '_' + model['model_time'])
        trained_model_path = osp.join(work_dir, config_name,
                                      'iter_{}.pth'.format(model['iters']))
        if osp.exists(model_publish_dir):
            # Reuse an already-published checkpoint if one is present.
            for file in os.listdir(model_publish_dir):
                if file.endswith('.pth'):
                    print(f'model {file} found')
                    model['model_path'] = osp.abspath(
                        osp.join(model_publish_dir, file))
                    break
            if 'model_path' not in model:
                print(f'dir {model_publish_dir} exists, no model found')

        else:
            mmcv.mkdir_or_exist(model_publish_dir)

            # convert model
            final_model_path = process_checkpoint(trained_model_path,
                                                  publish_model_path)
            model['model_path'] = final_model_path

        new_json_path = f'{config_name}_{model["log_json_path"]}'
        # copy log
        shutil.copy(
            osp.join(work_dir, config_name, model['log_json_path']),
            osp.join(model_publish_dir, new_json_path))

        if args.all:
            # BUGFIX: the original used str.rstrip('.json'), which strips
            # *characters* and can eat trailing stem letters; strip the
            # extension instead ('x.log.json' -> 'x.log').
            new_txt_path = osp.splitext(new_json_path)[0]
            shutil.copy(
                osp.join(work_dir, config_name,
                         osp.splitext(model['log_json_path'])[0]),
                osp.join(model_publish_dir, new_txt_path))

        if args.all:
            # copy config to guarantee reproducibility
            raw_config = osp.join('./configs', f'{config_name}.py')
            mmcv.Config.fromfile(raw_config).dump(
                osp.join(model_publish_dir, osp.basename(raw_config)))

        publish_model_infos.append(model)

    models = dict(models=publish_model_infos)
    mmcv.dump(models, osp.join(collect_dir, 'model_infos.json'), indent=4)


if __name__ == '__main__':
    main()
|
||||
@ -0,0 +1,114 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import os.path as osp
|
||||
|
||||
from mmcv import Config
|
||||
|
||||
|
||||
def parse_args():
    """Build and parse command-line options for the test-script writer."""
    cli = argparse.ArgumentParser(
        description='Convert benchmark test model list to script')
    cli.add_argument('config', help='test config file path')
    # Base dist port; each generated job increments it by one.
    cli.add_argument('--port', type=int, default=28171, help='dist port')
    cli.add_argument(
        '--work-dir',
        default='work_dirs/benchmark_evaluation',
        help='the dir to save metric')
    cli.add_argument(
        '--out',
        type=str,
        default='.dev/benchmark_evaluation.sh',
        help='path to save model benchmark script')
    return cli.parse_args()
|
||||
|
||||
|
||||
def process_model_info(model_info, work_dir):
    """Normalize one model-info entry into the fields the test command needs.

    Strips whitespace from paths, derives the job name from the config file
    stem and flattens the eval metric(s) into one space-separated string.
    """
    config = model_info['config'].strip()
    fname, _ = osp.splitext(osp.basename(config))
    checkpoint = model_info['checkpoint'].strip()
    # Metrics may be a single string or a list; normalize to a list first.
    raw_evals = model_info['eval']
    evals = raw_evals if isinstance(raw_evals, list) else [raw_evals]
    return dict(
        config=config,
        job_name=fname,
        checkpoint=checkpoint,
        work_dir=osp.join(work_dir, fname),
        eval=' '.join(evals))
|
||||
|
||||
|
||||
def create_test_bash_info(commands, model_test_dict, port, script_name,
                          partition):
    """Append the echo line and slurm test command for one model.

    Mutates ``commands`` in place; the caller joins the pieces into the
    final bash script.
    """
    config = model_test_dict['config']
    job_name = model_test_dict['job_name']
    checkpoint = model_test_dict['checkpoint']
    work_dir = model_test_dict['work_dir']
    eval = model_test_dict['eval']

    # Echo the config path before its test command runs.
    commands.append(f"\necho '{config}' &")
    commands.append('\n')

    # Assemble the slurm test invocation piece by piece.
    parts = [
        f'GPUS=4 GPUS_PER_NODE=4 CPUS_PER_TASK=2 {script_name} ',
        f'{partition} ',
        f'{job_name} ',
        f'{config} ',
        f'$CHECKPOINT_DIR/{checkpoint} ',
        f'--eval {eval} ',
        f'--work-dir {work_dir} ',
        f'--cfg-options dist_params.port={port} ',
        '&',
    ]
    commands.append(''.join(parts))
|
||||
|
||||
|
||||
def main():
    """Read the model-info config and emit a bash script of slurm test jobs."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'

    # Script header: partition and checkpoint root come in as bash args.
    commands = ['PARTITION=$1', '\n', 'CHECKPOINT_DIR=$2', '\n']

    script_name = osp.join('tools', 'slurm_test.sh')
    port = args.port
    work_dir = args.work_dir

    cfg = Config.fromfile(args.config)

    for model_key in cfg:
        model_infos = cfg[model_key]
        if not isinstance(model_infos, list):
            model_infos = [model_infos]
        for model_info in model_infos:
            print('processing: ', model_info['config'])
            model_test_dict = process_model_info(model_info, work_dir)
            create_test_bash_info(commands, model_test_dict, port, script_name,
                                  '$PARTITION')
            # Each job gets its own dist port to avoid collisions.
            port += 1

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str + '\n')


if __name__ == '__main__':
    main()
|
||||
@ -0,0 +1,91 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import os.path as osp
|
||||
|
||||
# Default using 4 gpu when training
config_8gpu_list = [
    'configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py',  # noqa
    'configs/vit/upernet_vit-b16_ln_mln_512x512_160k_ade20k.py',
    'configs/vit/upernet_deit-s16_ln_mln_512x512_160k_ade20k.py',
]


def parse_args():
    """Build and parse command-line options for the train-script writer."""
    cli = argparse.ArgumentParser(
        description='Convert benchmark model json to script')
    cli.add_argument(
        'txt_path', type=str, help='txt path output by benchmark_filter')
    # Base dist port; each generated job increments it by one.
    cli.add_argument('--port', type=int, default=24727, help='dist port')
    cli.add_argument(
        '--out',
        type=str,
        default='.dev/benchmark_train.sh',
        help='path to save model benchmark script')
    return cli.parse_args()


def create_train_bash_info(commands, config, script_name, partition, port):
    """Append the echo line and slurm train command for one config.

    Mutates ``commands`` in place; the caller joins the pieces into the
    final bash script.
    """
    cfg = config.strip()

    # print cfg name
    commands.append(f"echo '{cfg}' &")
    commands.append('\n')

    _, model_name = osp.split(osp.dirname(cfg))
    config_name, _ = osp.splitext(osp.basename(cfg))
    # A few large models need 8 GPUs; everything else trains on 4.
    gpus = 8 if cfg in config_8gpu_list else 4
    pieces = [
        f'GPUS={gpus} GPUS_PER_NODE={gpus} CPUS_PER_TASK=2 {script_name} ',
        f'{partition} ',
        f'{config_name} ',
        f'{cfg} ',
        f'--cfg-options checkpoint_config.max_keep_ckpts=1 '
        f'dist_params.port={port} ',
        f'--work-dir work_dirs/{model_name}/{config_name} ',
        # Let the script shut up
        '>/dev/null &',
    ]
    commands.append(''.join(pieces))
    commands.append('\n')
|
||||
|
||||
|
||||
def main():
    """Turn a txt list of configs into a bash script of slurm train jobs."""
    args = parse_args()
    if args.out:
        out_suffix = args.out.split('.')[-1]
        assert args.out.endswith('.sh'), \
            f'Expected out file path suffix is .sh, but get .{out_suffix}'

    script_name = osp.join('./tools', 'slurm_train.sh')
    port = args.port

    # Script header: the slurm partition is the first bash argument.
    commands = ['PARTITION=$1', '\n', '\n']

    with open(args.txt_path, 'r') as f:
        for cfg in f.readlines():
            create_train_bash_info(commands, cfg, script_name, '$PARTITION',
                                   port)
            # Each job gets its own dist port to avoid collisions.
            port += 1

    command_str = ''.join(commands)
    if args.out:
        with open(args.out, 'w') as f:
            f.write(command_str)


if __name__ == '__main__':
    main()
|
||||
@ -0,0 +1,18 @@
|
||||
# Root directory that holds one sub-folder of .log.json files per run.
work_dir = '../../work_dirs'
# Metric used to pick the best evaluation round.
metric = 'mIoU'

# specify the log files we would like to collect in `log_items`
log_items = [
    'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup',
    'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr',
    'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr',
    'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr'
]
# or specify ignore_keywords, then the folders whose name contain
# `'segformer'` won't be collected
# ignore_keywords = ['segformer']

# Extra per-evaluation keys to record; should not include metric.
other_info_keys = ['mAcc']
# Output files (parent directories are created on demand).
markdown_file = 'markdowns/lr_in_trans.json.md'
json_file = 'jsons/trans_in_cnn.json'
|
||||
@ -0,0 +1,143 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import os.path as osp
|
||||
from collections import OrderedDict
|
||||
|
||||
from utils import load_config
|
||||
|
||||
# automatically collect all the results
|
||||
|
||||
# The structure of the directory:
|
||||
# ├── work-dir
|
||||
# │ ├── config_1
|
||||
# │ │ ├── time1.log.json
|
||||
# │ │ ├── time2.log.json
|
||||
# │ │ ├── time3.log.json
|
||||
# │ │ ├── time4.log.json
|
||||
# │ ├── config_2
|
||||
# │ │ ├── time5.log.json
|
||||
# │ │ ├── time6.log.json
|
||||
# │ │ ├── time7.log.json
|
||||
# │ │ ├── time8.log.json
|
||||
|
||||
|
||||
def parse_args():
    """Parse the single positional argument: path to the collector config."""
    cli = argparse.ArgumentParser(description='extract info from log.json')
    cli.add_argument('config_dir')
    return cli.parse_args()
|
||||
|
||||
|
||||
def has_keyword(name: str, keywords: list) -> bool:
    """Return True if any keyword occurs as a substring of ``name``."""
    return any(a_keyword in name for a_keyword in keywords)
|
||||
|
||||
|
||||
def main():
    # Load the user config describing what to collect and where to write.
    args = parse_args()
    cfg = load_config(args.config_dir)
    work_dir = cfg['work_dir']
    metric = cfg['metric']
    log_items = cfg.get('log_items', [])
    ignore_keywords = cfg.get('ignore_keywords', [])
    other_info_keys = cfg.get('other_info_keys', [])
    markdown_file = cfg.get('markdown_file', None)
    json_file = cfg.get('json_file', None)

    # Create parent directories of the output files when they are nested.
    if json_file and osp.split(json_file)[0] != '':
        os.makedirs(osp.split(json_file)[0], exist_ok=True)
    if markdown_file and osp.split(markdown_file)[0] != '':
        os.makedirs(osp.split(markdown_file)[0], exist_ok=True)

    # The two selection mechanisms are mutually exclusive.
    assert not (log_items and ignore_keywords), \
        'log_items and ignore_keywords cannot be specified at the same time'
    assert metric not in other_info_keys, \
        'other_info_keys should not contain metric'

    # Allow bare strings in the config; normalize them to one-element lists.
    if ignore_keywords and isinstance(ignore_keywords, str):
        ignore_keywords = [ignore_keywords]
    if other_info_keys and isinstance(other_info_keys, str):
        other_info_keys = [other_info_keys]
    if log_items and isinstance(log_items, str):
        log_items = [log_items]

    # No explicit list given: take every entry of work_dir that does not
    # match an ignore keyword.
    if not log_items:
        log_items = [
            item for item in sorted(os.listdir(work_dir))
            if not has_keyword(item, ignore_keywords)
        ]

    experiment_info_list = []
    for config_dir in log_items:
        preceding_path = os.path.join(work_dir, config_dir)
        log_list = [
            item for item in os.listdir(preceding_path)
            if item.endswith('.log.json')
        ]
        # Sort chronologically by the timestamp encoded in the file name;
        # later files are treated as resumed runs of earlier ones.
        log_list = sorted(
            log_list,
            key=lambda time_str: datetime.datetime.strptime(
                time_str, '%Y%m%d_%H%M%S.log.json'))
        val_list = []
        last_iter = 0
        for log_name in log_list:
            with open(os.path.join(preceding_path, log_name), 'r') as f:
                # ignore the info line
                f.readline()
                all_lines = f.readlines()
                # Collect every validation record across all log files.
                val_list.extend([
                    json.loads(line) for line in all_lines
                    if json.loads(line)['mode'] == 'val'
                ])
                # Walk backwards to the last training record to find the
                # highest iteration reached in this file.
                for index in range(len(all_lines) - 1, -1, -1):
                    line_dict = json.loads(all_lines[index])
                    if line_dict['mode'] == 'train':
                        last_iter = max(last_iter, line_dict['iter'])
                        break

        new_log_dict = dict(
            method=config_dir, metric_used=metric, last_iter=last_iter)
        # Rewrite each raw val record as an ordered dict keeping only the
        # 1-based eval index, the main metric and the requested extra keys.
        for index, log in enumerate(val_list, 1):
            new_ordered_dict = OrderedDict()
            new_ordered_dict['eval_index'] = index
            new_ordered_dict[metric] = log[metric]
            for key in other_info_keys:
                if key in log:
                    new_ordered_dict[key] = log[key]
            val_list[index - 1] = new_ordered_dict

        assert len(val_list) >= 1, \
            f"work dir {config_dir} doesn't contain any evaluation."
        new_log_dict['last eval'] = val_list[-1]
        new_log_dict['best eval'] = max(val_list, key=lambda x: x[metric])
        experiment_info_list.append(new_log_dict)
        print(f'{config_dir} is processed')

    if json_file:
        with open(json_file, 'w') as f:
            json.dump(experiment_info_list, f, indent=4)

    if markdown_file:
        lines_to_write = []
        for index, log in enumerate(experiment_info_list, 1):
            lines_to_write.append(
                f"|{index}|{log['method']}|{log['best eval'][metric]}"
                f"|{log['best eval']['eval_index']}|"
                f"{log['last eval'][metric]}|"
                f"{log['last eval']['eval_index']}|{log['last_iter']}|\n")
        with open(markdown_file, 'w') as f:
            # Markdown table header and separator row (7 columns).
            f.write(f'|exp_num|method|{metric} best|best index|'
                    f'{metric} last|last index|last iter num|\n')
            f.write('|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n')
            f.writelines(lines_to_write)

    print('processed successfully')


if __name__ == '__main__':
    main()
|
||||
@ -0,0 +1,144 @@
|
||||
# Log Collector
|
||||
|
||||
## Function
|
||||
|
||||
Automatically collect logs and write the result in a json file or markdown file.
|
||||
|
||||
If there are several `.log.json` files in one folder, Log Collector assumes that the `.log.json` files other than the first one are resume from the preceding `.log.json` file. Log Collector returns the result considering all `.log.json` files.
|
||||
|
||||
## Usage:
|
||||
|
||||
To use log collector, you need to write a config file to configure the log collector first.
|
||||
|
||||
For example:
|
||||
|
||||
example_config.py:
|
||||
|
||||
```python
|
||||
# The work directory that contains folders that contains .log.json files.
|
||||
work_dir = '../../work_dirs'
|
||||
# The metric used to find the best evaluation.
|
||||
metric = 'mIoU'
|
||||
|
||||
# **Don't specify the log_items and ignore_keywords at the same time.**
|
||||
# Specify the log files we would like to collect in `log_items`.
|
||||
# The folders specified should be the subdirectories of `work_dir`.
|
||||
log_items = [
|
||||
'segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup',
|
||||
'segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr',
|
||||
'segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr',
|
||||
'segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr'
|
||||
]
|
||||
# Or specify `ignore_keywords`. The folders whose name contain one
|
||||
# of the keywords in the `ignore_keywords` list(e.g., `'segformer'`)
|
||||
# won't be collected.
|
||||
# ignore_keywords = ['segformer']
|
||||
|
||||
# Other log items in .log.json that you want to collect.
|
||||
# should not include metric.
|
||||
other_info_keys = ["mAcc"]
|
||||
# The output markdown file's name.
|
||||
markdown_file = 'markdowns/lr_in_trans.json.md'
|
||||
# The output json file's name. (optional)
|
||||
json_file = 'jsons/trans_in_cnn.json'
|
||||
```
|
||||
|
||||
The structure of the work-dir directory should be like:
|
||||
|
||||
```text
|
||||
├── work-dir
|
||||
│ ├── folder1
|
||||
│ │ ├── time1.log.json
|
||||
│ │ ├── time2.log.json
|
||||
│ │ ├── time3.log.json
|
||||
│ │ ├── time4.log.json
|
||||
│ ├── folder2
|
||||
│ │ ├── time5.log.json
|
||||
│ │ ├── time6.log.json
|
||||
│ │ ├── time7.log.json
|
||||
│ │ ├── time8.log.json
|
||||
```
|
||||
|
||||
Then, `cd` to the log collector folder.
|
||||
|
||||
Now you can run log_collector.py by using command:
|
||||
|
||||
```bash
|
||||
python log_collector.py ./example_config.py
|
||||
```
|
||||
|
||||
The output markdown file is like:
|
||||
|
||||
| exp_num | method | mIoU best | best index | mIoU last | last index | last iter num |
|
||||
| :-----: | :-----------------------------------------------------: | :-------: | :--------: | :-------: | :--------: | :-----------: |
|
||||
| 1 | segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup | 0.2776 | 10 | 0.2776 | 10 | 160000 |
|
||||
| 2 | segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr | 0.2802 | 10 | 0.2802 | 10 | 160000 |
|
||||
| 3 | segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr | 0.4943 | 11 | 0.4943 | 11 | 160000 |
|
||||
| 4 | segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr | 0.4883 | 11 | 0.4883 | 11 | 160000 |
|
||||
|
||||
The output json file is like:
|
||||
|
||||
```json
|
||||
[
|
||||
{
|
||||
"method": "segformer_mit-b5_512x512_160k_ade20k_cnn_lr_with_warmup",
|
||||
"metric_used": "mIoU",
|
||||
"last_iter": 160000,
|
||||
"last eval": {
|
||||
"eval_index": 10,
|
||||
"mIoU": 0.2776,
|
||||
"mAcc": 0.3779
|
||||
},
|
||||
"best eval": {
|
||||
"eval_index": 10,
|
||||
"mIoU": 0.2776,
|
||||
"mAcc": 0.3779
|
||||
}
|
||||
},
|
||||
{
|
||||
"method": "segformer_mit-b5_512x512_160k_ade20k_cnn_no_warmup_lr",
|
||||
"metric_used": "mIoU",
|
||||
"last_iter": 160000,
|
||||
"last eval": {
|
||||
"eval_index": 10,
|
||||
"mIoU": 0.2802,
|
||||
"mAcc": 0.3764
|
||||
},
|
||||
"best eval": {
|
||||
"eval_index": 10,
|
||||
"mIoU": 0.2802,
|
||||
"mAcc": 0.3764
|
||||
}
|
||||
},
|
||||
{
|
||||
"method": "segformer_mit-b5_512x512_160k_ade20k_mit_trans_lr",
|
||||
"metric_used": "mIoU",
|
||||
"last_iter": 160000,
|
||||
"last eval": {
|
||||
"eval_index": 11,
|
||||
"mIoU": 0.4943,
|
||||
"mAcc": 0.6097
|
||||
},
|
||||
"best eval": {
|
||||
"eval_index": 11,
|
||||
"mIoU": 0.4943,
|
||||
"mAcc": 0.6097
|
||||
}
|
||||
},
|
||||
{
|
||||
"method": "segformer_mit-b5_512x512_160k_ade20k_swin_trans_lr",
|
||||
"metric_used": "mIoU",
|
||||
"last_iter": 160000,
|
||||
"last eval": {
|
||||
"eval_index": 11,
|
||||
"mIoU": 0.4883,
|
||||
"mAcc": 0.6061
|
||||
},
|
||||
"best eval": {
|
||||
"eval_index": 11,
|
||||
"mIoU": 0.4883,
|
||||
"mAcc": 0.6061
|
||||
}
|
||||
}
|
||||
]
|
||||
```
|
||||
@ -0,0 +1,20 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
# modified from https://github.dev/open-mmlab/mmcv
|
||||
import os.path as osp
|
||||
import sys
|
||||
from importlib import import_module
|
||||
|
||||
|
||||
def load_config(cfg_dir: str) -> dict:
    """Import a ``.py`` config file and return its public names as a dict.

    The config's directory is temporarily prepended to ``sys.path`` so the
    file can be imported by module name; the path entry is always removed
    again, even if the import raises.

    Args:
        cfg_dir (str): Path to a Python config file (must end with ``.py``).

    Returns:
        dict: Every module attribute whose name does not start with ``__``
        (single-underscore names are kept).
    """
    assert cfg_dir.endswith('.py')
    root_path, file_name = osp.split(cfg_dir)
    temp_module = osp.splitext(file_name)[0]
    # BUGFIX: drop any previously cached module with the same name so we
    # import the file on disk instead of returning a stale module object.
    sys.modules.pop(temp_module, None)
    sys.path.insert(0, root_path)
    try:
        mod = import_module(temp_module)
    finally:
        # BUGFIX: restore sys.path even when the config fails to import.
        sys.path.pop(0)
    cfg_dict = {
        k: v
        for k, v in mod.__dict__.items() if not k.startswith('__')
    }
    del sys.modules[temp_module]
    return cfg_dict
|
||||
317
prediction/image/mx15hdi/Detect/mmsegmentation/.dev/md2yml.py
Normal file
317
prediction/image/mx15hdi/Detect/mmsegmentation/.dev/md2yml.py
Normal file
@ -0,0 +1,317 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
# This tool is used to update model-index.yml which is required by MIM, and
|
||||
# will be automatically called as a pre-commit hook. The updating will be
|
||||
# triggered if any change of model information (.md files in configs/) has been
|
||||
# detected before a commit.
|
||||
|
||||
import glob
|
||||
import os
|
||||
import os.path as osp
|
||||
import re
|
||||
import sys
|
||||
|
||||
from lxml import etree
|
||||
from mmcv.fileio import dump
|
||||
|
||||
MMSEG_ROOT = osp.dirname(osp.dirname((osp.dirname(__file__))))
|
||||
|
||||
COLLECTIONS = [
|
||||
'ANN', 'APCNet', 'BiSeNetV1', 'BiSeNetV2', 'CCNet', 'CGNet', 'DANet',
|
||||
'DeepLabV3', 'DeepLabV3+', 'DMNet', 'DNLNet', 'DPT', 'EMANet', 'EncNet',
|
||||
'ERFNet', 'FastFCN', 'FastSCNN', 'FCN', 'GCNet', 'ICNet', 'ISANet', 'KNet',
|
||||
'NonLocalNet', 'OCRNet', 'PointRend', 'PSANet', 'PSPNet', 'Segformer',
|
||||
'Segmenter', 'FPN', 'SETR', 'STDC', 'UNet', 'UPerNet'
|
||||
]
|
||||
COLLECTIONS_TEMP = []
|
||||
|
||||
|
||||
def dump_yaml_and_check_difference(obj, filename, sort_keys=False):
|
||||
"""Dump object to a yaml file, and check if the file content is different
|
||||
from the original.
|
||||
|
||||
Args:
|
||||
obj (any): The python object to be dumped.
|
||||
filename (str): YAML filename to dump the object to.
|
||||
sort_keys (str); Sort key by dictionary order.
|
||||
Returns:
|
||||
Bool: If the target YAML file is different from the original.
|
||||
"""
|
||||
|
||||
str_dump = dump(obj, None, file_format='yaml', sort_keys=sort_keys)
|
||||
if osp.isfile(filename):
|
||||
file_exists = True
|
||||
with open(filename, 'r', encoding='utf-8') as f:
|
||||
str_orig = f.read()
|
||||
else:
|
||||
file_exists = False
|
||||
str_orig = None
|
||||
|
||||
if file_exists and str_orig == str_dump:
|
||||
is_different = False
|
||||
else:
|
||||
is_different = True
|
||||
with open(filename, 'w', encoding='utf-8') as f:
|
||||
f.write(str_dump)
|
||||
|
||||
return is_different
|
||||
|
||||
|
||||
def parse_md(md_file):
|
||||
"""Parse .md file and convert it to a .yml file which can be used for MIM.
|
||||
|
||||
Args:
|
||||
md_file (str): Path to .md file.
|
||||
Returns:
|
||||
Bool: If the target YAML file is different from the original.
|
||||
"""
|
||||
collection_name = osp.split(osp.dirname(md_file))[1]
|
||||
configs = os.listdir(osp.dirname(md_file))
|
||||
|
||||
collection = dict(
|
||||
Name=collection_name,
|
||||
Metadata={'Training Data': []},
|
||||
Paper={
|
||||
'URL': '',
|
||||
'Title': ''
|
||||
},
|
||||
README=md_file,
|
||||
Code={
|
||||
'URL': '',
|
||||
'Version': ''
|
||||
})
|
||||
collection.update({'Converted From': {'Weights': '', 'Code': ''}})
|
||||
models = []
|
||||
datasets = []
|
||||
paper_url = None
|
||||
paper_title = None
|
||||
code_url = None
|
||||
code_version = None
|
||||
repo_url = None
|
||||
|
||||
# To avoid re-counting number of backbone model in OpenMMLab,
|
||||
# if certain model in configs folder is backbone whose name is already
|
||||
# recorded in MMClassification, then the `COLLECTION` dict of this model
|
||||
# in MMSegmentation should be deleted, and `In Collection` in `Models`
|
||||
# should be set with head or neck of this config file.
|
||||
is_backbone = None
|
||||
|
||||
with open(md_file, 'r', encoding='UTF-8') as md:
|
||||
lines = md.readlines()
|
||||
i = 0
|
||||
current_dataset = ''
|
||||
while i < len(lines):
|
||||
line = lines[i].strip()
|
||||
# In latest README.md the title and url are in the third line.
|
||||
if i == 2:
|
||||
paper_url = lines[i].split('](')[1].split(')')[0]
|
||||
paper_title = lines[i].split('](')[0].split('[')[1]
|
||||
if len(line) == 0:
|
||||
i += 1
|
||||
continue
|
||||
elif line[:3] == '<a ':
|
||||
content = etree.HTML(line)
|
||||
node = content.xpath('//a')[0]
|
||||
if node.text == 'Code Snippet':
|
||||
code_url = node.get('href', None)
|
||||
assert code_url is not None, (
|
||||
f'{collection_name} hasn\'t code snippet url.')
|
||||
# version extraction
|
||||
filter_str = r'blob/(.*)/mm'
|
||||
pattern = re.compile(filter_str)
|
||||
code_version = pattern.findall(code_url)
|
||||
assert len(code_version) == 1, (
|
||||
f'false regular expression ({filter_str}) use.')
|
||||
code_version = code_version[0]
|
||||
elif node.text == 'Official Repo':
|
||||
repo_url = node.get('href', None)
|
||||
assert repo_url is not None, (
|
||||
f'{collection_name} hasn\'t official repo url.')
|
||||
i += 1
|
||||
elif line[:4] == '### ':
|
||||
datasets.append(line[4:])
|
||||
current_dataset = line[4:]
|
||||
i += 2
|
||||
elif line[:15] == '<!-- [BACKBONE]':
|
||||
is_backbone = True
|
||||
i += 1
|
||||
elif (line[0] == '|' and (i + 1) < len(lines)
|
||||
and lines[i + 1][:3] == '| -' and 'Method' in line
|
||||
and 'Crop Size' in line and 'Mem (GB)' in line):
|
||||
cols = [col.strip() for col in line.split('|')]
|
||||
method_id = cols.index('Method')
|
||||
backbone_id = cols.index('Backbone')
|
||||
crop_size_id = cols.index('Crop Size')
|
||||
lr_schd_id = cols.index('Lr schd')
|
||||
mem_id = cols.index('Mem (GB)')
|
||||
fps_id = cols.index('Inf time (fps)')
|
||||
try:
|
||||
ss_id = cols.index('mIoU')
|
||||
except ValueError:
|
||||
ss_id = cols.index('Dice')
|
||||
try:
|
||||
ms_id = cols.index('mIoU(ms+flip)')
|
||||
except ValueError:
|
||||
ms_id = False
|
||||
config_id = cols.index('config')
|
||||
download_id = cols.index('download')
|
||||
j = i + 2
|
||||
while j < len(lines) and lines[j][0] == '|':
|
||||
els = [el.strip() for el in lines[j].split('|')]
|
||||
config = ''
|
||||
model_name = ''
|
||||
weight = ''
|
||||
for fn in configs:
|
||||
if fn in els[config_id]:
|
||||
left = els[download_id].index(
|
||||
'https://download.openmmlab.com')
|
||||
right = els[download_id].index('.pth') + 4
|
||||
weight = els[download_id][left:right]
|
||||
config = f'configs/{collection_name}/{fn}'
|
||||
model_name = fn[:-3]
|
||||
fps = els[fps_id] if els[fps_id] != '-' and els[
|
||||
fps_id] != '' else -1
|
||||
mem = els[mem_id].split(
|
||||
'\\'
|
||||
)[0] if els[mem_id] != '-' and els[mem_id] != '' else -1
|
||||
crop_size = els[crop_size_id].split('x')
|
||||
assert len(crop_size) == 2
|
||||
method = els[method_id].split()[0].split('-')[-1]
|
||||
model = {
|
||||
'Name':
|
||||
model_name,
|
||||
'In Collection':
|
||||
method,
|
||||
'Metadata': {
|
||||
'backbone': els[backbone_id],
|
||||
'crop size': f'({crop_size[0]},{crop_size[1]})',
|
||||
'lr schd': int(els[lr_schd_id]),
|
||||
},
|
||||
'Results': [
|
||||
{
|
||||
'Task': 'Semantic Segmentation',
|
||||
'Dataset': current_dataset,
|
||||
'Metrics': {
|
||||
cols[ss_id]: float(els[ss_id]),
|
||||
},
|
||||
},
|
||||
],
|
||||
'Config':
|
||||
config,
|
||||
'Weights':
|
||||
weight,
|
||||
}
|
||||
if fps != -1:
|
||||
try:
|
||||
fps = float(fps)
|
||||
except Exception:
|
||||
j += 1
|
||||
continue
|
||||
model['Metadata']['inference time (ms/im)'] = [{
|
||||
'value':
|
||||
round(1000 / float(fps), 2),
|
||||
'hardware':
|
||||
'V100',
|
||||
'backend':
|
||||
'PyTorch',
|
||||
'batch size':
|
||||
1,
|
||||
'mode':
|
||||
'FP32' if 'fp16' not in config else 'FP16',
|
||||
'resolution':
|
||||
f'({crop_size[0]},{crop_size[1]})'
|
||||
}]
|
||||
if mem != -1:
|
||||
model['Metadata']['Training Memory (GB)'] = float(mem)
|
||||
# Only have semantic segmentation now
|
||||
if ms_id and els[ms_id] != '-' and els[ms_id] != '':
|
||||
model['Results'][0]['Metrics'][
|
||||
'mIoU(ms+flip)'] = float(els[ms_id])
|
||||
models.append(model)
|
||||
j += 1
|
||||
i = j
|
||||
else:
|
||||
i += 1
|
||||
flag = (code_url is not None) and (paper_url is not None) and (repo_url
|
||||
is not None)
|
||||
assert flag, f'{collection_name} readme error'
|
||||
collection['Name'] = method
|
||||
collection['Metadata']['Training Data'] = datasets
|
||||
collection['Code']['URL'] = code_url
|
||||
collection['Code']['Version'] = code_version
|
||||
collection['Paper']['URL'] = paper_url
|
||||
collection['Paper']['Title'] = paper_title
|
||||
collection['Converted From']['Code'] = repo_url
|
||||
# ['Converted From']['Weights] miss
|
||||
# remove empty attribute
|
||||
check_key_list = ['Code', 'Paper', 'Converted From']
|
||||
for check_key in check_key_list:
|
||||
key_list = list(collection[check_key].keys())
|
||||
for key in key_list:
|
||||
if check_key not in collection:
|
||||
break
|
||||
if collection[check_key][key] == '':
|
||||
if len(collection[check_key].keys()) == 1:
|
||||
collection.pop(check_key)
|
||||
else:
|
||||
collection[check_key].pop(key)
|
||||
yml_file = f'{md_file[:-9]}{collection_name}.yml'
|
||||
if is_backbone:
|
||||
if collection['Name'] not in COLLECTIONS:
|
||||
result = {
|
||||
'Collections': [collection],
|
||||
'Models': models,
|
||||
'Yml': yml_file
|
||||
}
|
||||
COLLECTIONS_TEMP.append(result)
|
||||
return False
|
||||
else:
|
||||
result = {'Models': models}
|
||||
else:
|
||||
COLLECTIONS.append(collection['Name'])
|
||||
result = {'Collections': [collection], 'Models': models}
|
||||
return dump_yaml_and_check_difference(result, yml_file)
|
||||
|
||||
|
||||
def update_model_index():
|
||||
"""Update model-index.yml according to model .md files.
|
||||
|
||||
Returns:
|
||||
Bool: If the updated model-index.yml is different from the original.
|
||||
"""
|
||||
configs_dir = osp.join(MMSEG_ROOT, 'configs')
|
||||
yml_files = glob.glob(osp.join(configs_dir, '**', '*.yml'), recursive=True)
|
||||
yml_files.sort()
|
||||
|
||||
# add .replace('\\', '/') to avoid Windows Style path
|
||||
model_index = {
|
||||
'Import': [
|
||||
osp.relpath(yml_file, MMSEG_ROOT).replace('\\', '/')
|
||||
for yml_file in yml_files
|
||||
]
|
||||
}
|
||||
model_index_file = osp.join(MMSEG_ROOT, 'model-index.yml')
|
||||
is_different = dump_yaml_and_check_difference(model_index,
|
||||
model_index_file)
|
||||
|
||||
return is_different
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
file_list = [fn for fn in sys.argv[1:] if osp.basename(fn) == 'README.md']
|
||||
if not file_list:
|
||||
sys.exit(0)
|
||||
file_modified = False
|
||||
for fn in file_list:
|
||||
file_modified |= parse_md(fn)
|
||||
|
||||
for result in COLLECTIONS_TEMP:
|
||||
collection = result['Collections'][0]
|
||||
yml_file = result.pop('Yml', None)
|
||||
if collection['Name'] in COLLECTIONS:
|
||||
result.pop('Collections')
|
||||
file_modified |= dump_yaml_and_check_difference(result, yml_file)
|
||||
|
||||
file_modified |= update_model_index()
|
||||
sys.exit(1 if file_modified else 0)
|
||||
@ -0,0 +1,45 @@
|
||||
# Copyright (c) OpenMMLab. All rights reserved.
|
||||
import argparse
|
||||
import os
|
||||
import os.path as osp
|
||||
|
||||
import oss2
|
||||
|
||||
ACCESS_KEY_ID = os.getenv('OSS_ACCESS_KEY_ID', None)
|
||||
ACCESS_KEY_SECRET = os.getenv('OSS_ACCESS_KEY_SECRET', None)
|
||||
BUCKET_NAME = 'openmmlab'
|
||||
ENDPOINT = 'https://oss-accelerate.aliyuncs.com'
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description='Upload models to OSS')
|
||||
parser.add_argument('model_zoo', type=str, help='model_zoo input')
|
||||
parser.add_argument(
|
||||
'--dst-folder',
|
||||
type=str,
|
||||
default='mmsegmentation/v0.5',
|
||||
help='destination folder')
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
model_zoo = args.model_zoo
|
||||
dst_folder = args.dst_folder
|
||||
bucket = oss2.Bucket(
|
||||
oss2.Auth(ACCESS_KEY_ID, ACCESS_KEY_SECRET), ENDPOINT, BUCKET_NAME)
|
||||
|
||||
for root, dirs, files in os.walk(model_zoo):
|
||||
for file in files:
|
||||
file_path = osp.relpath(osp.join(root, file), model_zoo)
|
||||
print(f'Uploading {file_path}')
|
||||
|
||||
oss2.resumable_upload(bucket, osp.join(dst_folder, file_path),
|
||||
osp.join(model_zoo, file_path))
|
||||
bucket.put_object_acl(
|
||||
osp.join(dst_folder, file_path), oss2.OBJECT_ACL_PUBLIC_READ)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
76
prediction/image/mx15hdi/Detect/mmsegmentation/.github/CODE_OF_CONDUCT.md
vendored
Normal file
76
prediction/image/mx15hdi/Detect/mmsegmentation/.github/CODE_OF_CONDUCT.md
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, sex characteristics, gender identity and expression,
|
||||
level of experience, education, socio-economic status, nationality, personal
|
||||
appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
- Using welcoming and inclusive language
|
||||
- Being respectful of differing viewpoints and experiences
|
||||
- Gracefully accepting constructive criticism
|
||||
- Focusing on what is best for the community
|
||||
- Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
- The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
- Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
- Public or private harassment
|
||||
- Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
- Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at chenkaidev@gmail.com. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
|
||||
|
||||
For answers to common questions about this code of conduct, see
|
||||
https://www.contributor-covenant.org/faq
|
||||
|
||||
[homepage]: https://www.contributor-covenant.org
|
||||
58
prediction/image/mx15hdi/Detect/mmsegmentation/.github/CONTRIBUTING.md
vendored
Normal file
58
prediction/image/mx15hdi/Detect/mmsegmentation/.github/CONTRIBUTING.md
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
# Contributing to mmsegmentation
|
||||
|
||||
All kinds of contributions are welcome, including but not limited to the following.
|
||||
|
||||
- Fixes (typo, bugs)
|
||||
- New features and components
|
||||
|
||||
## Workflow
|
||||
|
||||
1. fork and pull the latest mmsegmentation
|
||||
2. checkout a new branch (do not use master branch for PRs)
|
||||
3. commit your changes
|
||||
4. create a PR
|
||||
|
||||
:::{note}
|
||||
|
||||
- If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
|
||||
- If you are the author of some papers and would like to include your method to mmsegmentation,
|
||||
please contact Kai Chen (chenkaidev\[at\]gmail\[dot\]com). We will much appreciate your contribution.
|
||||
:::
|
||||
|
||||
## Code style
|
||||
|
||||
### Python
|
||||
|
||||
We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style.
|
||||
|
||||
We use the following tools for linting and formatting:
|
||||
|
||||
- [flake8](http://flake8.pycqa.org/en/latest/): linter
|
||||
- [yapf](https://github.com/google/yapf): formatter
|
||||
- [isort](https://github.com/timothycrosley/isort): sort imports
|
||||
|
||||
Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg) and [.isort.cfg](../.isort.cfg).
|
||||
|
||||
We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`,
|
||||
fixes `end-of-files`, sorts `requirments.txt` automatically on every commit.
|
||||
The config for a pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml).
|
||||
|
||||
After you clone the repository, you will need to install initialize pre-commit hook.
|
||||
|
||||
```shell
|
||||
pip install -U pre-commit
|
||||
```
|
||||
|
||||
From the repository folder
|
||||
|
||||
```shell
|
||||
pre-commit install
|
||||
```
|
||||
|
||||
After this on every commit check code linters and formatter will be enforced.
|
||||
|
||||
> Before you create a PR, make sure that your code lints and is formatted by yapf.
|
||||
|
||||
### C++ and CUDA
|
||||
|
||||
We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html).
|
||||
6
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
6
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/config.yml
vendored
Normal file
@ -0,0 +1,6 @@
|
||||
blank_issues_enabled: false
|
||||
|
||||
contact_links:
|
||||
- name: MMSegmentation Documentation
|
||||
url: https://mmsegmentation.readthedocs.io
|
||||
about: Check the docs and FAQ to see if you question is already answered.
|
||||
48
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/error-report.md
vendored
Normal file
48
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/error-report.md
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
---
|
||||
name: Error report
|
||||
about: Create a report to help us improve
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
Thanks for your error report and we appreciate it a lot.
|
||||
|
||||
**Checklist**
|
||||
|
||||
1. I have searched related issues but cannot get the expected help.
|
||||
2. The bug has not been fixed in the latest version.
|
||||
|
||||
**Describe the bug**
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
**Reproduction**
|
||||
|
||||
1. What command or script did you run?
|
||||
|
||||
```none
|
||||
A placeholder for the command.
|
||||
```
|
||||
|
||||
2. Did you make any modifications on the code or config? Did you understand what you have modified?
|
||||
|
||||
3. What dataset did you use?
|
||||
|
||||
**Environment**
|
||||
|
||||
1. Please run `python mmseg/utils/collect_env.py` to collect necessary environment information and paste it here.
|
||||
2. You may add addition that may be helpful for locating the problem, such as
|
||||
- How you installed PyTorch \[e.g., pip, conda, source\]
|
||||
- Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
|
||||
|
||||
**Error traceback**
|
||||
|
||||
If applicable, paste the error trackback here.
|
||||
|
||||
```none
|
||||
A placeholder for trackback.
|
||||
```
|
||||
|
||||
**Bug fix**
|
||||
|
||||
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
|
||||
21
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
21
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
# Describe the feature
|
||||
|
||||
**Motivation**
|
||||
A clear and concise description of the motivation of the feature.
|
||||
Ex1. It is inconvenient when \[....\].
|
||||
Ex2. There is a recent paper \[....\], which is very helpful for \[....\].
|
||||
|
||||
**Related resources**
|
||||
If there is an official code release or third-party implementations, please also provide the information here, which would be very helpful.
|
||||
|
||||
**Additional context**
|
||||
Add any other context or screenshots about the feature request here.
|
||||
If you would like to implement the feature and create a PR, please leave a comment here and that would be much appreciated.
|
||||
7
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/general_questions.md
vendored
Normal file
7
prediction/image/mx15hdi/Detect/mmsegmentation/.github/ISSUE_TEMPLATE/general_questions.md
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
---
|
||||
name: General questions
|
||||
about: Ask general questions to get help
|
||||
title: ''
|
||||
labels: ''
|
||||
assignees: ''
|
||||
---
|
||||
@ -0,0 +1,69 @@
|
||||
---
|
||||
name: Reimplementation Questions
|
||||
about: Ask about questions during model reimplementation
|
||||
title: ''
|
||||
labels: reimplementation
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
If you feel we have helped you, give us a STAR! :satisfied:
|
||||
|
||||
**Notice**
|
||||
|
||||
There are several common situations in the reimplementation issues as below
|
||||
|
||||
1. Reimplement a model in the model zoo using the provided configs
|
||||
2. Reimplement a model in the model zoo on other datasets (e.g., custom datasets)
|
||||
3. Reimplement a custom model but all the components are implemented in MMSegmentation
|
||||
4. Reimplement a custom model with new modules implemented by yourself
|
||||
|
||||
There are several things to do for different cases as below.
|
||||
|
||||
- For cases 1 & 3, please follow the steps in the following sections thus we could help to quickly identify the issue.
|
||||
- For cases 2 & 4, please understand that we are not able to do much help here because we usually do not know the full code, and the users should be responsible for the code they write.
|
||||
- One suggestion for cases 2 & 4 is that the users should first check whether the bug lies in the self-implemented code or the original code. For example, users can first make sure that the same model runs well on supported datasets. If you still need help, please describe what you have done and what you obtain in the issue, and follow the steps in the following sections, and try as clear as possible so that we can better help you.
|
||||
|
||||
**Checklist**
|
||||
|
||||
1. I have searched related issues but cannot get the expected help.
|
||||
2. The issue has not been fixed in the latest version.
|
||||
|
||||
**Describe the issue**
|
||||
|
||||
A clear and concise description of the problem you meet and what you have done.
|
||||
|
||||
**Reproduction**
|
||||
|
||||
1. What command or script did you run?
|
||||
|
||||
```
|
||||
A placeholder for the command.
|
||||
```
|
||||
|
||||
2. What config dir you run?
|
||||
|
||||
```
|
||||
A placeholder for the config.
|
||||
```
|
||||
|
||||
3. Did you make any modifications to the code or config? Did you understand what you have modified?
|
||||
4. What dataset did you use?
|
||||
|
||||
**Environment**
|
||||
|
||||
1. Please run `PYTHONPATH=${PWD}:$PYTHONPATH python mmseg/utils/collect_env.py` to collect the necessary environment information and paste it here.
|
||||
2. You may add an addition that may be helpful for locating the problem, such as
|
||||
1. How you installed PyTorch \[e.g., pip, conda, source\]
|
||||
2. Other environment variables that may be related (such as `$PATH`, `$LD_LIBRARY_PATH`, `$PYTHONPATH`, etc.)
|
||||
|
||||
**Results**
|
||||
|
||||
If applicable, paste the related results here, e.g., what you expect and what you get.
|
||||
|
||||
```
|
||||
A placeholder for results comparison
|
||||
```
|
||||
|
||||
**Issue fix**
|
||||
|
||||
If you have already identified the reason, you can provide the information here. If you are willing to create a PR to fix it, please also leave a comment here and that would be much appreciated!
|
||||
25
prediction/image/mx15hdi/Detect/mmsegmentation/.github/pull_request_template.md
vendored
Normal file
25
prediction/image/mx15hdi/Detect/mmsegmentation/.github/pull_request_template.md
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
Thanks for your contribution and we appreciate it a lot. The following instructions would make your pull request more healthy and more easily get feedback. If you do not understand some items, don't worry, just make the pull request and seek help from maintainers.
|
||||
|
||||
## Motivation
|
||||
|
||||
Please describe the motivation of this PR and the goal you want to achieve through this PR.
|
||||
|
||||
## Modification
|
||||
|
||||
Please briefly describe what modification is made in this PR.
|
||||
|
||||
## BC-breaking (Optional)
|
||||
|
||||
Does the modification introduce changes that break the backward-compatibility of the downstream repos?
|
||||
If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.
|
||||
|
||||
## Use cases (Optional)
|
||||
|
||||
If this PR introduces a new feature, it is better to list some use cases here, and update the documentation.
|
||||
|
||||
## Checklist
|
||||
|
||||
1. Pre-commit or other linting tools are used to fix the potential lint issues.
|
||||
2. The modification is covered by complete unit tests. If not, please add more unit test to ensure the correctness.
|
||||
3. If the modification has potential influence on downstream projects, this PR should be tested with downstream projects, like MMDet or MMDet3D.
|
||||
4. The documentation has been modified accordingly, like docstring or example tutorials.
|
||||
257
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/build.yml
vendored
Normal file
257
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/build.yml
vendored
Normal file
@ -0,0 +1,257 @@
|
||||
name: build
|
||||
|
||||
on:
|
||||
push:
|
||||
paths-ignore:
|
||||
- 'demo/**'
|
||||
- '.dev/**'
|
||||
- 'docker/**'
|
||||
- 'tools/**'
|
||||
- '**.md'
|
||||
|
||||
pull_request:
|
||||
paths-ignore:
|
||||
- 'demo/**'
|
||||
- '.dev/**'
|
||||
- 'docker/**'
|
||||
- 'tools/**'
|
||||
- 'docs/**'
|
||||
- '**.md'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_cpu:
|
||||
runs-on: ubuntu-18.04
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7]
|
||||
torch: [1.5.1, 1.6.0, 1.7.0, 1.8.0, 1.9.0]
|
||||
include:
|
||||
- torch: 1.5.1
|
||||
torch_version: torch1.5
|
||||
torchvision: 0.6.1
|
||||
- torch: 1.6.0
|
||||
torch_version: torch1.6
|
||||
torchvision: 0.7.0
|
||||
- torch: 1.7.0
|
||||
torch_version: torch1.7
|
||||
torchvision: 0.8.1
|
||||
- torch: 1.8.0
|
||||
torch_version: torch1.8
|
||||
torchvision: 0.9.0
|
||||
- torch: 1.9.0
|
||||
torch_version: torch1.9
|
||||
torchvision: 0.10.0
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Upgrade pip
|
||||
run: pip install pip --upgrade
|
||||
- name: Install PyTorch
|
||||
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- name: Install MMCV
|
||||
run: |
|
||||
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/${{matrix.torch_version}}/index.html
|
||||
python -c 'import mmcv; print(mmcv.__version__)'
|
||||
- name: Install unittest dependencies
|
||||
run: |
|
||||
pip install -r requirements.txt
|
||||
- name: Build and install
|
||||
run: rm -rf .eggs && pip install -e .
|
||||
- name: Run unittests and generate coverage report
|
||||
run: |
|
||||
pip install timm
|
||||
coverage run --branch --source mmseg -m pytest tests/
|
||||
coverage xml
|
||||
coverage report -m
|
||||
if: ${{matrix.torch >= '1.5.0'}}
|
||||
- name: Skip timm unittests and generate coverage report
|
||||
run: |
|
||||
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
|
||||
coverage xml
|
||||
coverage report -m
|
||||
if: ${{matrix.torch < '1.5.0'}}
|
||||
|
||||
build_cuda101:
|
||||
runs-on: ubuntu-18.04
|
||||
container:
|
||||
image: pytorch/pytorch:1.6.0-cuda10.1-cudnn7-devel
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7]
|
||||
torch:
|
||||
[
|
||||
1.5.1+cu101,
|
||||
1.6.0+cu101,
|
||||
1.7.0+cu101,
|
||||
1.8.0+cu101
|
||||
]
|
||||
include:
|
||||
- torch: 1.5.1+cu101
|
||||
torch_version: torch1.5
|
||||
torchvision: 0.6.1+cu101
|
||||
- torch: 1.6.0+cu101
|
||||
torch_version: torch1.6
|
||||
torchvision: 0.7.0+cu101
|
||||
- torch: 1.7.0+cu101
|
||||
torch_version: torch1.7
|
||||
torchvision: 0.8.1+cu101
|
||||
- torch: 1.8.0+cu101
|
||||
torch_version: torch1.8
|
||||
torchvision: 0.9.0+cu101
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Fetch GPG keys
|
||||
run: |
|
||||
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
|
||||
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 python${{matrix.python-version}}-dev
|
||||
apt-get clean
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
- name: Install Pillow
|
||||
run: python -m pip install Pillow==6.2.2
|
||||
if: ${{matrix.torchvision < 0.5}}
|
||||
- name: Install PyTorch
|
||||
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- name: Install mmseg dependencies
|
||||
run: |
|
||||
python -V
|
||||
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu101/${{matrix.torch_version}}/index.html
|
||||
python -m pip install -r requirements.txt
|
||||
python -c 'import mmcv; print(mmcv.__version__)'
|
||||
- name: Build and install
|
||||
run: |
|
||||
rm -rf .eggs
|
||||
python setup.py check -m -s
|
||||
TORCH_CUDA_ARCH_LIST=7.0 pip install .
|
||||
- name: Run unittests and generate coverage report
|
||||
run: |
|
||||
python -m pip install timm
|
||||
coverage run --branch --source mmseg -m pytest tests/
|
||||
coverage xml
|
||||
coverage report -m
|
||||
if: ${{matrix.torch >= '1.5.0'}}
|
||||
- name: Skip timm unittests and generate coverage report
|
||||
run: |
|
||||
coverage run --branch --source mmseg -m pytest tests/ --ignore tests/test_models/test_backbones/test_timm_backbone.py
|
||||
coverage xml
|
||||
coverage report -m
|
||||
if: ${{matrix.torch < '1.5.0'}}
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v1.0.10
|
||||
with:
|
||||
file: ./coverage.xml
|
||||
flags: unittests
|
||||
env_vars: OS,PYTHON
|
||||
name: codecov-umbrella
|
||||
fail_ci_if_error: false
|
||||
|
||||
build_cuda102:
|
||||
runs-on: ubuntu-18.04
|
||||
container:
|
||||
image: pytorch/pytorch:1.9.0-cuda10.2-cudnn7-devel
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.6, 3.7, 3.8, 3.9]
|
||||
torch: [1.9.0+cu102]
|
||||
include:
|
||||
- torch: 1.9.0+cu102
|
||||
torch_version: torch1.9
|
||||
torchvision: 0.10.0+cu102
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Fetch GPG keys
|
||||
run: |
|
||||
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
|
||||
apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
apt-get update && apt-get install -y libgl1-mesa-glx ffmpeg libsm6 libxext6 git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6
|
||||
apt-get clean
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
- name: Install Pillow
|
||||
run: python -m pip install Pillow==6.2.2
|
||||
if: ${{matrix.torchvision < 0.5}}
|
||||
- name: Install PyTorch
|
||||
run: python -m pip install torch==${{matrix.torch}} torchvision==${{matrix.torchvision}} -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- name: Install mmseg dependencies
|
||||
run: |
|
||||
python -V
|
||||
python -m pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu102/${{matrix.torch_version}}/index.html
|
||||
python -m pip install -r requirements.txt
|
||||
python -c 'import mmcv; print(mmcv.__version__)'
|
||||
- name: Build and install
|
||||
run: |
|
||||
rm -rf .eggs
|
||||
python setup.py check -m -s
|
||||
TORCH_CUDA_ARCH_LIST=7.0 pip install .
|
||||
- name: Run unittests and generate coverage report
|
||||
run: |
|
||||
python -m pip install timm
|
||||
coverage run --branch --source mmseg -m pytest tests/
|
||||
coverage xml
|
||||
coverage report -m
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
files: ./coverage.xml
|
||||
flags: unittests
|
||||
env_vars: OS,PYTHON
|
||||
name: codecov-umbrella
|
||||
fail_ci_if_error: false
|
||||
|
||||
test_windows:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-2022]
|
||||
python: [3.8]
|
||||
platform: [cpu, cu111]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python }}
|
||||
- name: Upgrade pip
|
||||
run: python -m pip install pip --upgrade --user
|
||||
- name: Install OpenCV
|
||||
run: pip install opencv-python>=3
|
||||
- name: Install PyTorch
|
||||
# As a complement to Linux CI, we test on PyTorch LTS version
|
||||
run: pip install torch==1.8.2+${{ matrix.platform }} torchvision==0.9.2+${{ matrix.platform }} -f https://download.pytorch.org/whl/lts/1.8/torch_lts.html
|
||||
- name: Install MMCV
|
||||
run: |
|
||||
pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8/index.html --only-binary mmcv-full
|
||||
- name: Install unittest dependencies
|
||||
run: pip install -r requirements/tests.txt -r requirements/optional.txt
|
||||
- name: Build and install
|
||||
run: pip install -e .
|
||||
- name: Run unittests
|
||||
run: |
|
||||
python -m pip install timm
|
||||
coverage run --branch --source mmseg -m pytest tests/
|
||||
- name: Generate coverage report
|
||||
run: |
|
||||
coverage xml
|
||||
coverage report -m
|
||||
26
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/deploy.yml
vendored
Normal file
26
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/deploy.yml
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
name: deploy
|
||||
|
||||
on: push
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build-n-publish:
|
||||
runs-on: ubuntu-latest
|
||||
if: startsWith(github.event.ref, 'refs/tags')
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python 3.7
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Build MMSegmentation
|
||||
run: |
|
||||
pip install wheel
|
||||
python setup.py sdist bdist_wheel
|
||||
- name: Publish distribution to PyPI
|
||||
run: |
|
||||
pip install twine
|
||||
twine upload dist/* -u __token__ -p ${{ secrets.pypi_password }}
|
||||
28
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/lint.yml
vendored
Normal file
28
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/lint.yml
vendored
Normal file
@ -0,0 +1,28 @@
|
||||
name: lint
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-18.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python 3.7
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: 3.7
|
||||
- name: Install pre-commit hook
|
||||
run: |
|
||||
pip install pre-commit
|
||||
pre-commit install
|
||||
- name: Linting
|
||||
run: |
|
||||
pre-commit run --all-files
|
||||
- name: Check docstring coverage
|
||||
run: |
|
||||
pip install interrogate
|
||||
interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --exclude mmseg/ops --ignore-regex "__repr__" --fail-under 80 mmseg
|
||||
44
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/test_mim.yml
vendored
Normal file
44
prediction/image/mx15hdi/Detect/mmsegmentation/.github/workflows/test_mim.yml
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
name: test-mim
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- 'model-index.yml'
|
||||
- 'configs/**'
|
||||
|
||||
pull_request:
|
||||
paths:
|
||||
- 'model-index.yml'
|
||||
- 'configs/**'
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
build_cpu:
|
||||
runs-on: ubuntu-18.04
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: [3.7]
|
||||
torch: [1.8.0]
|
||||
include:
|
||||
- torch: 1.8.0
|
||||
torch_version: torch1.8
|
||||
torchvision: 0.9.0
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Upgrade pip
|
||||
run: pip install pip --upgrade
|
||||
- name: Install PyTorch
|
||||
run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html
|
||||
- name: Install openmim
|
||||
run: pip install openmim
|
||||
- name: Build and install
|
||||
run: rm -rf .eggs && mim install -e .
|
||||
- name: test commands of mim
|
||||
run: mim search mmsegmentation
|
||||
120
prediction/image/mx15hdi/Detect/mmsegmentation/.gitignore
vendored
Normal file
120
prediction/image/mx15hdi/Detect/mmsegmentation/.gitignore
vendored
Normal file
@ -0,0 +1,120 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/en/_build/
|
||||
docs/zh_cn/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# celery beat schedule file
|
||||
celerybeat-schedule
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
.DS_Store
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
|
||||
data
|
||||
.vscode
|
||||
.idea
|
||||
|
||||
# custom
|
||||
*.pkl
|
||||
*.pkl.json
|
||||
*.log.json
|
||||
work_dirs/
|
||||
mmseg/.mim
|
||||
|
||||
# Pytorch
|
||||
*.pth
|
||||
11
prediction/image/mx15hdi/Detect/mmsegmentation/.owners.yml
Normal file
11
prediction/image/mx15hdi/Detect/mmsegmentation/.owners.yml
Normal file
@ -0,0 +1,11 @@
|
||||
assign:
|
||||
strategy:
|
||||
# random
|
||||
# round-robin
|
||||
daily-shift-based
|
||||
assignees:
|
||||
- MengzhangLI
|
||||
- xiexinch
|
||||
- MeowZheng
|
||||
- MengzhangLI
|
||||
- xiexinch
|
||||
@ -0,0 +1,60 @@
|
||||
repos:
|
||||
- repo: https://gitlab.com/pycqa/flake8.git
|
||||
rev: 3.8.3
|
||||
hooks:
|
||||
- id: flake8
|
||||
- repo: https://github.com/PyCQA/isort
|
||||
rev: 5.10.1
|
||||
hooks:
|
||||
- id: isort
|
||||
- repo: https://github.com/pre-commit/mirrors-yapf
|
||||
rev: v0.30.0
|
||||
hooks:
|
||||
- id: yapf
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v3.1.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: check-yaml
|
||||
- id: end-of-file-fixer
|
||||
- id: requirements-txt-fixer
|
||||
- id: double-quote-string-fixer
|
||||
- id: check-merge-conflict
|
||||
- id: fix-encoding-pragma
|
||||
args: ["--remove"]
|
||||
- id: mixed-line-ending
|
||||
args: ["--fix=lf"]
|
||||
- repo: https://github.com/executablebooks/mdformat
|
||||
rev: 0.7.9
|
||||
hooks:
|
||||
- id: mdformat
|
||||
args: ["--number"]
|
||||
additional_dependencies:
|
||||
- mdformat-openmmlab
|
||||
- mdformat_frontmatter
|
||||
- linkify-it-py
|
||||
- repo: https://github.com/codespell-project/codespell
|
||||
rev: v2.1.0
|
||||
hooks:
|
||||
- id: codespell
|
||||
- repo: https://github.com/myint/docformatter
|
||||
rev: v1.3.1
|
||||
hooks:
|
||||
- id: docformatter
|
||||
args: ["--in-place", "--wrap-descriptions", "79"]
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: update-model-index
|
||||
name: update-model-index
|
||||
description: Collect model information and update model-index.yml
|
||||
entry: .dev/md2yml.py
|
||||
additional_dependencies: [mmcv, lxml, opencv-python]
|
||||
language: python
|
||||
files: ^configs/.*\.md$
|
||||
require_serial: true
|
||||
- repo: https://github.com/open-mmlab/pre-commit-hooks
|
||||
rev: v0.2.0 # Use the rev to fix revision
|
||||
hooks:
|
||||
- id: check-algo-readme
|
||||
- id: check-copyright
|
||||
args: ["mmseg", "tools", "tests", "demo"] # the dir_to_check with expected directory to check
|
||||
@ -0,0 +1,9 @@
|
||||
version: 2
|
||||
|
||||
formats: all
|
||||
|
||||
python:
|
||||
version: 3.7
|
||||
install:
|
||||
- requirements: requirements/docs.txt
|
||||
- requirements: requirements/readthedocs.txt
|
||||
@ -0,0 +1,8 @@
|
||||
cff-version: 1.2.0
|
||||
message: "If you use this software, please cite it as below."
|
||||
authors:
|
||||
- name: "MMSegmentation Contributors"
|
||||
title: "OpenMMLab Semantic Segmentation Toolbox and Benchmark"
|
||||
date-released: 2020-07-10
|
||||
url: "https://github.com/open-mmlab/mmsegmentation"
|
||||
license: Apache-2.0
|
||||
203
prediction/image/mx15hdi/Detect/mmsegmentation/LICENSE
Normal file
203
prediction/image/mx15hdi/Detect/mmsegmentation/LICENSE
Normal file
@ -0,0 +1,203 @@
|
||||
Copyright 2020 The MMSegmentation Authors. All rights reserved.
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2020 The MMSegmentation Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
@ -0,0 +1,7 @@
|
||||
# Licenses for special features
|
||||
|
||||
In this file, we list the features with other licenses instead of Apache 2.0. Users should be careful about adopting these features in any commercial matters.
|
||||
|
||||
| Feature | Files | License |
|
||||
| :-------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------: |
|
||||
| SegFormer | [mmseg/models/decode_heads/segformer_head.py](https://github.com/open-mmlab/mmsegmentation/blob/master/mmseg/models/decode_heads/segformer_head.py) | [NVIDIA License](https://github.com/NVlabs/SegFormer#license) |
|
||||
@ -0,0 +1,4 @@
|
||||
include requirements/*.txt
|
||||
include mmseg/.mim/model-index.yml
|
||||
recursive-include mmseg/.mim/configs *.py *.yml
|
||||
recursive-include mmseg/.mim/tools *.py *.sh
|
||||
229
prediction/image/mx15hdi/Detect/mmsegmentation/README.md
Normal file
229
prediction/image/mx15hdi/Detect/mmsegmentation/README.md
Normal file
@ -0,0 +1,229 @@
|
||||
<div align="center">
|
||||
<img src="resources/mmseg-logo.png" width="600"/>
|
||||
<div> </div>
|
||||
<div align="center">
|
||||
<b><font size="5">OpenMMLab website</font></b>
|
||||
<sup>
|
||||
<a href="https://openmmlab.com">
|
||||
<i><font size="4">HOT</font></i>
|
||||
</a>
|
||||
</sup>
|
||||
|
||||
<b><font size="5">OpenMMLab platform</font></b>
|
||||
<sup>
|
||||
<a href="https://platform.openmmlab.com">
|
||||
<i><font size="4">TRY IT OUT</font></i>
|
||||
</a>
|
||||
</sup>
|
||||
</div>
|
||||
<div> </div>
|
||||
|
||||
<br />
|
||||
|
||||
[](https://pypi.org/project/mmsegmentation/)
|
||||
[](https://pypi.org/project/mmsegmentation)
|
||||
[](https://mmsegmentation.readthedocs.io/en/latest/)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/actions)
|
||||
[](https://codecov.io/gh/open-mmlab/mmsegmentation)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/blob/master/LICENSE)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/issues)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/issues)
|
||||
|
||||
[📘Documentation](https://mmsegmentation.readthedocs.io/en/latest/) |
|
||||
[🛠️Installation](https://mmsegmentation.readthedocs.io/en/latest/get_started.html) |
|
||||
[👀Model Zoo](https://mmsegmentation.readthedocs.io/en/latest/model_zoo.html) |
|
||||
[🆕Update News](https://mmsegmentation.readthedocs.io/en/latest/changelog.html) |
|
||||
[🤔Reporting Issues](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
|
||||
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
|
||||
English | [简体中文](README_zh-CN.md)
|
||||
|
||||
</div>
|
||||
|
||||
## Introduction
|
||||
|
||||
MMSegmentation is an open source semantic segmentation toolbox based on PyTorch.
|
||||
It is a part of the [OpenMMLab](https://openmmlab.com/) project.
|
||||
|
||||
The master branch works with **PyTorch 1.5+**.
|
||||
|
||||

|
||||
|
||||
<details open>
|
||||
<summary>Major features</summary>
|
||||
|
||||
- **Unified Benchmark**
|
||||
|
||||
We provide a unified benchmark toolbox for various semantic segmentation methods.
|
||||
|
||||
- **Modular Design**
|
||||
|
||||
We decompose the semantic segmentation framework into different components and one can easily construct a customized semantic segmentation framework by combining different modules.
|
||||
|
||||
- **Support of multiple methods out of box**
|
||||
|
||||
The toolbox directly supports popular and contemporary semantic segmentation frameworks, *e.g.* PSPNet, DeepLabV3, PSANet, DeepLabV3+, etc.
|
||||
|
||||
- **High efficiency**
|
||||
|
||||
The training speed is faster than or comparable to other codebases.
|
||||
|
||||
</details>
|
||||
|
||||
## What's New
|
||||
|
||||
v0.25.0 was released in 6/2/2022:
|
||||
|
||||
- Support PyTorch backend on MLU
|
||||
|
||||
Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
|
||||
|
||||
## Installation
|
||||
|
||||
Please refer to [get_started.md](docs/en/get_started.md#installation) for installation and [dataset_prepare.md](docs/en/dataset_prepare.md#prepare-datasets) for dataset preparation.
|
||||
|
||||
## Get Started
|
||||
|
||||
Please see [train.md](docs/en/train.md) and [inference.md](docs/en/inference.md) for the basic usage of MMSegmentation.
|
||||
There are also tutorials for:
|
||||
|
||||
- [customizing dataset](docs/en/tutorials/customize_datasets.md)
|
||||
- [designing data pipeline](docs/en/tutorials/data_pipeline.md)
|
||||
- [customizing modules](docs/en/tutorials/customize_models.md)
|
||||
- [customizing runtime](docs/en/tutorials/customize_runtime.md)
|
||||
- [training tricks](docs/en/tutorials/training_tricks.md)
|
||||
- [useful tools](docs/en/useful_tools.md)
|
||||
|
||||
A Colab tutorial is also provided. You may preview the notebook [here](demo/MMSegmentation_Tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb) on Colab.
|
||||
|
||||
## Benchmark and model zoo
|
||||
|
||||
Results and models are available in the [model zoo](docs/en/model_zoo.md).
|
||||
|
||||
Supported backbones:
|
||||
|
||||
- [x] ResNet (CVPR'2016)
|
||||
- [x] ResNeXt (CVPR'2017)
|
||||
- [x] [HRNet (CVPR'2019)](configs/hrnet)
|
||||
- [x] [ResNeSt (ArXiv'2020)](configs/resnest)
|
||||
- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2)
|
||||
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
|
||||
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
|
||||
- [x] [Swin Transformer (ICCV'2021)](configs/swin)
|
||||
- [x] [Twins (NeurIPS'2021)](configs/twins)
|
||||
- [x] [BEiT (ICLR'2022)](configs/beit)
|
||||
- [x] [ConvNeXt (CVPR'2022)](configs/convnext)
|
||||
- [x] [MAE (CVPR'2022)](configs/mae)
|
||||
|
||||
Supported methods:
|
||||
|
||||
- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn)
|
||||
- [x] [ERFNet (T-ITS'2017)](configs/erfnet)
|
||||
- [x] [UNet (MICCAI'2016/Nat. Methods'2019)](configs/unet)
|
||||
- [x] [PSPNet (CVPR'2017)](configs/pspnet)
|
||||
- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3)
|
||||
- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1)
|
||||
- [x] [PSANet (ECCV'2018)](configs/psanet)
|
||||
- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus)
|
||||
- [x] [UPerNet (ECCV'2018)](configs/upernet)
|
||||
- [x] [ICNet (ECCV'2018)](configs/icnet)
|
||||
- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net)
|
||||
- [x] [EncNet (CVPR'2018)](configs/encnet)
|
||||
- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn)
|
||||
- [x] [DANet (CVPR'2019)](configs/danet)
|
||||
- [x] [APCNet (CVPR'2019)](configs/apcnet)
|
||||
- [x] [EMANet (ICCV'2019)](configs/emanet)
|
||||
- [x] [CCNet (ICCV'2019)](configs/ccnet)
|
||||
- [x] [DMNet (ICCV'2019)](configs/dmnet)
|
||||
- [x] [ANN (ICCV'2019)](configs/ann)
|
||||
- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet)
|
||||
- [x] [FastFCN (ArXiv'2019)](configs/fastfcn)
|
||||
- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn)
|
||||
- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet)
|
||||
- [x] [OCRNet (ECCV'2020)](configs/ocrnet)
|
||||
- [x] [DNLNet (ECCV'2020)](configs/dnlnet)
|
||||
- [x] [PointRend (CVPR'2020)](configs/point_rend)
|
||||
- [x] [CGNet (TIP'2020)](configs/cgnet)
|
||||
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
|
||||
- [x] [STDC (CVPR'2021)](configs/stdc)
|
||||
- [x] [SETR (CVPR'2021)](configs/setr)
|
||||
- [x] [DPT (ArXiv'2021)](configs/dpt)
|
||||
- [x] [Segmenter (ICCV'2021)](configs/segmenter)
|
||||
- [x] [SegFormer (NeurIPS'2021)](configs/segformer)
|
||||
- [x] [K-Net (NeurIPS'2021)](configs/knet)
|
||||
|
||||
Supported datasets:
|
||||
|
||||
- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#cityscapes)
|
||||
- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-voc)
|
||||
- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#ade20k)
|
||||
- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#pascal-context)
|
||||
- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-10k)
|
||||
- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#coco-stuff-164k)
|
||||
- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#chase-db1)
|
||||
- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#drive)
|
||||
- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#hrf)
|
||||
- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#stare)
|
||||
- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#dark-zurich)
|
||||
- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#nighttime-driving)
|
||||
- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#loveda)
|
||||
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-potsdam)
|
||||
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isprs-vaihingen)
|
||||
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/en/dataset_prepare.md#isaid)
|
||||
|
||||
## FAQ
|
||||
|
||||
Please refer to [FAQ](docs/en/faq.md) for frequently asked questions.
|
||||
|
||||
## Contributing
|
||||
|
||||
We appreciate all contributions to improve MMSegmentation. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.
|
||||
|
||||
## Acknowledgement
|
||||
|
||||
MMSegmentation is an open source project that welcome any contribution and feedback.
|
||||
We wish that the toolbox and benchmark could serve the growing research
|
||||
community by providing a flexible as well as standardized toolkit to reimplement existing methods
|
||||
and develop their own new semantic segmentation methods.
|
||||
|
||||
## Citation
|
||||
|
||||
If you find this project useful in your research, please consider cite:
|
||||
|
||||
```bibtex
|
||||
@misc{mmseg2020,
|
||||
title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
|
||||
author={MMSegmentation Contributors},
|
||||
howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
|
||||
year={2020}
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MMSegmentation is released under the Apache 2.0 license, while some specific features in this library are with other licenses. Please refer to [LICENSES.md](LICENSES.md) for the careful check, if you are using our code for commercial matters.
|
||||
|
||||
## Projects in OpenMMLab
|
||||
|
||||
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
|
||||
- [MIM](https://github.com/open-mmlab/mim): MIM installs OpenMMLab packages.
|
||||
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
|
||||
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
|
||||
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
|
||||
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab rotated object detection toolbox and benchmark.
|
||||
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
|
||||
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab text detection, recognition, and understanding toolbox.
|
||||
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
|
||||
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 3D human parametric model toolbox and benchmark.
|
||||
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab self-supervised learning toolbox and benchmark.
|
||||
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab model compression toolbox and benchmark.
|
||||
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab fewshot learning toolbox and benchmark.
|
||||
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
|
||||
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
|
||||
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab optical flow toolbox and benchmark.
|
||||
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
|
||||
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.
|
||||
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab Model Deployment Framework.
|
||||
242
prediction/image/mx15hdi/Detect/mmsegmentation/README_zh-CN.md
Normal file
242
prediction/image/mx15hdi/Detect/mmsegmentation/README_zh-CN.md
Normal file
@ -0,0 +1,242 @@
|
||||
<div align="center">
|
||||
<img src="resources/mmseg-logo.png" width="600"/>
|
||||
<div> </div>
|
||||
<div align="center">
|
||||
<b><font size="5">OpenMMLab 官网</font></b>
|
||||
<sup>
|
||||
<a href="https://openmmlab.com">
|
||||
<i><font size="4">HOT</font></i>
|
||||
</a>
|
||||
</sup>
|
||||
|
||||
<b><font size="5">OpenMMLab 开放平台</font></b>
|
||||
<sup>
|
||||
<a href="https://platform.openmmlab.com">
|
||||
<i><font size="4">TRY IT OUT</font></i>
|
||||
</a>
|
||||
</sup>
|
||||
</div>
|
||||
<div> </div>
|
||||
|
||||
<br />
|
||||
|
||||
[](https://pypi.org/project/mmsegmentation/)
|
||||
[](https://pypi.org/project/mmsegmentation)
|
||||
[](https://mmsegmentation.readthedocs.io/zh_CN/latest/)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/actions)
|
||||
[](https://codecov.io/gh/open-mmlab/mmsegmentation)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/blob/master/LICENSE)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/issues)
|
||||
[](https://github.com/open-mmlab/mmsegmentation/issues)
|
||||
|
||||
[📘使用文档](https://mmsegmentation.readthedocs.io/en/latest/) |
|
||||
[🛠️安装指南](https://mmsegmentation.readthedocs.io/en/latest/get_started.html) |
|
||||
[👀模型库](https://mmsegmentation.readthedocs.io/en/latest/model_zoo.html) |
|
||||
[🆕更新日志](https://mmsegmentation.readthedocs.io/en/latest/changelog.html) |
|
||||
[🤔报告问题](https://github.com/open-mmlab/mmsegmentation/issues/new/choose)
|
||||
|
||||
[English](README.md) | 简体中文
|
||||
|
||||
</div>
|
||||
|
||||
## 简介
|
||||
|
||||
MMSegmentation 是一个基于 PyTorch 的语义分割开源工具箱。它是 OpenMMLab 项目的一部分。
|
||||
|
||||
主分支代码目前支持 PyTorch 1.5 以上的版本。
|
||||
|
||||

|
||||
|
||||
<details open>
|
||||
<summary>Major features</summary>
|
||||
|
||||
### 主要特性
|
||||
|
||||
- **统一的基准平台**
|
||||
|
||||
我们将各种各样的语义分割算法集成到了一个统一的工具箱,进行基准测试。
|
||||
|
||||
- **模块化设计**
|
||||
|
||||
MMSegmentation 将分割框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的分割模型。
|
||||
|
||||
- **丰富的即插即用的算法和模型**
|
||||
|
||||
MMSegmentation 支持了众多主流的和最新的检测算法,例如 PSPNet,DeepLabV3,PSANet,DeepLabV3+ 等.
|
||||
|
||||
- **速度快**
|
||||
|
||||
训练速度比其他语义分割代码库更快或者相当。
|
||||
|
||||
</details>
|
||||
|
||||
## 最新进展
|
||||
|
||||
最新版本 v0.25.0 在 2022.6.2 发布:
|
||||
|
||||
- 支持 PyTorch MLU 后端
|
||||
|
||||
如果想了解更多版本更新细节和历史信息,请阅读[更新日志](docs/en/changelog.md)。
|
||||
|
||||
## 安装
|
||||
|
||||
请参考[快速入门文档](docs/zh_cn/get_started.md#installation)进行安装,参考[数据集准备](docs/zh_cn/dataset_prepare.md)处理数据。
|
||||
|
||||
## 快速入门
|
||||
|
||||
请参考[训练教程](docs/zh_cn/train.md)和[测试教程](docs/zh_cn/inference.md)学习 MMSegmentation 的基本使用。
|
||||
我们也提供了一些进阶教程,内容覆盖了:
|
||||
|
||||
- [增加自定义数据集](docs/zh_cn/tutorials/customize_datasets.md)
|
||||
- [设计新的数据预处理流程](docs/zh_cn/tutorials/data_pipeline.md)
|
||||
- [增加自定义模型](docs/zh_cn/tutorials/customize_models.md)
|
||||
- [增加自定义的运行时配置](docs/zh_cn/tutorials/customize_runtime.md)。
|
||||
- [训练技巧说明](docs/zh_cn/tutorials/training_tricks.md)
|
||||
- [有用的工具](docs/zh_cn/useful_tools.md)。
|
||||
|
||||
同时,我们提供了 Colab 教程。你可以在[这里](demo/MMSegmentation_Tutorial.ipynb)浏览教程,或者直接在 Colab 上[运行](https://colab.research.google.com/github/open-mmlab/mmsegmentation/blob/master/demo/MMSegmentation_Tutorial.ipynb)。
|
||||
|
||||
## 基准测试和模型库
|
||||
|
||||
测试结果和模型可以在[模型库](docs/zh_cn/model_zoo.md)中找到。
|
||||
|
||||
已支持的骨干网络:
|
||||
|
||||
- [x] ResNet (CVPR'2016)
|
||||
- [x] ResNeXt (CVPR'2017)
|
||||
- [x] [HRNet (CVPR'2019)](configs/hrnet)
|
||||
- [x] [ResNeSt (ArXiv'2020)](configs/resnest)
|
||||
- [x] [MobileNetV2 (CVPR'2018)](configs/mobilenet_v2)
|
||||
- [x] [MobileNetV3 (ICCV'2019)](configs/mobilenet_v3)
|
||||
- [x] [Vision Transformer (ICLR'2021)](configs/vit)
|
||||
- [x] [Swin Transformer (ICCV'2021)](configs/swin)
|
||||
- [x] [Twins (NeurIPS'2021)](configs/twins)
|
||||
- [x] [BEiT (ICLR'2022)](configs/beit)
|
||||
- [x] [ConvNeXt (CVPR'2022)](configs/convnext)
|
||||
- [x] [MAE (CVPR'2022)](configs/mae)
|
||||
|
||||
已支持的算法:
|
||||
|
||||
- [x] [FCN (CVPR'2015/TPAMI'2017)](configs/fcn)
|
||||
- [x] [ERFNet (T-ITS'2017)](configs/erfnet)
|
||||
- [x] [UNet (MICCAI'2016/Nat. Methods'2019)](configs/unet)
|
||||
- [x] [PSPNet (CVPR'2017)](configs/pspnet)
|
||||
- [x] [DeepLabV3 (ArXiv'2017)](configs/deeplabv3)
|
||||
- [x] [BiSeNetV1 (ECCV'2018)](configs/bisenetv1)
|
||||
- [x] [PSANet (ECCV'2018)](configs/psanet)
|
||||
- [x] [DeepLabV3+ (CVPR'2018)](configs/deeplabv3plus)
|
||||
- [x] [UPerNet (ECCV'2018)](configs/upernet)
|
||||
- [x] [ICNet (ECCV'2018)](configs/icnet)
|
||||
- [x] [NonLocal Net (CVPR'2018)](configs/nonlocal_net)
|
||||
- [x] [EncNet (CVPR'2018)](configs/encnet)
|
||||
- [x] [Semantic FPN (CVPR'2019)](configs/sem_fpn)
|
||||
- [x] [DANet (CVPR'2019)](configs/danet)
|
||||
- [x] [APCNet (CVPR'2019)](configs/apcnet)
|
||||
- [x] [EMANet (ICCV'2019)](configs/emanet)
|
||||
- [x] [CCNet (ICCV'2019)](configs/ccnet)
|
||||
- [x] [DMNet (ICCV'2019)](configs/dmnet)
|
||||
- [x] [ANN (ICCV'2019)](configs/ann)
|
||||
- [x] [GCNet (ICCVW'2019/TPAMI'2020)](configs/gcnet)
|
||||
- [x] [FastFCN (ArXiv'2019)](configs/fastfcn)
|
||||
- [x] [Fast-SCNN (ArXiv'2019)](configs/fastscnn)
|
||||
- [x] [ISANet (ArXiv'2019/IJCV'2021)](configs/isanet)
|
||||
- [x] [OCRNet (ECCV'2020)](configs/ocrnet)
|
||||
- [x] [DNLNet (ECCV'2020)](configs/dnlnet)
|
||||
- [x] [PointRend (CVPR'2020)](configs/point_rend)
|
||||
- [x] [CGNet (TIP'2020)](configs/cgnet)
|
||||
- [x] [BiSeNetV2 (IJCV'2021)](configs/bisenetv2)
|
||||
- [x] [STDC (CVPR'2021)](configs/stdc)
|
||||
- [x] [SETR (CVPR'2021)](configs/setr)
|
||||
- [x] [DPT (ArXiv'2021)](configs/dpt)
|
||||
- [x] [Segmenter (ICCV'2021)](configs/segmenter)
|
||||
- [x] [SegFormer (NeurIPS'2021)](configs/segformer)
|
||||
- [x] [K-Net (NeurIPS'2021)](configs/knet)
|
||||
|
||||
已支持的数据集:
|
||||
|
||||
- [x] [Cityscapes](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#cityscapes)
|
||||
- [x] [PASCAL VOC](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#pascal-voc)
|
||||
- [x] [ADE20K](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#ade20k)
|
||||
- [x] [Pascal Context](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#pascal-context)
|
||||
- [x] [COCO-Stuff 10k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#coco-stuff-10k)
|
||||
- [x] [COCO-Stuff 164k](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#coco-stuff-164k)
|
||||
- [x] [CHASE_DB1](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#chase-db1)
|
||||
- [x] [DRIVE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#drive)
|
||||
- [x] [HRF](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#hrf)
|
||||
- [x] [STARE](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#stare)
|
||||
- [x] [Dark Zurich](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#dark-zurich)
|
||||
- [x] [Nighttime Driving](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#nighttime-driving)
|
||||
- [x] [LoveDA](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#loveda)
|
||||
- [x] [Potsdam](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isprs-potsdam)
|
||||
- [x] [Vaihingen](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isprs-vaihingen)
|
||||
- [x] [iSAID](https://github.com/open-mmlab/mmsegmentation/blob/master/docs/zh_cn/dataset_prepare.md#isaid)
|
||||
|
||||
## 常见问题
|
||||
|
||||
如果遇到问题,请参考 [常见问题解答](docs/zh_cn/faq.md)。
|
||||
|
||||
## 贡献指南
|
||||
|
||||
我们感谢所有的贡献者为改进和提升 MMSegmentation 所作出的努力。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。
|
||||
|
||||
## 致谢
|
||||
|
||||
MMSegmentation 是一个由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。
|
||||
|
||||
## 引用
|
||||
|
||||
如果你觉得本项目对你的研究工作有所帮助,请参考如下 bibtex 引用 MMSegmentation。
|
||||
|
||||
```bibtex
|
||||
@misc{mmseg2020,
|
||||
title={{MMSegmentation}: OpenMMLab Semantic Segmentation Toolbox and Benchmark},
|
||||
author={MMSegmentation Contributors},
|
||||
howpublished = {\url{https://github.com/open-mmlab/mmsegmentation}},
|
||||
year={2020}
|
||||
}
|
||||
```
|
||||
|
||||
## 开源许可证
|
||||
|
||||
`MMSegmentation` 目前以 Apache 2.0 的许可证发布,但是其中有一部分功能并不是使用的 Apache2.0 许可证,我们在 [许可证](LICENSES.md) 中详细地列出了这些功能以及他们对应的许可证,如果您正在从事盈利性活动,请谨慎参考此文档。
|
||||
|
||||
## OpenMMLab 的其他项目
|
||||
|
||||
- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库
|
||||
- [MIM](https://github.com/open-mmlab/mim): MIM 是 OpenMMlab 项目、算法、模型的统一入口
|
||||
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱
|
||||
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
|
||||
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
|
||||
- [MMRotate](https://github.com/open-mmlab/mmrotate): OpenMMLab 旋转框检测工具箱与测试基准
|
||||
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
|
||||
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包
|
||||
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
|
||||
- [MMHuman3D](https://github.com/open-mmlab/mmhuman3d): OpenMMLab 人体参数化模型工具箱与测试基准
|
||||
- [MMSelfSup](https://github.com/open-mmlab/mmselfsup): OpenMMLab 自监督学习工具箱与测试基准
|
||||
- [MMRazor](https://github.com/open-mmlab/mmrazor): OpenMMLab 模型压缩工具箱与测试基准
|
||||
- [MMFewShot](https://github.com/open-mmlab/mmfewshot): OpenMMLab 少样本学习工具箱与测试基准
|
||||
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
|
||||
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
|
||||
- [MMFlow](https://github.com/open-mmlab/mmflow): OpenMMLab 光流估计工具箱与测试基准
|
||||
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
|
||||
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱
|
||||
- [MMDeploy](https://github.com/open-mmlab/mmdeploy): OpenMMLab 模型部署框架
|
||||
|
||||
## 欢迎加入 OpenMMLab 社区
|
||||
|
||||
扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 [OpenMMLab 团队](https://jq.qq.com/?_wv=1027&k=aCvMxdr3) 以及 [MMSegmentation](https://jq.qq.com/?_wv=1027&k=9sprS2YO) 的 QQ 群。
|
||||
|
||||
<div align="center">
|
||||
<img src="docs/zh_cn/imgs/zhihu_qrcode.jpg" height="400" /> <img src="docs/zh_cn/imgs/qq_group_qrcode.jpg" height="400" />
|
||||
</div>
|
||||
|
||||
我们会在 OpenMMLab 社区为大家
|
||||
|
||||
- 📢 分享 AI 框架的前沿核心技术
|
||||
- 💻 解读 PyTorch 常用模块源码
|
||||
- 📰 发布 OpenMMLab 的相关新闻
|
||||
- 🚀 介绍 OpenMMLab 开发的前沿算法
|
||||
- 🏃 获取更高效的问题答疑和意见反馈
|
||||
- 🔥 提供与各行各业开发者充分交流的平台
|
||||
|
||||
干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬
|
||||
@ -0,0 +1,53 @@
|
||||
# dataset settings
|
||||
dataset_type = 'CustomDataset' # need to change
|
||||
data_root = 'data/my_dataset_v7' # need to change
|
||||
img_norm_cfg = dict(
|
||||
mean=[127.93135507, 116.76565979, 103.67335042], std=[49.55883976, 47.7692082, 50.7934459], to_rgb=True) # need to calculate
|
||||
crop_size = (512, 512) # need to change
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(512, 512)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', flip_ratio=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(512, 512), # need to change
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=2, # need to change
|
||||
workers_per_gpu=1, # need to change
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/train', # need to change
|
||||
ann_dir='ann_dir/train', # need to change
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/val',# need to change
|
||||
ann_dir='ann_dir/val',# need to change
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/test',# need to change
|
||||
ann_dir='ann_dir/test',# need to change
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,54 @@
|
||||
# dataset settings
|
||||
dataset_type = 'ADE20KDataset'
|
||||
data_root = 'data/ade/ADEChallengeData2016'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations', reduce_zero_label=True),
|
||||
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 512),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/training',
|
||||
ann_dir='annotations/training',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,54 @@
|
||||
# dataset settings
|
||||
dataset_type = 'ADE20KDataset'
|
||||
data_root = 'data/ade/ADEChallengeData2016'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (640, 640)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations', reduce_zero_label=True),
|
||||
dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2560, 640),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/training',
|
||||
ann_dir='annotations/training',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,59 @@
|
||||
# dataset settings
|
||||
dataset_type = 'ChaseDB1Dataset'
|
||||
data_root = 'data/CHASE_DB1'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
img_scale = (960, 999)
|
||||
crop_size = (128, 128)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=img_scale,
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
]
|
||||
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type='RepeatDataset',
|
||||
times=40000,
|
||||
dataset=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/training',
|
||||
ann_dir='annotations/training',
|
||||
pipeline=train_pipeline)),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,54 @@
|
||||
# dataset settings
|
||||
dataset_type = 'CityscapesDataset'
|
||||
data_root = 'data/cityscapes/'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 1024)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 1024),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=2,
|
||||
workers_per_gpu=2,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='leftImg8bit/train',
|
||||
ann_dir='gtFine/train',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='leftImg8bit/val',
|
||||
ann_dir='gtFine/val',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='leftImg8bit/val',
|
||||
ann_dir='gtFine/val',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,35 @@
|
||||
_base_ = './cityscapes.py'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (1024, 1024)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 1024),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
train=dict(pipeline=train_pipeline),
|
||||
val=dict(pipeline=test_pipeline),
|
||||
test=dict(pipeline=test_pipeline))
|
||||
@ -0,0 +1,35 @@
|
||||
_base_ = './cityscapes.py'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (768, 768)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2049, 1025),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
train=dict(pipeline=train_pipeline),
|
||||
val=dict(pipeline=test_pipeline),
|
||||
test=dict(pipeline=test_pipeline))
|
||||
@ -0,0 +1,35 @@
|
||||
_base_ = './cityscapes.py'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (769, 769)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2049, 1025), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2049, 1025),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
train=dict(pipeline=train_pipeline),
|
||||
val=dict(pipeline=test_pipeline),
|
||||
test=dict(pipeline=test_pipeline))
|
||||
@ -0,0 +1,35 @@
|
||||
_base_ = './cityscapes.py'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (832, 832)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2048, 1024), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 1024),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
train=dict(pipeline=train_pipeline),
|
||||
val=dict(pipeline=test_pipeline),
|
||||
test=dict(pipeline=test_pipeline))
|
||||
@ -0,0 +1,57 @@
|
||||
# dataset settings
|
||||
dataset_type = 'COCOStuffDataset'
|
||||
data_root = 'data/coco_stuff10k'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations', reduce_zero_label=True),
|
||||
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 512),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
reduce_zero_label=True,
|
||||
img_dir='images/train2014',
|
||||
ann_dir='annotations/train2014',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
reduce_zero_label=True,
|
||||
img_dir='images/test2014',
|
||||
ann_dir='annotations/test2014',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
reduce_zero_label=True,
|
||||
img_dir='images/test2014',
|
||||
ann_dir='annotations/test2014',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,54 @@
|
||||
# dataset settings
|
||||
dataset_type = 'COCOStuffDataset'
|
||||
data_root = 'data/coco_stuff164k'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 512),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/train2017',
|
||||
ann_dir='annotations/train2017',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/val2017',
|
||||
ann_dir='annotations/val2017',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/val2017',
|
||||
ann_dir='annotations/val2017',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,59 @@
|
||||
# dataset settings
|
||||
dataset_type = 'DRIVEDataset'
|
||||
data_root = 'data/DRIVE'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
img_scale = (584, 565)
|
||||
crop_size = (64, 64)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=img_scale,
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
]
|
||||
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type='RepeatDataset',
|
||||
times=40000,
|
||||
dataset=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/training',
|
||||
ann_dir='annotations/training',
|
||||
pipeline=train_pipeline)),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,59 @@
|
||||
# dataset settings
|
||||
dataset_type = 'HRFDataset'
|
||||
data_root = 'data/HRF'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
img_scale = (2336, 3504)
|
||||
crop_size = (256, 256)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg'])
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=img_scale,
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img'])
|
||||
])
|
||||
]
|
||||
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type='RepeatDataset',
|
||||
times=40000,
|
||||
dataset=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/training',
|
||||
ann_dir='annotations/training',
|
||||
pipeline=train_pipeline)),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='images/validation',
|
||||
ann_dir='annotations/validation',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,62 @@
|
||||
# dataset settings
|
||||
dataset_type = 'iSAIDDataset'
|
||||
data_root = 'data/iSAID'
|
||||
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
"""
|
||||
This crop_size setting is followed by the implementation of
|
||||
`PointFlow: Flowing Semantics Through Points for Aerial Image
|
||||
Segmentation <https://arxiv.org/pdf/2103.06564.pdf>`_.
|
||||
"""
|
||||
|
||||
crop_size = (896, 896)
|
||||
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(896, 896), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(896, 896),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/train',
|
||||
ann_dir='ann_dir/train',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/val',
|
||||
ann_dir='ann_dir/val',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/val',
|
||||
ann_dir='ann_dir/val',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,54 @@
|
||||
# dataset settings
|
||||
dataset_type = 'LoveDADataset'
|
||||
data_root = 'data/loveDA'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations', reduce_zero_label=True),
|
||||
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(1024, 1024),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/train',
|
||||
ann_dir='ann_dir/train',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/val',
|
||||
ann_dir='ann_dir/val',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='img_dir/val',
|
||||
ann_dir='ann_dir/val',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,60 @@
|
||||
# dataset settings
|
||||
dataset_type = 'PascalContextDataset'
|
||||
data_root = 'data/VOCdevkit/VOC2010/'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
|
||||
img_scale = (520, 520)
|
||||
crop_size = (480, 480)
|
||||
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=img_scale,
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/train.txt',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/val.txt',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/val.txt',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,60 @@
|
||||
# dataset settings
|
||||
dataset_type = 'PascalContextDataset59'
|
||||
data_root = 'data/VOCdevkit/VOC2010/'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
|
||||
img_scale = (520, 520)
|
||||
crop_size = (480, 480)
|
||||
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations', reduce_zero_label=True),
|
||||
dict(type='Resize', img_scale=img_scale, ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=img_scale,
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/train.txt',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/val.txt',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClassContext',
|
||||
split='ImageSets/SegmentationContext/val.txt',
|
||||
pipeline=test_pipeline))
|
||||
@ -0,0 +1,57 @@
|
||||
# dataset settings
|
||||
dataset_type = 'PascalVOCDataset'
|
||||
data_root = 'data/VOCdevkit/VOC2012'
|
||||
img_norm_cfg = dict(
|
||||
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
|
||||
crop_size = (512, 512)
|
||||
train_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(type='LoadAnnotations'),
|
||||
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
|
||||
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
|
||||
dict(type='RandomFlip', prob=0.5),
|
||||
dict(type='PhotoMetricDistortion'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
|
||||
dict(type='DefaultFormatBundle'),
|
||||
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
|
||||
]
|
||||
test_pipeline = [
|
||||
dict(type='LoadImageFromFile'),
|
||||
dict(
|
||||
type='MultiScaleFlipAug',
|
||||
img_scale=(2048, 512),
|
||||
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
|
||||
flip=False,
|
||||
transforms=[
|
||||
dict(type='Resize', keep_ratio=True),
|
||||
dict(type='RandomFlip'),
|
||||
dict(type='Normalize', **img_norm_cfg),
|
||||
dict(type='ImageToTensor', keys=['img']),
|
||||
dict(type='Collect', keys=['img']),
|
||||
])
|
||||
]
|
||||
data = dict(
|
||||
samples_per_gpu=4,
|
||||
workers_per_gpu=4,
|
||||
train=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClass',
|
||||
split='ImageSets/Segmentation/train.txt',
|
||||
pipeline=train_pipeline),
|
||||
val=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClass',
|
||||
split='ImageSets/Segmentation/val.txt',
|
||||
pipeline=test_pipeline),
|
||||
test=dict(
|
||||
type=dataset_type,
|
||||
data_root=data_root,
|
||||
img_dir='JPEGImages',
|
||||
ann_dir='SegmentationClass',
|
||||
split='ImageSets/Segmentation/val.txt',
|
||||
pipeline=test_pipeline))
|
||||
Some files were not shown because too many files have changed in this diff Show More
불러오는 중...
Reference in New Issue
Block a user