first commit
11.py  ·  21 lines  ·  Normal file
@@ -0,0 +1,21 @@
import os

import torch

from algorithm.yolov5.models.common import DetectMultiBackend
from algorithm.yolov5.models.yolo import Detect, Model, RotationDetect

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# model_state_dict = torch.load('weight/remote_sensing/oriented.pt')
model = DetectMultiBackend(weights='weight/remote_sensing/oriented.pt', device=device, dnn=True)
# model.load_state_dict(model_state_dict)
# model = torch.hub.load(os.getcwd() + "/algorithm/yolov5", 'custom', source='local', path='./weight/remote_sensing/oriented.pt', force_reload=True)
print(RotationDetect())
for m in model.modules():
    t = type(m)
    print(t)
    if isinstance(m, Detect):  # was `if Detect:`, which is always truthy
        print(1)
        # Rebinding the loop variable does not modify the model itself;
        # see the swap sketch below for a replacement that sticks.
        m = RotationDetect()

# print(model)
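The loop above only rebinds `m`, so the loaded model is left unchanged. A minimal sketch of a swap that actually takes effect, assuming `RotationDetect()` is a drop-in replacement for the `Detect` head (this helper is illustrative, not part of the commit):

```python
def swap_detect_heads(root):
    # Collect (parent, attribute) pairs first, then swap, so the module
    # tree is never mutated while it is being traversed.
    targets = [(p, n) for p in root.modules()
               for n, c in p.named_children() if isinstance(c, Detect)]
    for parent, name in targets:
        setattr(parent, name, RotationDetect())

swap_detect_heads(model)
```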
LICENSE  ·  21 lines  ·  Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Kadir Tuna

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
MyThreadFunc.py  ·  53 lines  ·  Normal file
@@ -0,0 +1,53 @@
import inspect
import ctypes
import threading

class MyThreadFunc(object):
    '''
    Thread wrapper that supports manually terminating the thread.
    '''
    def __init__(self, func, argsTup):
        # Run the target through a wrapper so its return value can be
        # retrieved later; Thread.start() itself always returns None.
        self.result = None
        self.myThread = threading.Thread(target=self._run, args=(func, argsTup))
        self.myThread.daemon = True  # was `self.daemon = True`, which only set an attribute on the wrapper

    def _run(self, func, argsTup):
        self.result = func(*argsTup)

    def start(self):
        print('Thread started')
        self.myThread.start()

    def join(self):
        self.myThread.join()

    def get_result(self):
        try:
            return self.result
        except Exception as e:
            return None

    def state(self):
        status = self.myThread.is_alive()
        print('Thread status: {0}'.format(status))
        return status

    def stop(self):
        print('Thread terminating')
        # self.myThread.join()
        try:
            # Raise SystemExit in the target thread several times, since the
            # exception is only delivered when the thread next runs bytecode.
            for i in range(5):
                self._async_raise(self.myThread.ident, SystemExit)
                # time.sleep(1)
        except Exception as e:
            print(e)

    def _async_raise(self, tid, exctype):
        """Raises the exception in the thread with id tid; performs cleanup if needed."""
        tid = ctypes.c_long(tid)
        if not inspect.isclass(exctype):
            exctype = type(exctype)
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res != 1:
            # If it returns a number greater than one, you're in trouble,
            # and you should call it again with exc=NULL to revert the effect.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")
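A quick usage sketch for the wrapper above (the worker function and timings are illustrative):

```python
import time

def worker(n):
    time.sleep(n)
    return n

t = MyThreadFunc(worker, (30,))
t.start()
time.sleep(1)
t.state()       # True while the worker is still running
t.stop()        # asynchronously raises SystemExit inside the worker
print(t.get_result())  # None here, since the worker was terminated early
```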
README.md  ·  13 lines  ·  Normal file
@@ -0,0 +1,13 @@
# Algorithm System

Server-side backend code for the algorithm system.

To run: `python app.py`

You can edit `app.py` to match your own server configuration:
`app.run(host='10.51.10.122', debug=True, port=5001)`

Model download: https://pan.baidu.com/s/12F5H4hC1VQIOAqLdCLzZtA?pwd=upup
Extraction code: upup

After downloading, place the models in the `weight` folder.
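For reference, a minimal sketch of the assumed Flask entry point (the real `app.py` is not part of this diff; the route and strings are illustrative):

```python
from flask import Flask

app = Flask(__name__)

@app.route("/")
def index():
    # Placeholder route; the real app exposes the algorithm endpoints.
    return "algorithm server running"

if __name__ == "__main__":
    # Adjust host and port to your own server configuration.
    app.run(host="10.51.10.122", debug=True, port=5001)
```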
Scripts/activate  ·  83 lines  ·  Normal file
@@ -0,0 +1,83 @@
# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly


if [ "${BASH_SOURCE-}" = "$0" ]; then
    echo "You must source this script: \$ source $0" >&2
    exit 33
fi

deactivate () {
    unset -f pydoc >/dev/null 2>&1 || true

    # reset old environment variables
    # ! [ -z ${VAR+_} ] returns true if VAR is declared at all
    if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
        PATH="$_OLD_VIRTUAL_PATH"
        export PATH
        unset _OLD_VIRTUAL_PATH
    fi
    if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
        PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
        export PYTHONHOME
        unset _OLD_VIRTUAL_PYTHONHOME
    fi

    # The hash command must be called to get it to forget past
    # commands. Without forgetting past commands the $PATH changes
    # we made may not be respected
    hash -r 2>/dev/null

    if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
        PS1="$_OLD_VIRTUAL_PS1"
        export PS1
        unset _OLD_VIRTUAL_PS1
    fi

    unset VIRTUAL_ENV
    if [ ! "${1-}" = "nondestructive" ] ; then
    # Self destruct!
        unset -f deactivate
    fi
}

# unset irrelevant variables
deactivate nondestructive

VIRTUAL_ENV='C:\Users\ka\Desktop\Programming Projects\Python Projeleri\LiveVideoServer'
if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
    VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
fi
export VIRTUAL_ENV

_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/Scripts:$PATH"
export PATH

# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
    _OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
    unset PYTHONHOME
fi

if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
    _OLD_VIRTUAL_PS1="${PS1-}"
    if [ "x" != x ] ; then
        PS1="() ${PS1-}"
    else
        PS1="(`basename \"$VIRTUAL_ENV\"`) ${PS1-}"
    fi
    export PS1
fi

# Make sure to unalias pydoc if it's already there
alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true

pydoc () {
    python -m pydoc "$@"
}

# The hash command must be called to get it to forget past
# commands. Without forgetting past commands the $PATH changes
# we made may not be respected
hash -r 2>/dev/null
Scripts/activate.bat  ·  39 lines  ·  Normal file
@@ -0,0 +1,39 @@
@echo off

set "VIRTUAL_ENV=C:\Users\ka\Desktop\Programming Projects\Python Projeleri\LiveVideoServer"

if defined _OLD_VIRTUAL_PROMPT (
    set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
) else (
    if not defined PROMPT (
        set "PROMPT=$P$G"
    )
    if not defined VIRTUAL_ENV_DISABLE_PROMPT (
        set "_OLD_VIRTUAL_PROMPT=%PROMPT%"
    )
)
if not defined VIRTUAL_ENV_DISABLE_PROMPT (
    if "" NEQ "" (
        set "PROMPT=() %PROMPT%"
    ) else (
        for %%d in ("%VIRTUAL_ENV%") do set "PROMPT=(%%~nxd) %PROMPT%"
    )
)

REM Don't use () to avoid problems with them in %PATH%
if defined _OLD_VIRTUAL_PYTHONHOME goto ENDIFVHOME
    set "_OLD_VIRTUAL_PYTHONHOME=%PYTHONHOME%"
:ENDIFVHOME

set PYTHONHOME=

REM if defined _OLD_VIRTUAL_PATH (
if not defined _OLD_VIRTUAL_PATH goto ENDIFVPATH1
    set "PATH=%_OLD_VIRTUAL_PATH%"
:ENDIFVPATH1
REM ) else (
if defined _OLD_VIRTUAL_PATH goto ENDIFVPATH2
    set "_OLD_VIRTUAL_PATH=%PATH%"
:ENDIFVPATH2

set "PATH=%VIRTUAL_ENV%\Scripts;%PATH%"
Scripts/activate.fish  ·  100 lines  ·  Normal file
@@ -0,0 +1,100 @@
# This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
# Do not run it directly.

function _bashify_path -d "Converts a fish path to something bash can recognize"
    set fishy_path $argv
    set bashy_path $fishy_path[1]
    for path_part in $fishy_path[2..-1]
        set bashy_path "$bashy_path:$path_part"
    end
    echo $bashy_path
end

function _fishify_path -d "Converts a bash path to something fish can recognize"
    echo $argv | tr ':' '\n'
end

function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
    # reset old environment variables
    if test -n "$_OLD_VIRTUAL_PATH"
        # https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
        if test (echo $FISH_VERSION | head -c 1) -lt 3
            set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
        else
            set -gx PATH $_OLD_VIRTUAL_PATH
        end
        set -e _OLD_VIRTUAL_PATH
    end

    if test -n "$_OLD_VIRTUAL_PYTHONHOME"
        set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
        set -e _OLD_VIRTUAL_PYTHONHOME
    end

    if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
       and functions -q _old_fish_prompt
        # Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
        set -l fish_function_path

        # Erase virtualenv's `fish_prompt` and restore the original.
        functions -e fish_prompt
        functions -c _old_fish_prompt fish_prompt
        functions -e _old_fish_prompt
        set -e _OLD_FISH_PROMPT_OVERRIDE
    end

    set -e VIRTUAL_ENV

    if test "$argv[1]" != 'nondestructive'
        # Self-destruct!
        functions -e pydoc
        functions -e deactivate
        functions -e _bashify_path
        functions -e _fishify_path
    end
end

# Unset irrelevant variables.
deactivate nondestructive

set -gx VIRTUAL_ENV 'C:\Users\ka\Desktop\Programming Projects\Python Projeleri\LiveVideoServer'

# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
if test (echo $FISH_VERSION | head -c 1) -lt 3
   set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
else
    set -gx _OLD_VIRTUAL_PATH $PATH
end
set -gx PATH "$VIRTUAL_ENV"'/Scripts' $PATH

# Unset `$PYTHONHOME` if set.
if set -q PYTHONHOME
    set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
    set -e PYTHONHOME
end

function pydoc
    python -m pydoc $argv
end

if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
    # Copy the current `fish_prompt` function as `_old_fish_prompt`.
    functions -c fish_prompt _old_fish_prompt

    function fish_prompt
        # Run the user's prompt first; it might depend on (pipe)status.
        set -l prompt (_old_fish_prompt)

        # Prompt override provided?
        # If not, just prepend the environment name.
        if test -n ''
            printf '(%s) ' ''
        else
            printf '(%s) ' (basename "$VIRTUAL_ENV")
        end

        string join -- \n $prompt # handle multi-line prompts
    end

    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
Scripts/activate.nu  ·  41 lines  ·  Normal file
@@ -0,0 +1,41 @@
# Setting all environment variables for the venv
let path-name = (if ((sys).host.name == "Windows") { "Path" } { "PATH" })
let virtual-env = "C:\Users\ka\Desktop\Programming Projects\Python Projeleri\LiveVideoServer"
let bin = "Scripts"
let path-sep = ";"

let old-path = ($nu.path | str collect ($path-sep))

let venv-path = ([$virtual-env $bin] | path join)
let new-path = ($nu.path | prepend $venv-path | str collect ($path-sep))

# environment variables that will be batch-loaded into the virtual env
let new-env = ([
    [name, value];
    [$path-name $new-path]
    [_OLD_VIRTUAL_PATH $old-path]
    [VIRTUAL_ENV $virtual-env]
])

load-env $new-env

# Creating the new prompt for the session
let virtual_prompt = (if ("" != "") {
    "() "
} {
    (build-string '(' ($virtual-env | path basename) ') ')
}
)

# If there is no default prompt, then only the env is printed in the prompt
let new_prompt = (if ( config | select prompt | empty? ) {
    ($"build-string '($virtual_prompt)'")
} {
    ($"build-string '($virtual_prompt)' (config get prompt | str find-replace "build-string" "")")
})
let-env PROMPT_COMMAND = $new_prompt

# We are using alias as the function definitions because only aliases can be
# removed from the scope
alias pydoc = python -m pydoc
alias deactivate = source "C:\Users\ka\Desktop\Programming Projects\Python Projeleri\LiveVideoServer\Scripts\deactivate.nu"
Scripts/activate.ps1  ·  60 lines  ·  Normal file
@@ -0,0 +1,60 @@
$script:THIS_PATH = $myinvocation.mycommand.path
$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent

function global:deactivate([switch] $NonDestructive) {
    if (Test-Path variable:_OLD_VIRTUAL_PATH) {
        $env:PATH = $variable:_OLD_VIRTUAL_PATH
        Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
    }

    if (Test-Path function:_old_virtual_prompt) {
        $function:prompt = $function:_old_virtual_prompt
        Remove-Item function:\_old_virtual_prompt
    }

    if ($env:VIRTUAL_ENV) {
        Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
    }

    if (!$NonDestructive) {
        # Self destruct!
        Remove-Item function:deactivate
        Remove-Item function:pydoc
    }
}

function global:pydoc {
    python -m pydoc $args
}

# unset irrelevant variables
deactivate -nondestructive

$VIRTUAL_ENV = $BASE_DIR
$env:VIRTUAL_ENV = $VIRTUAL_ENV

New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH

$env:PATH = "$env:VIRTUAL_ENV/Scripts;" + $env:PATH
if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
    function global:_old_virtual_prompt {
        ""
    }
    $function:_old_virtual_prompt = $function:prompt

    if ("" -ne "") {
        function global:prompt {
            # Add the custom prefix to the existing prompt
            $previous_prompt_value = & $function:_old_virtual_prompt
            ("() " + $previous_prompt_value)
        }
    }
    else {
        function global:prompt {
            # Add a prefix to the current prompt, but don't discard it.
            $previous_prompt_value = & $function:_old_virtual_prompt
            $new_prompt_value = "($( Split-Path $env:VIRTUAL_ENV -Leaf )) "
            ($new_prompt_value + $previous_prompt_value)
        }
    }
}
Scripts/activate_this.py  ·  32 lines  ·  Normal file
@@ -0,0 +1,32 @@
# -*- coding: utf-8 -*-
"""Activate virtualenv for current interpreter:

Use exec(open(this_file).read(), {'__file__': this_file}).

This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
"""
import os
import site
import sys

try:
    abs_file = os.path.abspath(__file__)
except NameError:
    raise AssertionError("You must use exec(open(this_file).read(), {'__file__': this_file}))")

bin_dir = os.path.dirname(abs_file)
base = bin_dir[: -len("Scripts") - 1]  # strip away the bin part from the __file__, plus the path separator

# prepend bin to PATH (this file is inside the bin directory)
os.environ["PATH"] = os.pathsep.join([bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
os.environ["VIRTUAL_ENV"] = base  # virtual env is right above bin directory

# add the virtual environments libraries to the host python import mechanism
prev_length = len(sys.path)
for lib in "..\Lib\site-packages".split(os.pathsep):
    path = os.path.realpath(os.path.join(bin_dir, lib))
    site.addsitedir(path.decode("utf-8") if "" else path)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]

sys.real_prefix = sys.prefix
sys.prefix = base
Scripts/convert-caffe2-to-onnx.exe  ·  BIN  ·  Normal file
Scripts/convert-onnx-to-caffe2.exe  ·  BIN  ·  Normal file
Scripts/deactivate.bat  ·  19 lines  ·  Normal file
@@ -0,0 +1,19 @@
@echo off

set VIRTUAL_ENV=

REM Don't use () to avoid problems with them in %PATH%
if not defined _OLD_VIRTUAL_PROMPT goto ENDIFVPROMPT
    set "PROMPT=%_OLD_VIRTUAL_PROMPT%"
    set _OLD_VIRTUAL_PROMPT=
:ENDIFVPROMPT

if not defined _OLD_VIRTUAL_PYTHONHOME goto ENDIFVHOME
    set "PYTHONHOME=%_OLD_VIRTUAL_PYTHONHOME%"
    set _OLD_VIRTUAL_PYTHONHOME=
:ENDIFVHOME

if not defined _OLD_VIRTUAL_PATH goto ENDIFVPATH
    set "PATH=%_OLD_VIRTUAL_PATH%"
    set _OLD_VIRTUAL_PATH=
:ENDIFVPATH
Scripts/deactivate.nu  ·  11 lines  ·  Normal file
@@ -0,0 +1,11 @@
# Setting the old path
let path-name = (if ((sys).host.name == "Windows") { "Path" } { "PATH" })
let-env $path-name = $nu.env._OLD_VIRTUAL_PATH

# Unsetting the environment variables that were created when activating the env
unlet-env VIRTUAL_ENV
unlet-env _OLD_VIRTUAL_PATH
unlet-env PROMPT_COMMAND

unalias pydoc
unalias deactivate
Scripts/f2py.exe  ·  BIN  ·  Normal file
Scripts/flask.exe  ·  BIN  ·  Normal file
Scripts/fonttools.exe  ·  BIN  ·  Normal file
Scripts/ipython.exe  ·  BIN  ·  Normal file
Scripts/ipython3.exe  ·  BIN  ·  Normal file
Scripts/isympy.exe  ·  BIN  ·  Normal file
Scripts/normalizer.exe  ·  BIN  ·  Normal file
Scripts/pip.exe  ·  BIN  ·  Normal file
Scripts/pip3.9.exe  ·  BIN  ·  Normal file
Scripts/pip3.exe  ·  BIN  ·  Normal file
Scripts/pydoc.bat  ·  1 line  ·  Normal file
@@ -0,0 +1 @@
python.exe -m pydoc %*
Scripts/pyftmerge.exe  ·  BIN  ·  Normal file
Scripts/pyftsubset.exe  ·  BIN  ·  Normal file
Scripts/pygmentize.exe  ·  0 lines  ·  Normal file
Scripts/pygmentize.exe.deleteme  ·  0 lines  ·  Normal file
Scripts/python.exe  ·  BIN  ·  Normal file
Scripts/pythonw.exe  ·  BIN  ·  Normal file
Scripts/torchrun.exe  ·  BIN  ·  Normal file
Scripts/tqdm.exe  ·  BIN  ·  Normal file
Scripts/ttx.exe  ·  BIN  ·  Normal file
Scripts/wheel-3.9.exe  ·  BIN  ·  Normal file
Scripts/wheel.exe  ·  BIN  ·  Normal file
Scripts/wheel3.9.exe  ·  BIN  ·  Normal file
Scripts/wheel3.exe  ·  BIN  ·  Normal file
algorithm/Car_recognition/.gitignore  ·  31 lines  ·  vendored  ·  Normal file
@@ -0,0 +1,31 @@
# .gitignore
# First, ignore everything
*
# But do not ignore directories
!*/
# Ignore these specific directories
ut/
runs/
.vscode/
build/
result1/
mytest/
*.pyc
# Do not ignore the file types listed below
!*.cpp
!*.h
!*.hpp
!*.c
!.gitignore
!*.py
!*.sh
!*.npy
!*.jpg
!*.pt
!*.npy
!*.pth
!*.png
!*.yaml
!*.ttf
!*.txt
!*.md
algorithm/Car_recognition/README.md  ·  70 lines  ·  Normal file
@@ -0,0 +1,70 @@
## Vehicle Recognition System

**Currently supports vehicle detection plus license plate detection and recognition**

Requirements: python >= 3.6, pytorch >= 1.7

#### **Image demo:**

```
python Car_recognition.py --detect_model weights/detect.pt  --rec_model weights/plate_rec_color.pth --image_path imgs --output result
```

This runs on the imgs folder; results are saved in the result folder.

## **Detection training**

1. **Download the dataset:** [datasets](https://pan.baidu.com/s/1YSURJvo4v1N5x7NVsxEA_Q) extraction code: 3s0j. The data was selected and converted from the CCPD and CRPD datasets.
   The dataset uses the YOLO format (see the parsing sketch after this list):

   ```
   label x y w h  pt1x pt1y pt2x pt2y pt3x pt3y pt4x pt4y
   ```

   The keypoints are ordered top-left, top-right, bottom-right, bottom-left.
   All coordinates are normalized: x, y are the box center divided by the image width/height, w, h are the box width/height divided by the image width/height, and ptx, pty are the keypoint coordinates divided by the width/height.

   Vehicle annotations need no keypoints; just set all keypoints to -1.
2. **Edit the train and val paths in data/widerface.yaml to point at your own data:**

   ```
   train: /your/train/path # change to your path
   val: /your/val/path     # change to your path
   # number of classes
   nc: 3                # 3 classes: 0 single-layer plate, 1 double-layer plate, 2 vehicle

   # class names
   names: [ 'single_plate','double_plate','Car']

   ```
3. **Train:**

   ```
   python3 train.py --data data/plateAndCar.yaml --cfg models/yolov5n-0.5.yaml --weights weights/detect.pt --epoch 250
   ```

   Results are stored in the run folder.

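A small illustrative parser for one line of the label format above (the function name and the example values are hypothetical, not part of the repo):

```python
def parse_label_line(line, img_w, img_h):
    # label cx cy w h pt1x pt1y ... pt4x pt4y, all normalized to [0, 1]
    vals = line.split()
    cls = int(vals[0])
    cx, cy, w, h = (float(v) for v in vals[1:5])
    box = (cx * img_w, cy * img_h, w * img_w, h * img_h)  # pixel units
    coords = [float(v) for v in vals[5:13]]
    pts = []
    for px, py in zip(coords[0::2], coords[1::2]):
        # Vehicle boxes carry no keypoints: every value is -1.
        pts.append(None if px < 0 else (px * img_w, py * img_h))
    return cls, box, pts

# Example: a single-layer plate annotation in a 1920x1080 image.
print(parse_label_line("0 0.5 0.5 0.2 0.1 0.4 0.45 0.6 0.45 0.6 0.55 0.4 0.55", 1920, 1080))
```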
## **License plate recognition training**

Plate recognition training is covered here:

[License plate recognition training](https://github.com/we0091234/crnn_plate_recognition)

## References

* [https://github.com/we0091234/Chinese_license_plate_detection_recognition](https://github.com/we0091234/Chinese_license_plate_detection_recognition)
* [https://github.com/deepcam-cn/yolov5-face](https://github.com/deepcam-cn/yolov5-face)
* [https://github.com/meijieru/crnn.pytorch](https://github.com/meijieru/crnn.pytorch)

## TODO

Vehicle type, vehicle color, brand, and more.

## Contact

**For questions, open an issue or ask in QQ group 823419837**
algorithm/Car_recognition/car_detection.py  ·  286 lines  ·  Normal file
@@ -0,0 +1,286 @@
# -*- coding: UTF-8 -*-
import argparse
import time
import os
import cv2
import torch
from numpy import random
import copy
import numpy as np
from algorithm.Car_recognition.plate_recognition.plate_rec import get_plate_result,allFilePath,init_model,cv_imread
# from plate_recognition.plate_cls import cv_imread
from algorithm.Car_recognition.plate_recognition.double_plate_split_merge import get_split_merge
from algorithm.Car_recognition.plate_recognition.color_rec import plate_color_rec,init_color_model
from algorithm.Car_recognition.car_recognition.car_rec import init_car_rec_model,get_color_and_score
from algorithm.Car_recognition.utils.datasets import letterbox
from algorithm.Car_recognition.utils.general import check_img_size, non_max_suppression_face, scale_coords
from algorithm.Car_recognition.utils.cv_puttext import cv2ImgAddText

from read_data import LoadImages, LoadStreams
import torch.backends.cudnn as cudnn


clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)]
danger = ['危','险']  # plates containing either character mark dangerous goods
object_color = [(0,255,255),(0,255,0),(255,255,0)]
class_type = ['单层车牌','双层车牌','汽车']  # single-layer plate, double-layer plate, car


class CarDetection():

    def __init__(self, video_path=None):

        # self.detect_model = detect_model
        # self.plate_rec_model = plate_rec_model
        # self.car_rec_model = car_rec_model
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        self.detect_model = torch.load('weight/traffic/best.pt', map_location=self.device)['model'].float().fuse()
        # self.detect_model = load_model(os.getcwd() + "/weight/traffic/detect.pt")  # initialize the detection model
        self.plate_rec_model = init_model(os.getcwd() + "/weight/traffic/plate_rec_color.pth")  # initialize the plate recognition model
        self.car_rec_model = init_car_rec_model(os.getcwd() + "/weight/traffic/car_rec_color.pth")  # initialize the vehicle recognition model

        time_all = 0
        time_begin = time.time()

        # self.frame = [None]

        if video_path is not None:
            self.video_name = video_path
        else:
            self.video_name = 'vid2.mp4'  # A default video file

        self.imgsz = 384
        self.dataset = LoadImages(self.video_name, self.imgsz)

    def use_webcam(self, source):
        cudnn.benchmark = True
        self.dataset = LoadStreams(source, img_size=self.imgsz)

    def get_frame(self):
        for im0s in self.dataset:
            # print(self.dataset.mode)
            # print(self.dataset)
            if self.dataset.mode == 'stream':
                img = im0s[0].copy()
            else:
                img = im0s.copy()

            dict_list = detect_Recognition_plate(self.detect_model, img, self.device, self.plate_rec_model, car_rec_model=self.car_rec_model)
            ori_img = draw_result(img, dict_list)

            ret, jpeg = cv2.imencode(".jpg", ori_img)
            txt = str(dict_list)

        return jpeg.tobytes(), txt


def order_points(pts):  # order the four points: top-left, top-right, bottom-right, bottom-left
    rect = np.zeros((4, 2), dtype="float32")
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    return rect


def four_point_transform(image, pts):  # perspective transform to get the plate crop
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    M = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped


def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None):  # map landmark coordinates back to the original image
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2, 4, 6]] -= pad[0]  # x padding
    coords[:, [1, 3, 5, 7]] -= pad[1]  # y padding
    coords[:, :8] /= gain
    # clip_coords(coords, img0_shape)
    coords[:, 0].clamp_(0, img0_shape[1])  # x1
    coords[:, 1].clamp_(0, img0_shape[0])  # y1
    coords[:, 2].clamp_(0, img0_shape[1])  # x2
    coords[:, 3].clamp_(0, img0_shape[0])  # y2
    coords[:, 4].clamp_(0, img0_shape[1])  # x3
    coords[:, 5].clamp_(0, img0_shape[0])  # y3
    coords[:, 6].clamp_(0, img0_shape[1])  # x4
    coords[:, 7].clamp_(0, img0_shape[0])  # y4
    # coords[:, 8].clamp_(0, img0_shape[1])  # x5
    # coords[:, 9].clamp_(0, img0_shape[0])  # y5
    return coords


def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num, device, plate_rec_model, car_rec_model):
    h, w, c = img.shape
    result_dict = {}
    x1 = int(xyxy[0])
    y1 = int(xyxy[1])
    x2 = int(xyxy[2])
    y2 = int(xyxy[3])
    landmarks_np = np.zeros((4, 2))
    rect = [x1, y1, x2, y2]

    if int(class_num) == 2:  # class 2 is a vehicle
        car_roi_img = img[y1:y2, x1:x2]
        car_color, color_conf = get_color_and_score(car_rec_model, car_roi_img, device)
        result_dict['class_type'] = class_type[int(class_num)]
        result_dict['rect'] = rect                 # vehicle ROI
        result_dict['score'] = conf                # detection score of the region
        result_dict['object_no'] = int(class_num)
        result_dict['car_color'] = car_color
        result_dict['color_conf'] = color_conf
        return result_dict

    for i in range(4):
        point_x = int(landmarks[2 * i])
        point_y = int(landmarks[2 * i + 1])
        landmarks_np[i] = np.array([point_x, point_y])

    class_label = int(class_num)  # plate type: 0 single layer, 1 double layer
    roi_img = four_point_transform(img, landmarks_np)  # perspective transform to get the plate crop
    if class_label:  # double-layer plates are split and re-merged before recognition
        roi_img = get_split_merge(roi_img)
    plate_number, plate_color = get_plate_result(roi_img, device, plate_rec_model)  # recognize the crop to get the plate number and color
    for dan in danger:  # any plate containing '危' or '险' is a dangerous-goods plate
        if dan in plate_number:
            plate_number = '危险品'
    # cv2.imwrite("roi.jpg", roi_img)
    result_dict['class_type'] = class_type[class_label]
    result_dict['rect'] = rect                        # plate ROI
    result_dict['landmarks'] = landmarks_np.tolist()  # plate corner coordinates
    result_dict['plate_no'] = plate_number            # plate number
    result_dict['roi_height'] = roi_img.shape[0]      # plate crop height
    result_dict['plate_color'] = plate_color          # plate color
    result_dict['object_no'] = class_label            # 0 single layer, 1 double layer
    result_dict['score'] = conf                       # detection score of the plate region
    return result_dict


def detect_Recognition_plate(model, orgimg, device, plate_rec_model, car_rec_model=None):
    # Load model
    conf_thres = 0.3
    iou_thres = 0.5
    dict_list = []
    # orgimg = cv2.imread(image_path)  # BGR

    img0 = copy.deepcopy(orgimg)

    img0 = np.transpose(img0, (2, 0, 1))

    img = torch.from_numpy(img0)
    assert orgimg is not None, 'Image Not Found '
    # print(model)
    model.to(device)
    img = img.to(device)  # .to() is not in-place; the result must be reassigned
    img = img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)

    # Inference
    pred = model(img)[0]

    # Apply NMS
    pred = non_max_suppression_face(pred, conf_thres, iou_thres)

    # Process detections
    for i, det in enumerate(pred):  # detections per image
        if len(det):
            # Rescale boxes from img_size to im0 size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round()

            # Print results
            for c in det[:, -1].unique():
                n = (det[:, -1] == c).sum()  # detections per class

            det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round()

            for j in range(det.size()[0]):
                xyxy = det[j, :4].view(-1).tolist()
                conf = det[j, 4].cpu().numpy()
                landmarks = det[j, 5:13].view(-1).tolist()
                class_num = det[j, 13].cpu().numpy()
                result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num, device, plate_rec_model, car_rec_model)
                dict_list.append(result_dict)
    return dict_list
    # cv2.imwrite('result.jpg', orgimg)


def draw_result(orgimg, dict_list):
    result_str = ""
    for result in dict_list:
        rect_area = result['rect']
        object_no = result['object_no']
        if not object_no == 2:  # plates
            x, y, w, h = rect_area[0], rect_area[1], rect_area[2] - rect_area[0], rect_area[3] - rect_area[1]
            padding_w = 0.05 * w
            padding_h = 0.11 * h
            rect_area[0] = max(0, int(x - padding_w))
            rect_area[1] = max(0, int(y - padding_h))
            rect_area[2] = min(orgimg.shape[1], int(rect_area[2] + padding_w))
            rect_area[3] = min(orgimg.shape[0], int(rect_area[3] + padding_h))

            height_area = int(result['roi_height'] / 2)
            landmarks = result['landmarks']
            result_p = result['plate_no']
            if result['object_no'] == 0:  # single layer
                result_p += " " + result['plate_color']
            else:  # double layer
                result_p += " " + result['plate_color'] + "双层"
            result_str += result_p + " "
            for i in range(4):  # keypoints
                cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1)

            if len(result) >= 1:
                if "危险品" in result_p:  # for dangerous-goods plates, draw the text below the box
                    orgimg = cv2ImgAddText(orgimg, result_p, rect_area[0], rect_area[3], (0, 255, 0), height_area)
                else:
                    orgimg = cv2ImgAddText(orgimg, result_p, rect_area[0] - height_area, rect_area[1] - height_area - 10, (0, 255, 0), height_area)
        else:  # vehicles
            height_area = int((rect_area[3] - rect_area[1]) / 20)
            car_color = result['car_color']
            car_color_str = "车辆颜色:"
            car_color_str += car_color
            orgimg = cv2ImgAddText(orgimg, car_color_str, rect_area[0], rect_area[1], (0, 255, 0), height_area)

        cv2.rectangle(orgimg, (rect_area[0], rect_area[1]), (rect_area[2], rect_area[3]), object_color[object_no], 2)  # draw the box
    # print(result_str)
    return orgimg


def get_second(capture):
    if capture.isOpened():
        rate = capture.get(5)  # frame rate
        FrameNumber = capture.get(7)  # total number of frames in the video file
        duration = FrameNumber / rate  # total frames divided by frame rate gives the duration in seconds
        return int(rate), int(FrameNumber), int(duration)
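A hedged usage sketch for the class above (the video path is illustrative, and the weights under weight/traffic/ must already exist; note that get_frame as written returns only after the dataset loop finishes, i.e. the last frame):

```python
det = CarDetection(video_path='vid2.mp4')
frame_bytes, detections = det.get_frame()  # last annotated JPEG frame + stringified result dicts
with open('frame.jpg', 'wb') as f:
    f.write(frame_bytes)
print(detections)
```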
algorithm/Car_recognition/car_recognition/car_rec.py  ·  64 lines  ·  Normal file
@@ -0,0 +1,64 @@
from algorithm.Car_recognition.car_recognition.myNet import myNet
import torch
import cv2
import torch.nn.functional as F
import os
import numpy as np

# black, blue, yellow, brown, green, gray, orange, pink, purple, red, white
colors = ['黑色','蓝色','黄色','棕色','绿色','灰色','橙色','粉色','紫色','红色','白色']

def init_car_rec_model(model_path):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    check_point = torch.load(model_path)
    cfg = check_point['cfg']
    model = myNet(num_classes=11, cfg=cfg)
    model.load_state_dict(check_point['state_dict'])
    model.to(device)
    model.eval()
    return model

def imge_processing(img, device):
    img = cv2.resize(img, (64, 64))
    img = img.transpose([2, 0, 1])
    img = torch.from_numpy(img).float().to(device)
    img = img - 127.5
    img = img.unsqueeze(0)
    return img

def allFilePath(rootPath, allFIleList):
    fileList = os.listdir(rootPath)
    for temp in fileList:
        if os.path.isfile(os.path.join(rootPath, temp)):
            allFIleList.append(os.path.join(rootPath, temp))
        else:
            allFilePath(os.path.join(rootPath, temp), allFIleList)

def get_color_and_score(model, img, device):
    img = imge_processing(img, device)
    result = model(img)
    out = F.softmax(result, dim=1)
    _, predicted = torch.max(out.data, 1)
    out = out.data.cpu().numpy().tolist()[0]
    predicted = predicted.item()
    car_color = colors[predicted]
    color_conf = out[predicted]
    # print(pic_, colors[predicted[0]])
    return car_color, color_conf


if __name__ == '__main__':
    # root_file = r"/mnt/Gpan/BaiduNetdiskDownload/VehicleColour/VehicleColour/class/7"
    root_file = r"imgs"
    file_list = []
    allFilePath(root_file, file_list)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # was `is_available` without parentheses, which is always truthy
    model_path = r"/mnt/Gpan/Mydata/pytorchPorject/Car_system/car_color/color_model/0.8682285244554049_epoth_117_model.pth"
    model = init_car_rec_model(model_path)  # init_car_rec_model takes only the path
    for pic_ in file_list:
        img = cv2.imread(pic_)
        # img = imge_processing(img, device)
        color, conf = get_color_and_score(model, img, device)
        print(pic_, color, conf)
algorithm/Car_recognition/car_recognition/myNet.py  ·  95 lines  ·  Normal file
@@ -0,0 +1,95 @@
|  | import math | ||||||
|  | 
 | ||||||
|  | import torch | ||||||
|  | import torch.nn as nn | ||||||
|  | from torch.autograd import Variable | ||||||
|  | from torchvision import models | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | __all__ = ['myNet','myResNet18'] | ||||||
|  | 
 | ||||||
|  | # defaultcfg = { | ||||||
|  | #     11 : [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], | ||||||
|  | #     13 : [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512], | ||||||
|  | #     16 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512], | ||||||
|  | #     19 : [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512], | ||||||
|  | # } | ||||||
|  | # myCfg = [32,'M',64,'M',96,'M',128,'M',192,'M',256] | ||||||
|  | myCfg = [32,'M',64,'M',96,'M',128,'M',256] | ||||||
|  | # myCfg = [8,'M',16,'M',32,'M',64,'M',96] | ||||||
|  | class myNet(nn.Module): | ||||||
|  |     def __init__(self,cfg=None,num_classes=3): | ||||||
|  |         super(myNet, self).__init__() | ||||||
|  |         if cfg is None: | ||||||
|  |             cfg = myCfg | ||||||
|  |         self.feature = self.make_layers(cfg, True) | ||||||
|  |         self.gap =nn.AdaptiveAvgPool2d((1,1)) | ||||||
|  |         self.classifier = nn.Linear(cfg[-1], num_classes) | ||||||
|  |         # self.classifier = nn.Conv2d(cfg[-1],num_classes,kernel_size=1,stride=1) | ||||||
|  |         # self.bn_c= nn.BatchNorm2d(num_classes) | ||||||
|  |         # self.flatten = nn.Flatten() | ||||||
|  |     def make_layers(self, cfg, batch_norm=False): | ||||||
|  |         layers = [] | ||||||
|  |         in_channels = 3 | ||||||
|  |         for i in range(len(cfg)): | ||||||
|  |             if i == 0: | ||||||
|  |                 conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) | ||||||
|  |                 if batch_norm: | ||||||
|  |                     layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                 else: | ||||||
|  |                     layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                 in_channels = cfg[i] | ||||||
|  |             else : | ||||||
|  |                 if cfg[i] == 'M': | ||||||
|  |                     layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] | ||||||
|  |                 else: | ||||||
|  |                     conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1,stride =1) | ||||||
|  |                     if batch_norm: | ||||||
|  |                         layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                     else: | ||||||
|  |                         layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                     in_channels = cfg[i] | ||||||
|  |         return nn.Sequential(*layers) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         y = self.feature(x) | ||||||
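|  |         # with the default cfg and 64x64 inputs (as in the __main__ check below), the feature map here is 3x3, so this fixed pool reduces it to 1x1 | ||||||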
|  |         y = nn.AvgPool2d(kernel_size=3, stride=1)(y) | ||||||
|  |         y = y.view(x.size(0), -1) | ||||||
|  |         y = self.classifier(y) | ||||||
|  |         | ||||||
|  |         # y = self.flatten(y) | ||||||
|  |         return y | ||||||
|  | 
 | ||||||
|  | class myResNet18(nn.Module): | ||||||
|  |     def __init__(self, num_classes=1000): | ||||||
|  |         super(myResNet18, self).__init__() | ||||||
|  |         model_ft = models.resnet18(pretrained=True) | ||||||
|  |         self.model = model_ft | ||||||
|  |         self.model.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) | ||||||
|  |         self.model.averagePool = nn.AvgPool2d((5, 5), stride=1, ceil_mode=True) | ||||||
|  |         self.cls = nn.Linear(512, num_classes) | ||||||
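|  |         # note: 'averagePool' is a new attribute (torchvision's resnet18 names its pool 'avgpool'); forward() below uses it and self.cls, bypassing the stock fc head | ||||||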
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         x = self.model.conv1(x) | ||||||
|  |         x = self.model.bn1(x) | ||||||
|  |         x = self.model.relu(x) | ||||||
|  |         x = self.model.maxpool(x) | ||||||
|  | 
 | ||||||
|  |         x = self.model.layer1(x) | ||||||
|  |         x = self.model.layer2(x) | ||||||
|  |         x = self.model.layer3(x) | ||||||
|  |         x = self.model.layer4(x) | ||||||
|  | 
 | ||||||
|  |         x = self.model.averagePool(x) | ||||||
|  |         x = x.view(x.size(0), -1) | ||||||
|  |         x = self.cls(x) | ||||||
|  | 
 | ||||||
|  |         return x | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     net = myNet(num_classes=2) | ||||||
|  |     # infeatures = net.cls.in_features | ||||||
|  |     # net.cls=nn.Linear(infeatures,2) | ||||||
|  |     x = torch.FloatTensor(16, 3, 64, 64) | ||||||
|  |     y = net(x) | ||||||
|  |     print(y.shape) | ||||||
|  |     # print(net) | ||||||
							
								
								
									
										21
									
								
								algorithm/Car_recognition/data/argoverse_hd.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,21 @@ | |||||||
|  | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ | ||||||
|  | # Train command: python train.py --data argoverse_hd.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /argoverse | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) | ||||||
|  | download: bash data/scripts/get_argoverse_hd.sh | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: ../argoverse/Argoverse-1.1/images/train/  # 39384 images | ||||||
|  | val: ../argoverse/Argoverse-1.1/images/val/  # 15062 images | ||||||
|  | test: ../argoverse/Argoverse-1.1/images/test/  # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview | ||||||
|  | 
 | ||||||
|  | # number of classes | ||||||
|  | nc: 8 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'person',  'bicycle',  'car',  'motorcycle',  'bus',  'truck',  'traffic_light',  'stop_sign' ] | ||||||
							
								
								
									
										35
									
								
								algorithm/Car_recognition/data/coco.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,35 @@ | |||||||
|  | # COCO 2017 dataset http://cocodataset.org | ||||||
|  | # Train command: python train.py --data coco.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /coco | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) | ||||||
|  | download: bash data/scripts/get_coco.sh | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: ../coco/train2017.txt  # 118287 images | ||||||
|  | val: ../coco/val2017.txt  # 5000 images | ||||||
|  | test: ../coco/test-dev2017.txt  # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794 | ||||||
|  | 
 | ||||||
|  | # number of classes | ||||||
|  | nc: 80 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', | ||||||
|  |          'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', | ||||||
|  |          'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', | ||||||
|  |          'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', | ||||||
|  |          'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', | ||||||
|  |          'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', | ||||||
|  |          'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', | ||||||
|  |          'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', | ||||||
|  |          'hair drier', 'toothbrush' ] | ||||||
|  | 
 | ||||||
|  | # Print classes | ||||||
|  | # with open('data/coco.yaml') as f: | ||||||
|  | #   d = yaml.load(f, Loader=yaml.FullLoader)  # dict | ||||||
|  | #   for i, x in enumerate(d['names']): | ||||||
|  | #     print(i, x) | ||||||
							
								
								
									
										28
									
								
								algorithm/Car_recognition/data/coco128.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,28 @@ | |||||||
|  | # COCO 2017 dataset http://cocodataset.org - first 128 training images | ||||||
|  | # Train command: python train.py --data coco128.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /coco128 | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) | ||||||
|  | download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: ../coco128/images/train2017/  # 128 images | ||||||
|  | val: ../coco128/images/train2017/  # 128 images | ||||||
|  | 
 | ||||||
|  | # number of classes | ||||||
|  | nc: 80 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', | ||||||
|  |          'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', | ||||||
|  |          'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', | ||||||
|  |          'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', | ||||||
|  |          'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', | ||||||
|  |          'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', | ||||||
|  |          'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', | ||||||
|  |          'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', | ||||||
|  |          'hair drier', 'toothbrush' ] | ||||||
							
								
								
									
										38
									
								
								algorithm/Car_recognition/data/hyp.finetune.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,38 @@ | |||||||
|  | # Hyperparameters for VOC finetuning | ||||||
|  | # python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50 | ||||||
|  | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # Hyperparameter Evolution Results | ||||||
|  | # Generations: 306 | ||||||
|  | #                   P         R     mAP.5 mAP.5:.95       box       obj       cls | ||||||
|  | # Metrics:        0.6     0.936     0.896     0.684    0.0115   0.00805   0.00146 | ||||||
|  | 
 | ||||||
|  | lr0: 0.0032 | ||||||
|  | lrf: 0.12 | ||||||
|  | momentum: 0.843 | ||||||
|  | weight_decay: 0.00036 | ||||||
|  | warmup_epochs: 2.0 | ||||||
|  | warmup_momentum: 0.5 | ||||||
|  | warmup_bias_lr: 0.05 | ||||||
|  | box: 0.0296 | ||||||
|  | cls: 0.243 | ||||||
|  | cls_pw: 0.631 | ||||||
|  | obj: 0.301 | ||||||
|  | obj_pw: 0.911 | ||||||
|  | iou_t: 0.2 | ||||||
|  | anchor_t: 2.91 | ||||||
|  | # anchors: 3.63 | ||||||
|  | fl_gamma: 0.0 | ||||||
|  | hsv_h: 0.0138 | ||||||
|  | hsv_s: 0.664 | ||||||
|  | hsv_v: 0.464 | ||||||
|  | degrees: 0.373 | ||||||
|  | translate: 0.245 | ||||||
|  | scale: 0.898 | ||||||
|  | shear: 0.602 | ||||||
|  | perspective: 0.0 | ||||||
|  | flipud: 0.00856 | ||||||
|  | fliplr: 0.5 | ||||||
|  | mosaic: 1.0 | ||||||
|  | mixup: 0.243 | ||||||
							
								
								
									
										34
									
								
								algorithm/Car_recognition/data/hyp.scratch.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,34 @@ | |||||||
|  | # Hyperparameters for COCO training from scratch | ||||||
|  | # python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300 | ||||||
|  | # See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3) | ||||||
|  | lrf: 0.2  # final OneCycleLR learning rate (lr0 * lrf) | ||||||
|  | momentum: 0.937  # SGD momentum/Adam beta1 | ||||||
|  | weight_decay: 0.0005  # optimizer weight decay 5e-4 | ||||||
|  | warmup_epochs: 3.0  # warmup epochs (fractions ok) | ||||||
|  | warmup_momentum: 0.8  # warmup initial momentum | ||||||
|  | warmup_bias_lr: 0.1  # warmup initial bias lr | ||||||
|  | box: 0.05  # box loss gain | ||||||
|  | cls: 0.5  # cls loss gain | ||||||
|  | landmark: 0.005 # landmark loss gain | ||||||
|  | cls_pw: 1.0  # cls BCELoss positive_weight | ||||||
|  | obj: 1.0  # obj loss gain (scale with pixels) | ||||||
|  | obj_pw: 1.0  # obj BCELoss positive_weight | ||||||
|  | iou_t: 0.20  # IoU training threshold | ||||||
|  | anchor_t: 4.0  # anchor-multiple threshold | ||||||
|  | # anchors: 3  # anchors per output layer (0 to ignore) | ||||||
|  | fl_gamma: 0.0  # focal loss gamma (efficientDet default gamma=1.5) | ||||||
|  | hsv_h: 0.015  # image HSV-Hue augmentation (fraction) | ||||||
|  | hsv_s: 0.7  # image HSV-Saturation augmentation (fraction) | ||||||
|  | hsv_v: 0.4  # image HSV-Value augmentation (fraction) | ||||||
|  | degrees: 0.0  # image rotation (+/- deg) | ||||||
|  | translate: 0.1  # image translation (+/- fraction) | ||||||
|  | scale: 0.5  # image scale (+/- gain) | ||||||
|  | shear: 0.5  # image shear (+/- deg) | ||||||
|  | perspective: 0.0  # image perspective (+/- fraction), range 0-0.001 | ||||||
|  | flipud: 0.0  # image flip up-down (probability) | ||||||
|  | fliplr: 0.5  # image flip left-right (probability) | ||||||
|  | mosaic: 0.5  # image mosaic (probability) | ||||||
|  | mixup: 0.0  # image mixup (probability) | ||||||
							
								
								
									
										19
									
								
								algorithm/Car_recognition/data/plateAndCar.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,19 @@ | |||||||
|  | # CCPD-based car and plate detection dataset (classes: single_plate, double_plate, car) | ||||||
|  | # Train command: python train.py --data plateAndCar.yaml | ||||||
|  | # train/val paths below point to a local CCPD export | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) - none; this dataset is prepared locally (see paths below) | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/train_detect   | ||||||
|  | val: /mnt/Gpan/Mydata/pytorchPorject/datasets/ccpd/train_car_plate/val_detect | ||||||
|  | # number of classes | ||||||
|  | nc: 3 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'single_plate','double_plate','car'] | ||||||
							
								
								
									
										150
									
								
								algorithm/Car_recognition/data/retinaface2yolo.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,150 @@ | |||||||
|  | import os | ||||||
|  | import os.path | ||||||
|  | import sys | ||||||
|  | import torch | ||||||
|  | import torch.utils.data as data | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | 
 | ||||||
|  | class WiderFaceDetection(data.Dataset): | ||||||
|  |     def __init__(self, txt_path, preproc=None): | ||||||
|  |         self.preproc = preproc | ||||||
|  |         self.imgs_path = [] | ||||||
|  |         self.words = [] | ||||||
|  |         f = open(txt_path,'r') | ||||||
|  |         lines = f.readlines() | ||||||
|  |         isFirst = True | ||||||
|  |         labels = [] | ||||||
|  |         for line in lines: | ||||||
|  |             line = line.rstrip() | ||||||
|  |             if line.startswith('#'): | ||||||
|  |                 if isFirst is True: | ||||||
|  |                     isFirst = False | ||||||
|  |                 else: | ||||||
|  |                     labels_copy = labels.copy() | ||||||
|  |                     self.words.append(labels_copy) | ||||||
|  |                     labels.clear() | ||||||
|  |                 path = line[2:] | ||||||
|  |                 path = txt_path.replace('label.txt','images/') + path | ||||||
|  |                 self.imgs_path.append(path) | ||||||
|  |             else: | ||||||
|  |                 line = line.split(' ') | ||||||
|  |                 label = [float(x) for x in line] | ||||||
|  |                 labels.append(label) | ||||||
|  | 
 | ||||||
|  |         self.words.append(labels) | ||||||
|  | 
 | ||||||
|  |     def __len__(self): | ||||||
|  |         return len(self.imgs_path) | ||||||
|  | 
 | ||||||
|  |     def __getitem__(self, index): | ||||||
|  |         img = cv2.imread(self.imgs_path[index]) | ||||||
|  |         height, width, _ = img.shape | ||||||
|  | 
 | ||||||
|  |         labels = self.words[index] | ||||||
|  |         annotations = np.zeros((0, 15)) | ||||||
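|  |         # 15 columns per object: x1, y1, x2, y2, five landmark (x, y) pairs, and a landmark-visibility flag (set below) | ||||||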
|  |         if len(labels) == 0: | ||||||
|  |             return annotations | ||||||
|  |         for idx, label in enumerate(labels): | ||||||
|  |             annotation = np.zeros((1, 15)) | ||||||
|  |             # bbox | ||||||
|  |             annotation[0, 0] = label[0]  # x1 | ||||||
|  |             annotation[0, 1] = label[1]  # y1 | ||||||
|  |             annotation[0, 2] = label[0] + label[2]  # x2 | ||||||
|  |             annotation[0, 3] = label[1] + label[3]  # y2 | ||||||
|  | 
 | ||||||
|  |             # landmarks | ||||||
|  |             annotation[0, 4] = label[4]    # l0_x | ||||||
|  |             annotation[0, 5] = label[5]    # l0_y | ||||||
|  |             annotation[0, 6] = label[7]    # l1_x | ||||||
|  |             annotation[0, 7] = label[8]    # l1_y | ||||||
|  |             annotation[0, 8] = label[10]   # l2_x | ||||||
|  |             annotation[0, 9] = label[11]   # l2_y | ||||||
|  |             annotation[0, 10] = label[13]  # l3_x | ||||||
|  |             annotation[0, 11] = label[14]  # l3_y | ||||||
|  |             annotation[0, 12] = label[16]  # l4_x | ||||||
|  |             annotation[0, 13] = label[17]  # l4_y | ||||||
|  |             if (annotation[0, 4]<0): | ||||||
|  |                 annotation[0, 14] = -1 | ||||||
|  |             else: | ||||||
|  |                 annotation[0, 14] = 1 | ||||||
|  | 
 | ||||||
|  |             annotations = np.append(annotations, annotation, axis=0) | ||||||
|  |         target = np.array(annotations) | ||||||
|  |         if self.preproc is not None: | ||||||
|  |             img, target = self.preproc(img, target) | ||||||
|  | 
 | ||||||
|  |         return torch.from_numpy(img), target | ||||||
|  | 
 | ||||||
|  | def detection_collate(batch): | ||||||
|  |     """Custom collate fn for dealing with batches of images that have a different | ||||||
|  |     number of associated object annotations (bounding boxes). | ||||||
|  | 
 | ||||||
|  |     Arguments: | ||||||
|  |         batch: (tuple) A tuple of tensor images and lists of annotations | ||||||
|  | 
 | ||||||
|  |     Return: | ||||||
|  |         A tuple containing: | ||||||
|  |             1) (tensor) batch of images stacked on their 0 dim | ||||||
|  |             2) (list of tensors) annotations for a given image are stacked on 0 dim | ||||||
|  |     """ | ||||||
|  |     targets = [] | ||||||
|  |     imgs = [] | ||||||
|  |     for _, sample in enumerate(batch): | ||||||
|  |         for _, tup in enumerate(sample): | ||||||
|  |             if torch.is_tensor(tup): | ||||||
|  |                 imgs.append(tup) | ||||||
|  |             elif isinstance(tup, type(np.empty(0))): | ||||||
|  |                 annos = torch.from_numpy(tup).float() | ||||||
|  |                 targets.append(annos) | ||||||
|  | 
 | ||||||
|  |     return (torch.stack(imgs, 0), targets) | ||||||
|  | 
 | ||||||
|  | save_path = '/ssd_1t/derron/yolov5-face/data/widerface/train' | ||||||
|  | aa=WiderFaceDetection("/ssd_1t/derron/yolov5-face/data/widerface/widerface/train/label.txt") | ||||||
|  | for i in range(len(aa.imgs_path)): | ||||||
|  |     print(i, aa.imgs_path[i]) | ||||||
|  |     img = cv2.imread(aa.imgs_path[i]) | ||||||
|  |     base_img = os.path.basename(aa.imgs_path[i]) | ||||||
|  |     base_txt = os.path.basename(aa.imgs_path[i])[:-4] +".txt" | ||||||
|  |     save_img_path = os.path.join(save_path, base_img) | ||||||
|  |     save_txt_path = os.path.join(save_path, base_txt) | ||||||
|  |     with open(save_txt_path, "w") as f: | ||||||
|  |         height, width, _ = img.shape | ||||||
|  |         labels = aa.words[i] | ||||||
|  |         annotations = np.zeros((0, 14)) | ||||||
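|  |         # 14 values per row: normalized cx, cy, w, h plus five normalized landmark (x, y) pairs; class id "0" is prepended when writing | ||||||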
|  |         if len(labels) == 0: | ||||||
|  |             continue | ||||||
|  |         for idx, label in enumerate(labels): | ||||||
|  |             annotation = np.zeros((1, 14)) | ||||||
|  |             # bbox | ||||||
|  |             label[0] = max(0, label[0]) | ||||||
|  |             label[1] = max(0, label[1]) | ||||||
|  |             label[2] = min(width -  1, label[2]) | ||||||
|  |             label[3] = min(height - 1, label[3]) | ||||||
|  |             annotation[0, 0] = (label[0] + label[2] / 2) / width  # cx | ||||||
|  |             annotation[0, 1] = (label[1] + label[3] / 2) / height  # cy | ||||||
|  |             annotation[0, 2] = label[2] / width  # w | ||||||
|  |             annotation[0, 3] = label[3] / height  # h | ||||||
|  |             #if (label[2] -label[0]) < 8 or (label[3] - label[1]) < 8: | ||||||
|  |             #    img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127 | ||||||
|  |             #    continue | ||||||
|  |             # landmarks | ||||||
|  |             annotation[0, 4] = label[4] / width  # l0_x | ||||||
|  |             annotation[0, 5] = label[5] / height  # l0_y | ||||||
|  |             annotation[0, 6] = label[7] / width  # l1_x | ||||||
|  |             annotation[0, 7] = label[8]  / height # l1_y | ||||||
|  |             annotation[0, 8] = label[10] / width  # l2_x | ||||||
|  |             annotation[0, 9] = label[11] / height  # l2_y | ||||||
|  |             annotation[0, 10] = label[13] / width  # l3_x | ||||||
|  |             annotation[0, 11] = label[14] / height  # l3_y | ||||||
|  |             annotation[0, 12] = label[16] / width  # l4_x | ||||||
|  |             annotation[0, 13] = label[17] / height  # l4_y | ||||||
|  |             str_label="0 " | ||||||
|  |             for k in range(len(annotation[0])):  # new loop name so the outer image index i is not shadowed | ||||||
|  |                 str_label = str_label + " " + str(annotation[0][k]) | ||||||
|  |             str_label = str_label.replace('[', '').replace(']', '') | ||||||
|  |             str_label = str_label.replace(',', '') + '\n' | ||||||
|  |             f.write(str_label) | ||||||
|  |     cv2.imwrite(save_img_path, img) | ||||||
|  | 
 | ||||||
							
								
								
									
										62
									
								
								algorithm/Car_recognition/data/scripts/get_argoverse_hd.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,62 @@ | |||||||
|  | #!/bin/bash | ||||||
|  | # Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/ | ||||||
|  | # Download command: bash data/scripts/get_argoverse_hd.sh | ||||||
|  | # Train command: python train.py --data argoverse_hd.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /argoverse | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | # Download/unzip images | ||||||
|  | d='../argoverse/' # unzip directory | ||||||
|  | mkdir $d | ||||||
|  | url=https://argoverse-hd.s3.us-east-2.amazonaws.com/ | ||||||
|  | f=Argoverse-HD-Full.zip | ||||||
|  | curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background | ||||||
|  | wait                                              # finish background tasks | ||||||
|  | 
 | ||||||
|  | cd ../argoverse/Argoverse-1.1/ | ||||||
|  | ln -s tracking images | ||||||
|  | 
 | ||||||
|  | cd ../Argoverse-HD/annotations/ | ||||||
|  | 
 | ||||||
|  | python3 - "$@" <<END | ||||||
|  | import json | ||||||
|  | from pathlib import Path | ||||||
|  | 
 | ||||||
|  | annotation_files = ["train.json", "val.json"] | ||||||
|  | print("Converting annotations to YOLOv5 format...") | ||||||
|  | 
 | ||||||
|  | for val in annotation_files: | ||||||
|  |     a = json.load(open(val, "rb")) | ||||||
|  | 
 | ||||||
|  |     label_dict = {} | ||||||
|  |     for annot in a['annotations']: | ||||||
|  |         img_id = annot['image_id'] | ||||||
|  |         img_name = a['images'][img_id]['name'] | ||||||
|  |         img_label_name = img_name[:-3] + "txt" | ||||||
|  | 
 | ||||||
|  |         obj_class = annot['category_id'] | ||||||
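|  |         # the bbox appears to be COCO-style (x_min, y_min, w, h); convert to normalized YOLO centers for the 1920x1200 frames | ||||||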
|  |         x_center, y_center, width, height = annot['bbox'] | ||||||
|  |         x_center = (x_center + width / 2) / 1920.  # offset and scale | ||||||
|  |         y_center = (y_center + height / 2) / 1200.  # offset and scale | ||||||
|  |         width /= 1920.  # scale | ||||||
|  |         height /= 1200.  # scale | ||||||
|  | 
 | ||||||
|  |         img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']] | ||||||
|  | 
 | ||||||
|  |         Path(img_dir).mkdir(parents=True, exist_ok=True) | ||||||
|  | 
 | ||||||
|  |         if img_dir + "/" + img_label_name not in label_dict: | ||||||
|  |             label_dict[img_dir + "/" + img_label_name] = [] | ||||||
|  | 
 | ||||||
|  |         label_dict[img_dir + "/" + img_label_name].append(f"{obj_class} {x_center} {y_center} {width} {height}\n") | ||||||
|  | 
 | ||||||
|  |     for filename in label_dict: | ||||||
|  |         with open(filename, "w") as file: | ||||||
|  |             for string in label_dict[filename]: | ||||||
|  |                 file.write(string) | ||||||
|  | 
 | ||||||
|  | END | ||||||
|  | 
 | ||||||
|  | mv ./labels ../../Argoverse-1.1/ | ||||||
							
								
								
									
										27
									
								
								algorithm/Car_recognition/data/scripts/get_coco.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,27 @@ | |||||||
|  | #!/bin/bash | ||||||
|  | # COCO 2017 dataset http://cocodataset.org | ||||||
|  | # Download command: bash data/scripts/get_coco.sh | ||||||
|  | # Train command: python train.py --data coco.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /coco | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | # Download/unzip labels | ||||||
|  | d='../' # unzip directory | ||||||
|  | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ | ||||||
|  | f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB | ||||||
|  | echo 'Downloading' $url$f ' ...' | ||||||
|  | curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background | ||||||
|  | 
 | ||||||
|  | # Download/unzip images | ||||||
|  | d='../coco/images' # unzip directory | ||||||
|  | url=http://images.cocodataset.org/zips/ | ||||||
|  | f1='train2017.zip' # 19G, 118k images | ||||||
|  | f2='val2017.zip'   # 1G, 5k images | ||||||
|  | f3='test2017.zip'  # 7G, 41k images (optional) | ||||||
|  | for f in $f1 $f2; do | ||||||
|  |   echo 'Downloading' $url$f '...' | ||||||
|  |   curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background | ||||||
|  | done | ||||||
|  | wait # finish background tasks | ||||||
							
								
								
									
										139
									
								
								algorithm/Car_recognition/data/scripts/get_voc.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,139 @@ | |||||||
|  | #!/bin/bash | ||||||
|  | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ | ||||||
|  | # Download command: bash data/scripts/get_voc.sh | ||||||
|  | # Train command: python train.py --data voc.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /VOC | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | start=$(date +%s) | ||||||
|  | mkdir -p ../tmp | ||||||
|  | cd ../tmp/ | ||||||
|  | 
 | ||||||
|  | # Download/unzip images and labels | ||||||
|  | d='.' # unzip directory | ||||||
|  | url=https://github.com/ultralytics/yolov5/releases/download/v1.0/ | ||||||
|  | f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images | ||||||
|  | f2=VOCtest_06-Nov-2007.zip     # 438MB, 4953 images | ||||||
|  | f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images | ||||||
|  | for f in $f3 $f2 $f1; do | ||||||
|  |   echo 'Downloading' $url$f '...'  | ||||||
|  |   curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background | ||||||
|  | done | ||||||
|  | wait # finish background tasks | ||||||
|  | 
 | ||||||
|  | end=$(date +%s) | ||||||
|  | runtime=$((end - start)) | ||||||
|  | echo "Completed in" $runtime "seconds" | ||||||
|  | 
 | ||||||
|  | echo "Splitting dataset..." | ||||||
|  | python3 - "$@" <<END | ||||||
|  | import xml.etree.ElementTree as ET | ||||||
|  | import pickle | ||||||
|  | import os | ||||||
|  | from os import listdir, getcwd | ||||||
|  | from os.path import join | ||||||
|  | 
 | ||||||
|  | sets=[('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')] | ||||||
|  | 
 | ||||||
|  | classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def convert(size, box): | ||||||
|  |     dw = 1./(size[0]) | ||||||
|  |     dh = 1./(size[1]) | ||||||
|  |     x = (box[0] + box[1])/2.0 - 1 | ||||||
|  |     y = (box[2] + box[3])/2.0 - 1 | ||||||
|  |     w = box[1] - box[0] | ||||||
|  |     h = box[3] - box[2] | ||||||
|  |     x = x*dw | ||||||
|  |     w = w*dw | ||||||
|  |     y = y*dh | ||||||
|  |     h = h*dh | ||||||
|  |     return (x,y,w,h) | ||||||
|  | 
 | ||||||
|  | def convert_annotation(year, image_id): | ||||||
|  |     in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml'%(year, image_id)) | ||||||
|  |     out_file = open('VOCdevkit/VOC%s/labels/%s.txt'%(year, image_id), 'w') | ||||||
|  |     tree=ET.parse(in_file) | ||||||
|  |     root = tree.getroot() | ||||||
|  |     size = root.find('size') | ||||||
|  |     w = int(size.find('width').text) | ||||||
|  |     h = int(size.find('height').text) | ||||||
|  | 
 | ||||||
|  |     for obj in root.iter('object'): | ||||||
|  |         difficult = obj.find('difficult').text | ||||||
|  |         cls = obj.find('name').text | ||||||
|  |         if cls not in classes or int(difficult)==1: | ||||||
|  |             continue | ||||||
|  |         cls_id = classes.index(cls) | ||||||
|  |         xmlbox = obj.find('bndbox') | ||||||
|  |         b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)) | ||||||
|  |         bb = convert((w,h), b) | ||||||
|  |         out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n') | ||||||
|  | 
 | ||||||
|  | wd = getcwd() | ||||||
|  | 
 | ||||||
|  | for year, image_set in sets: | ||||||
|  |     if not os.path.exists('VOCdevkit/VOC%s/labels/'%(year)): | ||||||
|  |         os.makedirs('VOCdevkit/VOC%s/labels/'%(year)) | ||||||
|  |     image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt'%(year, image_set)).read().strip().split() | ||||||
|  |     list_file = open('%s_%s.txt'%(year, image_set), 'w') | ||||||
|  |     for image_id in image_ids: | ||||||
|  |         list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n'%(wd, year, image_id)) | ||||||
|  |         convert_annotation(year, image_id) | ||||||
|  |     list_file.close() | ||||||
|  | 
 | ||||||
|  | END | ||||||
|  | 
 | ||||||
|  | cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt | ||||||
|  | cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt | ||||||
|  | 
 | ||||||
|  | python3 - "$@" <<END | ||||||
|  | 
 | ||||||
|  | import shutil | ||||||
|  | import os | ||||||
|  | os.system('mkdir ../VOC/') | ||||||
|  | os.system('mkdir ../VOC/images') | ||||||
|  | os.system('mkdir ../VOC/images/train') | ||||||
|  | os.system('mkdir ../VOC/images/val') | ||||||
|  | 
 | ||||||
|  | os.system('mkdir ../VOC/labels') | ||||||
|  | os.system('mkdir ../VOC/labels/train') | ||||||
|  | os.system('mkdir ../VOC/labels/val') | ||||||
|  | 
 | ||||||
|  | import os | ||||||
|  | print(os.path.exists('../tmp/train.txt')) | ||||||
|  | f = open('../tmp/train.txt', 'r') | ||||||
|  | lines = f.readlines() | ||||||
|  | 
 | ||||||
|  | for line in lines: | ||||||
|  |     line = "/".join(line.split('/')[-5:]).strip() | ||||||
|  |     if (os.path.exists("../" + line)): | ||||||
|  |         os.system("cp ../"+ line + " ../VOC/images/train") | ||||||
|  |          | ||||||
|  |     line = line.replace('JPEGImages', 'labels') | ||||||
|  |     line = line.replace('jpg', 'txt') | ||||||
|  |     if (os.path.exists("../" + line)): | ||||||
|  |         os.system("cp ../"+ line + " ../VOC/labels/train") | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | print(os.path.exists('../tmp/2007_test.txt')) | ||||||
|  | f = open('../tmp/2007_test.txt', 'r') | ||||||
|  | lines = f.readlines() | ||||||
|  | 
 | ||||||
|  | for line in lines: | ||||||
|  |     line = "/".join(line.split('/')[-5:]).strip() | ||||||
|  |     if (os.path.exists("../" + line)): | ||||||
|  |         os.system("cp ../"+ line + " ../VOC/images/val") | ||||||
|  |          | ||||||
|  |     line = line.replace('JPEGImages', 'labels') | ||||||
|  |     line = line.replace('jpg', 'txt') | ||||||
|  |     if (os.path.exists("../" + line)): | ||||||
|  |         os.system("cp ../"+ line + " ../VOC/labels/val") | ||||||
|  | 
 | ||||||
|  | END | ||||||
|  | 
 | ||||||
|  | rm -rf ../tmp # remove temporary directory | ||||||
|  | echo "VOC download done." | ||||||
							
								
								
									
										176
									
								
								algorithm/Car_recognition/data/train2yolo.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,176 @@ | |||||||
|  | import os.path | ||||||
|  | import sys | ||||||
|  | import torch | ||||||
|  | import torch.utils.data as data | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class WiderFaceDetection(data.Dataset): | ||||||
|  |     def __init__(self, txt_path, preproc=None): | ||||||
|  |         self.preproc = preproc | ||||||
|  |         self.imgs_path = [] | ||||||
|  |         self.words = [] | ||||||
|  |         f = open(txt_path, 'r') | ||||||
|  |         lines = f.readlines() | ||||||
|  |         isFirst = True | ||||||
|  |         labels = [] | ||||||
|  |         for line in lines: | ||||||
|  |             line = line.rstrip() | ||||||
|  |             if line.startswith('#'): | ||||||
|  |                 if isFirst is True: | ||||||
|  |                     isFirst = False | ||||||
|  |                 else: | ||||||
|  |                     labels_copy = labels.copy() | ||||||
|  |                     self.words.append(labels_copy) | ||||||
|  |                     labels.clear() | ||||||
|  |                 path = line[2:] | ||||||
|  |                 path = txt_path.replace('label.txt', 'images/') + path | ||||||
|  |                 self.imgs_path.append(path) | ||||||
|  |             else: | ||||||
|  |                 line = line.split(' ') | ||||||
|  |                 label = [float(x) for x in line] | ||||||
|  |                 labels.append(label) | ||||||
|  | 
 | ||||||
|  |         self.words.append(labels) | ||||||
|  | 
 | ||||||
|  |     def __len__(self): | ||||||
|  |         return len(self.imgs_path) | ||||||
|  | 
 | ||||||
|  |     def __getitem__(self, index): | ||||||
|  |         img = cv2.imread(self.imgs_path[index]) | ||||||
|  |         height, width, _ = img.shape | ||||||
|  | 
 | ||||||
|  |         labels = self.words[index] | ||||||
|  |         annotations = np.zeros((0, 15)) | ||||||
|  |         if len(labels) == 0: | ||||||
|  |             return annotations | ||||||
|  |         for idx, label in enumerate(labels): | ||||||
|  |             annotation = np.zeros((1, 15)) | ||||||
|  |             # bbox | ||||||
|  |             annotation[0, 0] = label[0]  # x1 | ||||||
|  |             annotation[0, 1] = label[1]  # y1 | ||||||
|  |             annotation[0, 2] = label[0] + label[2]  # x2 | ||||||
|  |             annotation[0, 3] = label[1] + label[3]  # y2 | ||||||
|  | 
 | ||||||
|  |             # landmarks | ||||||
|  |             annotation[0, 4] = label[4]    # l0_x | ||||||
|  |             annotation[0, 5] = label[5]    # l0_y | ||||||
|  |             annotation[0, 6] = label[7]    # l1_x | ||||||
|  |             annotation[0, 7] = label[8]    # l1_y | ||||||
|  |             annotation[0, 8] = label[10]   # l2_x | ||||||
|  |             annotation[0, 9] = label[11]   # l2_y | ||||||
|  |             annotation[0, 10] = label[13]  # l3_x | ||||||
|  |             annotation[0, 11] = label[14]  # l3_y | ||||||
|  |             annotation[0, 12] = label[16]  # l4_x | ||||||
|  |             annotation[0, 13] = label[17]  # l4_y | ||||||
|  |             if annotation[0, 4] < 0: | ||||||
|  |                 annotation[0, 14] = -1 | ||||||
|  |             else: | ||||||
|  |                 annotation[0, 14] = 1 | ||||||
|  | 
 | ||||||
|  |             annotations = np.append(annotations, annotation, axis=0) | ||||||
|  |         target = np.array(annotations) | ||||||
|  |         if self.preproc is not None: | ||||||
|  |             img, target = self.preproc(img, target) | ||||||
|  | 
 | ||||||
|  |         return torch.from_numpy(img), target | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def detection_collate(batch): | ||||||
|  |     """Custom collate fn for dealing with batches of images that have a different | ||||||
|  |     number of associated object annotations (bounding boxes). | ||||||
|  | 
 | ||||||
|  |     Arguments: | ||||||
|  |         batch: (tuple) A tuple of tensor images and lists of annotations | ||||||
|  | 
 | ||||||
|  |     Return: | ||||||
|  |         A tuple containing: | ||||||
|  |             1) (tensor) batch of images stacked on their 0 dim | ||||||
|  |             2) (list of tensors) annotations for a given image are stacked on 0 dim | ||||||
|  |     """ | ||||||
|  |     targets = [] | ||||||
|  |     imgs = [] | ||||||
|  |     for _, sample in enumerate(batch): | ||||||
|  |         for _, tup in enumerate(sample): | ||||||
|  |             if torch.is_tensor(tup): | ||||||
|  |                 imgs.append(tup) | ||||||
|  |             elif isinstance(tup, type(np.empty(0))): | ||||||
|  |                 annos = torch.from_numpy(tup).float() | ||||||
|  |                 targets.append(annos) | ||||||
|  | 
 | ||||||
|  |     return torch.stack(imgs, 0), targets | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     if len(sys.argv) == 1: | ||||||
|  |         print('Missing path to WIDERFACE train folder.') | ||||||
|  |         print('Run command: python3 train2yolo.py /path/to/original/widerface/train [/path/to/save/widerface/train]') | ||||||
|  |         exit(1) | ||||||
|  |     elif len(sys.argv) > 3: | ||||||
|  |         print('Too many arguments were provided.') | ||||||
|  |         print('Run command: python3 train2yolo.py /path/to/original/widerface/train [/path/to/save/widerface/train]') | ||||||
|  |         exit(1) | ||||||
|  |     original_path = sys.argv[1] | ||||||
|  | 
 | ||||||
|  |     if len(sys.argv) == 2: | ||||||
|  |         if not os.path.isdir('widerface'): | ||||||
|  |             os.mkdir('widerface') | ||||||
|  |         if not os.path.isdir('widerface/train'): | ||||||
|  |             os.mkdir('widerface/train') | ||||||
|  | 
 | ||||||
|  |         save_path = 'widerface/train' | ||||||
|  |     else: | ||||||
|  |         save_path = sys.argv[2] | ||||||
|  | 
 | ||||||
|  |     if not os.path.isfile(os.path.join(original_path, 'label.txt')): | ||||||
|  |         print('Missing label.txt file.') | ||||||
|  |         exit(1) | ||||||
|  | 
 | ||||||
|  |     aa = WiderFaceDetection(os.path.join(original_path, 'label.txt')) | ||||||
|  | 
 | ||||||
|  |     for i in range(len(aa.imgs_path)): | ||||||
|  |         print(i, aa.imgs_path[i]) | ||||||
|  |         img = cv2.imread(aa.imgs_path[i]) | ||||||
|  |         base_img = os.path.basename(aa.imgs_path[i]) | ||||||
|  |         base_txt = os.path.basename(aa.imgs_path[i])[:-4] + ".txt" | ||||||
|  |         save_img_path = os.path.join(save_path, base_img) | ||||||
|  |         save_txt_path = os.path.join(save_path, base_txt) | ||||||
|  |         with open(save_txt_path, "w") as f: | ||||||
|  |             height, width, _ = img.shape | ||||||
|  |             labels = aa.words[i] | ||||||
|  |             annotations = np.zeros((0, 14)) | ||||||
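|  |             # 14 values per row: normalized cx, cy, w, h plus five normalized landmark (x, y) pairs; class id "0" is prepended when writing | ||||||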
|  |             if len(labels) == 0: | ||||||
|  |                 continue | ||||||
|  |             for idx, label in enumerate(labels): | ||||||
|  |                 annotation = np.zeros((1, 14)) | ||||||
|  |                 # bbox | ||||||
|  |                 label[0] = max(0, label[0]) | ||||||
|  |                 label[1] = max(0, label[1]) | ||||||
|  |                 label[2] = min(width - 1, label[2]) | ||||||
|  |                 label[3] = min(height - 1, label[3]) | ||||||
|  |                 annotation[0, 0] = (label[0] + label[2] / 2) / width  # cx | ||||||
|  |                 annotation[0, 1] = (label[1] + label[3] / 2) / height  # cy | ||||||
|  |                 annotation[0, 2] = label[2] / width  # w | ||||||
|  |                 annotation[0, 3] = label[3] / height  # h | ||||||
|  |                 #if (label[2] -label[0]) < 8 or (label[3] - label[1]) < 8: | ||||||
|  |                 #    img[int(label[1]):int(label[3]), int(label[0]):int(label[2])] = 127 | ||||||
|  |                 #    continue | ||||||
|  |                 # landmarks | ||||||
|  |                 annotation[0, 4] = label[4] / width  # l0_x | ||||||
|  |                 annotation[0, 5] = label[5] / height  # l0_y | ||||||
|  |                 annotation[0, 6] = label[7] / width  # l1_x | ||||||
|  |                 annotation[0, 7] = label[8] / height  # l1_y | ||||||
|  |                 annotation[0, 8] = label[10] / width  # l2_x | ||||||
|  |                 annotation[0, 9] = label[11] / height  # l2_y | ||||||
|  |                 annotation[0, 10] = label[13] / width  # l3_x | ||||||
|  |                 annotation[0, 11] = label[14] / height  # l3_y | ||||||
|  |                 annotation[0, 12] = label[16] / width  # l4_x | ||||||
|  |                 annotation[0, 13] = label[17] / height  # l4_y | ||||||
|  |                 str_label = "0 " | ||||||
|  |                 for k in range(len(annotation[0])):  # new loop name so the outer image index i is not shadowed | ||||||
|  |                     str_label = str_label + " " + str(annotation[0][k]) | ||||||
|  |                 str_label = str_label.replace('[', '').replace(']', '') | ||||||
|  |                 str_label = str_label.replace(',', '') + '\n' | ||||||
|  |                 f.write(str_label) | ||||||
|  |         cv2.imwrite(save_img_path, img) | ||||||
							
								
								
									
										88
									
								
								algorithm/Car_recognition/data/val2yolo.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,88 @@ | |||||||
|  | import os | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | import shutil | ||||||
|  | import sys | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def xywh2xxyy(box): | ||||||
|  |     x1 = box[0] | ||||||
|  |     y1 = box[1] | ||||||
|  |     x2 = box[0] + box[2] | ||||||
|  |     y2 = box[1] + box[3] | ||||||
|  |     return x1, x2, y1, y2 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def convert(size, box): | ||||||
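|  |     # box comes from xywh2xxyy as (x1, x2, y1, y2) in pixels; returns normalized (cx, cy, w, h) - same convert() as in data/scripts/get_voc.sh | ||||||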
|  |     dw = 1. / (size[0]) | ||||||
|  |     dh = 1. / (size[1]) | ||||||
|  |     x = (box[0] + box[1]) / 2.0 - 1 | ||||||
|  |     y = (box[2] + box[3]) / 2.0 - 1 | ||||||
|  |     w = box[1] - box[0] | ||||||
|  |     h = box[3] - box[2] | ||||||
|  |     x = x * dw | ||||||
|  |     w = w * dw | ||||||
|  |     y = y * dh | ||||||
|  |     h = h * dh | ||||||
|  |     return x, y, w, h | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def wider2face(root, phase='val', ignore_small=0): | ||||||
|  |     data = {} | ||||||
|  |     with open('{}/{}/label.txt'.format(root, phase), 'r') as f: | ||||||
|  |         lines = f.readlines() | ||||||
|  |         for line in tqdm(lines): | ||||||
|  |             line = line.strip() | ||||||
|  |             if '#' in line: | ||||||
|  |                 path = '{}/{}/images/{}'.format(root, phase, line.split()[-1]) | ||||||
|  |                 img = cv2.imread(path) | ||||||
|  |                 height, width, _ = img.shape | ||||||
|  |                 data[path] = list() | ||||||
|  |             else: | ||||||
|  |                 box = np.array(line.split()[0:4], dtype=np.float32)  # (x1,y1,w,h) | ||||||
|  |                 if box[2] < ignore_small or box[3] < ignore_small: | ||||||
|  |                     continue | ||||||
|  |                 box = convert((width, height), xywh2xxyy(box)) | ||||||
|  |                 label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4), | ||||||
|  |                                                                              round(box[2], 4), round(box[3], 4)) | ||||||
|  |                 data[path].append(label) | ||||||
|  |     return data | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     if len(sys.argv) == 1: | ||||||
|  |         print('Missing path to WIDERFACE folder.') | ||||||
|  |         print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]') | ||||||
|  |         exit(1) | ||||||
|  |     elif len(sys.argv) > 3: | ||||||
|  |         print('Too many arguments were provided.') | ||||||
|  |         print('Run command: python3 val2yolo.py /path/to/original/widerface [/path/to/save/widerface/val]') | ||||||
|  |         exit(1) | ||||||
|  | 
 | ||||||
|  |     root_path = sys.argv[1] | ||||||
|  |     if not os.path.isfile(os.path.join(root_path, 'val', 'label.txt')): | ||||||
|  |         print('Missing label.txt file.') | ||||||
|  |         exit(1) | ||||||
|  | 
 | ||||||
|  |     if len(sys.argv) == 2: | ||||||
|  |         if not os.path.isdir('widerface'): | ||||||
|  |             os.mkdir('widerface') | ||||||
|  |         if not os.path.isdir('widerface/val'): | ||||||
|  |             os.mkdir('widerface/val') | ||||||
|  | 
 | ||||||
|  |         save_path = 'widerface/val' | ||||||
|  |     else: | ||||||
|  |         save_path = sys.argv[2] | ||||||
|  | 
 | ||||||
|  |     datas = wider2face(root_path, phase='val') | ||||||
|  |     for idx, data in enumerate(datas.keys()): | ||||||
|  |         pict_name = os.path.basename(data) | ||||||
|  |         out_img = f'{save_path}/{idx}.jpg' | ||||||
|  |         out_txt = f'{save_path}/{idx}.txt' | ||||||
|  |         shutil.copyfile(data, out_img) | ||||||
|  |         labels = datas[data] | ||||||
|  |         f = open(out_txt, 'w') | ||||||
|  |         for label in labels: | ||||||
|  |             f.write(label + '\n') | ||||||
|  |         f.close() | ||||||
							
								
								
									
										65
									
								
								algorithm/Car_recognition/data/val2yolo_for_test.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,65 @@ | |||||||
|  | import os | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | import shutil | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | root = '/ssd_1t/derron/WiderFace' | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def xywh2xxyy(box): | ||||||
|  |     x1 = box[0] | ||||||
|  |     y1 = box[1] | ||||||
|  |     x2 = box[0] + box[2] | ||||||
|  |     y2 = box[1] + box[3] | ||||||
|  |     return (x1, x2, y1, y2) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def convert(size, box): | ||||||
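|  |     # box comes from xywh2xxyy as (x1, x2, y1, y2) in pixels; returns normalized (cx, cy, w, h) - same convert() as in data/scripts/get_voc.sh | ||||||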
|  |     dw = 1. / (size[0]) | ||||||
|  |     dh = 1. / (size[1]) | ||||||
|  |     x = (box[0] + box[1]) / 2.0 - 1 | ||||||
|  |     y = (box[2] + box[3]) / 2.0 - 1 | ||||||
|  |     w = box[1] - box[0] | ||||||
|  |     h = box[3] - box[2] | ||||||
|  |     x = x * dw | ||||||
|  |     w = w * dw | ||||||
|  |     y = y * dh | ||||||
|  |     h = h * dh | ||||||
|  |     return (x, y, w, h) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def wider2face(phase='val', ignore_small=0): | ||||||
|  |     data = {} | ||||||
|  |     with open('{}/{}/label.txt'.format(root, phase), 'r') as f: | ||||||
|  |         lines = f.readlines() | ||||||
|  |         for line in tqdm(lines): | ||||||
|  |             line = line.strip() | ||||||
|  |             if '#' in line: | ||||||
|  |                 path = '{}/{}/images/{}'.format(root, phase, os.path.basename(line)) | ||||||
|  |                 img = cv2.imread(path) | ||||||
|  |                 height, width, _ = img.shape | ||||||
|  |                 data[path] = list() | ||||||
|  |             else: | ||||||
|  |                 box = np.array(line.split()[0:4], dtype=np.float32)  # (x1,y1,w,h) | ||||||
|  |                 if box[2] < ignore_small or box[3] < ignore_small: | ||||||
|  |                     continue | ||||||
|  |                 box = convert((width, height), xywh2xxyy(box)) | ||||||
|  |                 label = '0 {} {} {} {} -1 -1 -1 -1 -1 -1 -1 -1 -1 -1'.format(round(box[0], 4), round(box[1], 4), | ||||||
|  |                                                                              round(box[2], 4), round(box[3], 4)) | ||||||
|  |                 data[path].append(label) | ||||||
|  |     return data | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     datas = wider2face('val') | ||||||
|  |     for idx, data in enumerate(datas.keys()): | ||||||
|  |         pict_name = os.path.basename(data) | ||||||
|  |         out_img = 'widerface/val/images/{}'.format(pict_name) | ||||||
|  |         out_txt = 'widerface/val/labels/{}.txt'.format(os.path.splitext(pict_name)[0]) | ||||||
|  |         shutil.copyfile(data, out_img) | ||||||
|  |         labels = datas[data] | ||||||
|  |         f = open(out_txt, 'w') | ||||||
|  |         for label in labels: | ||||||
|  |             f.write(label + '\n') | ||||||
|  |         f.close() | ||||||
							
								
								
									
										21
									
								
								algorithm/Car_recognition/data/voc.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,21 @@ | |||||||
|  | # PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/ | ||||||
|  | # Train command: python train.py --data voc.yaml | ||||||
|  | # Default dataset location is next to /yolov5: | ||||||
|  | #   /parent_folder | ||||||
|  | #     /VOC | ||||||
|  | #     /yolov5 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) | ||||||
|  | download: bash data/scripts/get_voc.sh | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: ../VOC/images/train/  # 16551 images | ||||||
|  | val: ../VOC/images/val/  # 4952 images | ||||||
|  | 
 | ||||||
|  | # number of classes | ||||||
|  | nc: 20 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', | ||||||
|  |          'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] | ||||||
							
								
								
									
										19
									
								
								algorithm/Car_recognition/data/widerface.yaml
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,19 @@ | |||||||
|  | # CCPD plate detection dataset (classes: single, double) | ||||||
|  | # Train command: python train.py --data widerface.yaml | ||||||
|  | # train/val paths below point to a local CCPD export | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # download command/URL (optional) - none; this dataset is prepared locally (see paths below) | ||||||
|  | 
 | ||||||
|  | # train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/] | ||||||
|  | train: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/train_detect   | ||||||
|  | val: /mnt/Gpan/Mydata/pytorchPorject/yolov5-face/ccpd/val_detect  | ||||||
|  | # number of classes | ||||||
|  | nc: 2 | ||||||
|  | 
 | ||||||
|  | # class names | ||||||
|  | names: [ 'single','double'] | ||||||
							
								
								
									
										1
									
								
								algorithm/Car_recognition/demo.sh
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1 @@ | |||||||
|  | python detect_plate.py --detect_model runs/train/exp22/weights/last.pt --rec_model /mnt/Gpan/Mydata/pytorchPorject/CRNN/newCrnn/crnn_plate_recognition/output/360CC/crnn/2022-12-02-22-29/checkpoints/checkpoint_71_acc_0.9524.pth --image_path mytest --img_size 384 | ||||||
							
								
								
									
										223
									
								
								algorithm/Car_recognition/detect_demo.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						| @ -0,0 +1,223 @@ | |||||||
|  | # -*- coding: UTF-8 -*- | ||||||
|  | import argparse | ||||||
|  | import time | ||||||
|  | import os | ||||||
|  | import cv2 | ||||||
|  | import torch | ||||||
|  | from numpy import random | ||||||
|  | import copy | ||||||
|  | import numpy as np | ||||||
|  | from models.experimental import attempt_load | ||||||
|  | from utils.datasets import letterbox | ||||||
|  | from utils.general import check_img_size, non_max_suppression_face, scale_coords | ||||||
|  | 
 | ||||||
|  | from utils.torch_utils import time_synchronized | ||||||
|  | from utils.cv_puttext import cv2ImgAddText | ||||||
|  | from plate_recognition.plate_rec import get_plate_result,allFilePath,cv_imread | ||||||
|  | 
 | ||||||
|  | from plate_recognition.double_plate_split_merge import get_split_merge | ||||||
|  | 
 | ||||||
|  | clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] | ||||||
|  | 
 | ||||||
|  | def load_model(weights, device): | ||||||
|  |     model = attempt_load(weights, map_location=device)  # load FP32 model | ||||||
|  |     return model | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): | ||||||
|  |     # Rescale coords (xyxy) from img1_shape to img0_shape | ||||||
|  |     if ratio_pad is None:  # calculate from img0_shape | ||||||
|  |         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new | ||||||
|  |         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding | ||||||
|  |     else: | ||||||
|  |         gain = ratio_pad[0][0] | ||||||
|  |         pad = ratio_pad[1] | ||||||
|  | 
 | ||||||
|  |     coords[:, [0, 2, 4, 6]] -= pad[0]  # x padding | ||||||
|  |     coords[:, [1, 3, 5, 7]] -= pad[1]  # y padding | ||||||
|  |     coords[:, :10] /= gain | ||||||
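|  |     # only 8 values (4 plate corners) are present; the :10 slice is a holdover from the 5-point face model and clamps harmlessly | ||||||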
|  |     #clip_coords(coords, img0_shape) | ||||||
|  |     coords[:, 0].clamp_(0, img0_shape[1])  # x1 | ||||||
|  |     coords[:, 1].clamp_(0, img0_shape[0])  # y1 | ||||||
|  |     coords[:, 2].clamp_(0, img0_shape[1])  # x2 | ||||||
|  |     coords[:, 3].clamp_(0, img0_shape[0])  # y2 | ||||||
|  |     coords[:, 4].clamp_(0, img0_shape[1])  # x3 | ||||||
|  |     coords[:, 5].clamp_(0, img0_shape[0])  # y3 | ||||||
|  |     coords[:, 6].clamp_(0, img0_shape[1])  # x4 | ||||||
|  |     coords[:, 7].clamp_(0, img0_shape[0])  # y4 | ||||||
|  |     # coords[:, 8].clamp_(0, img0_shape[1])  # x5 | ||||||
|  |     # coords[:, 9].clamp_(0, img0_shape[0])  # y5 | ||||||
|  |     return coords | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def get_plate_rec_landmark(img, xyxy, conf, landmarks, class_num,device): | ||||||
|  |     h,w,c = img.shape | ||||||
|  |     result_dict={} | ||||||
|  |     tl = 1 or round(0.002 * (h + w) / 2) + 1  # line/font thickness | ||||||
|  | 
 | ||||||
|  |     x1 = int(xyxy[0]) | ||||||
|  |     y1 = int(xyxy[1]) | ||||||
|  |     x2 = int(xyxy[2]) | ||||||
|  |     y2 = int(xyxy[3]) | ||||||
|  |     landmarks_np=np.zeros((4,2)) | ||||||
|  |     rect=[x1,y1,x2,y2] | ||||||
|  |     for i in range(4): | ||||||
|  |         point_x = int(landmarks[2 * i]) | ||||||
|  |         point_y = int(landmarks[2 * i + 1]) | ||||||
|  |         landmarks_np[i]=np.array([point_x,point_y]) | ||||||
|  | 
 | ||||||
|  |     class_label = int(class_num)  # plate type: 0 = single-layer plate, 1 = double-layer plate | ||||||
|  |     result_dict['rect']=rect | ||||||
|  |     result_dict['landmarks']=landmarks_np.tolist() | ||||||
|  |     result_dict['class']=class_label | ||||||
|  |     return result_dict | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def detect_plate(model, orgimg, device,img_size): | ||||||
|  |     # Load model | ||||||
|  |     # img_size = opt_img_size | ||||||
|  |     conf_thres = 0.3 | ||||||
|  |     iou_thres = 0.5 | ||||||
|  |     dict_list=[] | ||||||
|  |     # orgimg = cv2.imread(image_path)  # BGR | ||||||
|  |     assert orgimg is not None, 'Image Not Found' | ||||||
|  |     img0 = copy.deepcopy(orgimg) | ||||||
|  |     h0, w0 = orgimg.shape[:2]  # orig hw | ||||||
|  |     r = img_size / max(h0, w0)  # resize image to img_size | ||||||
|  |     if r != 1:  # always resize down, only resize up if training with augmentation | ||||||
|  |         interp = cv2.INTER_AREA if r < 1  else cv2.INTER_LINEAR | ||||||
|  |         img0 = cv2.resize(img0, (int(w0 * r), int(h0 * r)), interpolation=interp) | ||||||
|  | 
 | ||||||
|  |     imgsz = check_img_size(img_size, s=model.stride.max())  # check img_size | ||||||
|  | 
 | ||||||
|  |     img = letterbox(img0, new_shape=imgsz)[0] | ||||||
|  |     # img =process_data(img0) | ||||||
|  |     # Convert | ||||||
|  |     img = img[:, :, ::-1].transpose(2, 0, 1).copy()  # BGR to RGB, HWC to CHW | ||||||
|  | 
 | ||||||
|  |     # Run inference | ||||||
|  |     t0 = time.time() | ||||||
|  | 
 | ||||||
|  |     img = torch.from_numpy(img).to(device) | ||||||
|  |     img = img.float()  # uint8 to fp16/32 | ||||||
|  |     img /= 255.0  # 0 - 255 to 0.0 - 1.0 | ||||||
|  |     if img.ndimension() == 3: | ||||||
|  |         img = img.unsqueeze(0) | ||||||
|  | 
 | ||||||
|  |     # Inference | ||||||
|  |     t1 = time_synchronized() | ||||||
|  |     pred = model(img)[0] | ||||||
|  |     t2=time_synchronized() | ||||||
|  |     # print(f"infer time is {(t2-t1)*1000} ms") | ||||||
|  | 
 | ||||||
|  |     # Apply NMS | ||||||
|  |     pred = non_max_suppression_face(pred, conf_thres, iou_thres) | ||||||
|  | 
 | ||||||
|  |     # print('img.shape: ', img.shape) | ||||||
|  |     # print('orgimg.shape: ', orgimg.shape) | ||||||
|  | 
 | ||||||
|  |     # Process detections | ||||||
|  |     for i, det in enumerate(pred):  # detections per image | ||||||
|  |         if len(det): | ||||||
|  |             # Rescale boxes from img_size to im0 size | ||||||
|  |             det[:, :4] = scale_coords(img.shape[2:], det[:, :4], orgimg.shape).round() | ||||||
|  | 
 | ||||||
|  |             # Print results | ||||||
|  |             for c in det[:, -1].unique(): | ||||||
|  |                 n = (det[:, -1] == c).sum()  # detections per class (computed but unused here) | ||||||
|  | 
 | ||||||
|  |             det[:, 5:13] = scale_coords_landmarks(img.shape[2:], det[:, 5:13], orgimg.shape).round() | ||||||
|  | 
 | ||||||
|  |             for j in range(det.size()[0]): | ||||||
|  |                 xyxy = det[j, :4].view(-1).tolist() | ||||||
|  |                 conf = det[j, 4].cpu().numpy() | ||||||
|  |                 landmarks = det[j, 5:13].view(-1).tolist() | ||||||
|  |                 class_num = det[j, 13].cpu().numpy() | ||||||
|  |                 result_dict = get_plate_rec_landmark(orgimg, xyxy, conf, landmarks, class_num,device) | ||||||
|  |                 dict_list.append(result_dict) | ||||||
|  |     return dict_list | ||||||
|  |     # cv2.imwrite('result.jpg', orgimg) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def draw_result(orgimg,dict_list): | ||||||
|  |     result_str ="" | ||||||
|  |     for result in dict_list: | ||||||
|  |         rect_area = result['rect'] | ||||||
|  |          | ||||||
|  |         x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] | ||||||
|  |         padding_w = 0.05*w | ||||||
|  |         padding_h = 0.11*h | ||||||
|  |         rect_area[0]=max(0,int(x-padding_w)) | ||||||
|  |         rect_area[1]=max(0,int(y-padding_h)) | ||||||
|  |         rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w)) | ||||||
|  |         rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) | ||||||
|  | 
 | ||||||
|  |          | ||||||
|  |         landmarks=result['landmarks'] | ||||||
|  |         label=result['class'] | ||||||
|  |         # result_str+=result+" " | ||||||
|  |         for i in range(4):  # draw the four landmark keypoints | ||||||
|  |             cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) | ||||||
|  |         cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),clors[label],2)  # draw the box | ||||||
|  |         cv2.putText(orgimg,str(label),(rect_area[0],rect_area[1]),cv2.FONT_HERSHEY_SIMPLEX,0.5,clors[label],2)  # was "img": draw on orgimg, the image being annotated | ||||||
|  |     #     orgimg=cv2ImgAddText(orgimg,label,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) | ||||||
|  |     # print(result_str) | ||||||
|  |     return orgimg | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     parser = argparse.ArgumentParser() | ||||||
|  |     parser.add_argument('--detect_model', nargs='+', type=str, default='weights/detect.pt', help='model.pt path(s)')  # detection model | ||||||
|  |     parser.add_argument('--image_path', type=str, default='imgs', help='source')  | ||||||
|  |     parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') | ||||||
|  |     parser.add_argument('--output', type=str, default='result1', help='source')  | ||||||
|  |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | ||||||
|  |     # device =torch.device("cpu") | ||||||
|  |     opt = parser.parse_args() | ||||||
|  |     print(opt) | ||||||
|  |     save_path = opt.output | ||||||
|  |     count=0 | ||||||
|  |     if not os.path.exists(save_path): | ||||||
|  |         os.mkdir(save_path) | ||||||
|  | 
 | ||||||
|  |     detect_model = load_model(opt.detect_model, device)  # initialize the detection model | ||||||
|  |     time_all = 0 | ||||||
|  |     time_begin=time.time() | ||||||
|  |     if not os.path.isfile(opt.image_path):  # a directory of images | ||||||
|  |         file_list=[] | ||||||
|  |         allFilePath(opt.image_path,file_list) | ||||||
|  |         for img_path in file_list: | ||||||
|  |              | ||||||
|  |             print(count,img_path) | ||||||
|  |             time_b = time.time() | ||||||
|  |             img =cv_imread(img_path) | ||||||
|  |              | ||||||
|  |             if img is None: | ||||||
|  |                 continue | ||||||
|  |             if img.shape[-1]==4: | ||||||
|  |                 img=cv2.cvtColor(img,cv2.COLOR_BGRA2BGR) | ||||||
|  |             # detect_one(model,img_path,device) | ||||||
|  |             dict_list=detect_plate(detect_model, img, device,opt.img_size) | ||||||
|  |             ori_img=draw_result(img,dict_list) | ||||||
|  |             img_name = os.path.basename(img_path) | ||||||
|  |             save_img_path = os.path.join(save_path,img_name) | ||||||
|  |             time_e=time.time() | ||||||
|  |             time_gap = time_e-time_b | ||||||
|  |             if count: | ||||||
|  |                 time_all+=time_gap | ||||||
|  |             cv2.imwrite(save_img_path,ori_img) | ||||||
|  |             count+=1 | ||||||
|  |     else:  # a single image | ||||||
|  |         print(count,opt.image_path,end=" ") | ||||||
|  |         img = cv_imread(opt.image_path) | ||||||
|  |         if img.shape[-1]==4: | ||||||
|  |             img = cv2.cvtColor(img,cv2.COLOR_BGRA2BGR) | ||||||
|  |         # detect_one(model,img_path,device) | ||||||
|  |         dict_list = detect_plate(detect_model, img, device, opt.img_size) | ||||||
|  |         ori_img = draw_result(img,dict_list) | ||||||
|  |         img_name = os.path.basename(opt.image_path) | ||||||
|  |         save_img_path = os.path.join(save_path,img_name) | ||||||
|  |         cv2.imwrite(save_img_path,ori_img) | ||||||
|  |     print(f"sumTime time is {time.time()-time_begin} s, average pic time is {time_all/(len(file_list)-1)}") | ||||||
							
								
								
									
										
BIN  algorithm/Car_recognition/fonts/platech.ttf  (Normal file)
BIN  algorithm/Car_recognition/image/README/1.png  (Normal file, 26 KiB)
BIN  algorithm/Car_recognition/image/README/double_yellow.jpg  (Normal file, 65 KiB)
BIN  algorithm/Car_recognition/image/README/test_1.jpg  (Normal file, 1.3 MiB)
BIN  algorithm/Car_recognition/image/README/weixian.png  (Normal file, 960 KiB)
BIN  algorithm/Car_recognition/image/test.jpg  (Normal file, 482 KiB)
BIN  algorithm/Car_recognition/imgs/Quicker_20220930_180856.png  (Normal file, 1.4 MiB)
BIN  algorithm/Car_recognition/imgs/Quicker_20220930_180919.png  (Normal file, 1.0 MiB)
BIN  algorithm/Car_recognition/imgs/Quicker_20220930_180938.png  (Normal file, 241 KiB)
BIN  algorithm/Car_recognition/imgs/Quicker_20220930_181044.png  (Normal file, 328 KiB)
BIN  algorithm/Car_recognition/imgs/WJdouble.jpg  (Normal file, 67 KiB)
BIN  algorithm/Car_recognition/imgs/double_yellow.jpg  (Normal file, 29 KiB)
BIN  algorithm/Car_recognition/imgs/hongkang1.jpg  (Normal file, 571 KiB)
BIN  algorithm/Car_recognition/imgs/minghang.jpg  (Normal file, 584 KiB)
BIN  algorithm/Car_recognition/imgs/nongyong_double.jpg  (Normal file, 34 KiB)
BIN  algorithm/Car_recognition/imgs/police.jpg  (Normal file, 382 KiB)
BIN  algorithm/Car_recognition/imgs/shi_lin_guan.jpg  (Normal file, 47 KiB)
BIN  algorithm/Car_recognition/imgs/single_blue.jpg  (Normal file, 1.8 MiB)
BIN  algorithm/Car_recognition/imgs/single_green.jpg  (Normal file, 903 KiB)
BIN  algorithm/Car_recognition/imgs/single_yellow.jpg  (Normal file, 85 KiB)
BIN  algorithm/Car_recognition/imgs/tmpA5E3.png  (Normal file, 513 KiB)
BIN  algorithm/Car_recognition/imgs/xue.jpg  (Normal file, 999 KiB)
							
								
								
									
255  algorithm/Car_recognition/onnx_infer.py  (Normal file)
						| @ -0,0 +1,255 @@ | |||||||
|  | import onnxruntime | ||||||
|  | import numpy as np | ||||||
|  | import cv2 | ||||||
|  | import copy | ||||||
|  | import os | ||||||
|  | import argparse | ||||||
|  | from PIL import Image, ImageDraw, ImageFont | ||||||
|  | import time | ||||||
|  | 
 | ||||||
|  | plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" | ||||||
|  | mean_value, std_value = 0.588, 0.193  # mean and std used by the recognition model | ||||||
|  | 
 | ||||||
|  | def decodePlate(preds):  # recognition post-processing: decode indices into a plate string | ||||||
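|  |     # Greedy CTC-style decoding: collapse consecutive repeated indices and drop the | ||||||
|  |     # blank class (index 0), then map the surviving indices into plateName characters. | ||||||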
|  |     pre=0 | ||||||
|  |     newPreds=[] | ||||||
|  |     for i in range(len(preds)): | ||||||
|  |         if preds[i]!=0 and preds[i]!=pre: | ||||||
|  |             newPreds.append(preds[i]) | ||||||
|  |         pre=preds[i] | ||||||
|  |     plate="" | ||||||
|  |     for i in newPreds: | ||||||
|  |         plate+=plateName[int(i)] | ||||||
|  |     return plate | ||||||
|  |     # return newPreds | ||||||
|  | 
 | ||||||
|  | def rec_pre_precessing(img,size=(48,168)):  # recognition pre-processing: resize to 168x48, normalize, HWC->NCHW | ||||||
|  |     img =cv2.resize(img,(168,48)) | ||||||
|  |     img = img.astype(np.float32) | ||||||
|  |     img = (img/255-mean_value)/std_value  # normalize to [0,1], subtract mean, divide by std | ||||||
|  |     img = img.transpose(2,0,1)  # HWC to CHW | ||||||
|  |     img = img.reshape(1,*img.shape)  # CHW to NCHW (add batch dimension) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def get_plate_result(img,session_rec):  # run the recognition model and decode the plate string | ||||||
|  |     img =rec_pre_precessing(img) | ||||||
|  |     y_onnx = session_rec.run([session_rec.get_outputs()[0].name], {session_rec.get_inputs()[0].name: img})[0] | ||||||
|  |     # print(y_onnx[0]) | ||||||
|  |     index =np.argmax(y_onnx[0],axis=1)  # index of the most probable character at each position | ||||||
|  |     # print(y_onnx[0]) | ||||||
|  |     plate_no = decodePlate(index) | ||||||
|  |     # plate_no = decodePlate(y_onnx[0]) | ||||||
|  |     return plate_no | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def allFilePath(rootPath,allFIleList):  # recursively collect all file paths under rootPath | ||||||
|  |     fileList = os.listdir(rootPath) | ||||||
|  |     for temp in fileList: | ||||||
|  |         if os.path.isfile(os.path.join(rootPath,temp)): | ||||||
|  |             allFIleList.append(os.path.join(rootPath,temp)) | ||||||
|  |         else: | ||||||
|  |             allFilePath(os.path.join(rootPath,temp),allFIleList) | ||||||
|  | 
 | ||||||
|  | def get_split_merge(img):  # split a double-layer plate and merge it into a single text line | ||||||
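|  |     # The two crops overlap (rows h/3..5h/12 appear in both) so characters sitting on | ||||||
|  |     # the split line are not cut; the upper part is resized to the lower part's size | ||||||
|  |     # and the two halves are stacked horizontally into one single-line image. | ||||||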
|  |     h,w,c = img.shape | ||||||
|  |     img_upper = img[0:int(5/12*h),:] | ||||||
|  |     img_lower = img[int(1/3*h):,:] | ||||||
|  |     img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) | ||||||
|  |     new_img = np.hstack((img_upper,img_lower)) | ||||||
|  |     return new_img | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def order_points(pts):  # order the four corner points as (top-left, top-right, bottom-right, bottom-left) | ||||||
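|  |     # The smallest x+y is the top-left and the largest the bottom-right; the smallest | ||||||
|  |     # y-x is the top-right and the largest the bottom-left (np.diff gives y-x per row). | ||||||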
|  |     rect = np.zeros((4, 2), dtype = "float32") | ||||||
|  |     s = pts.sum(axis = 1) | ||||||
|  |     rect[0] = pts[np.argmin(s)] | ||||||
|  |     rect[2] = pts[np.argmax(s)] | ||||||
|  |     diff = np.diff(pts, axis = 1) | ||||||
|  |     rect[1] = pts[np.argmin(diff)] | ||||||
|  |     rect[3] = pts[np.argmax(diff)] | ||||||
|  |     return rect | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def four_point_transform(image, pts):  # perspective transform to rectify the plate region for recognition | ||||||
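|  |     # The output size is taken from the longer of each pair of opposite edges, so the | ||||||
|  |     # rectified plate keeps roughly the resolution it had in the source image. | ||||||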
|  |     rect = order_points(pts) | ||||||
|  |     (tl, tr, br, bl) = rect | ||||||
|  |     widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) | ||||||
|  |     widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) | ||||||
|  |     maxWidth = max(int(widthA), int(widthB)) | ||||||
|  |     heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) | ||||||
|  |     heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) | ||||||
|  |     maxHeight = max(int(heightA), int(heightB)) | ||||||
|  |     dst = np.array([ | ||||||
|  |         [0, 0], | ||||||
|  |         [maxWidth - 1, 0], | ||||||
|  |         [maxWidth - 1, maxHeight - 1], | ||||||
|  |         [0, maxHeight - 1]], dtype = "float32") | ||||||
|  |     M = cv2.getPerspectiveTransform(rect, dst) | ||||||
|  |     warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) | ||||||
|  |   | ||||||
|  |     # return the warped image | ||||||
|  |     return warped | ||||||
|  | 
 | ||||||
|  | def my_letter_box(img,size=(640,640)):  # letterbox: resize keeping aspect ratio, pad with gray to the target size | ||||||
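|  |     # r = min(target_h/h, target_w/w) is the single scale factor; left/top are the | ||||||
|  |     # border offsets, returned so detections can later be mapped back (see restore_box). | ||||||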
|  |     h,w,c = img.shape | ||||||
|  |     r = min(size[0]/h,size[1]/w) | ||||||
|  |     new_h,new_w = int(h*r),int(w*r) | ||||||
|  |     top = int((size[0]-new_h)/2) | ||||||
|  |     left = int((size[1]-new_w)/2) | ||||||
|  |      | ||||||
|  |     bottom = size[0]-new_h-top | ||||||
|  |     right = size[1]-new_w-left | ||||||
|  |     img_resize = cv2.resize(img,(new_w,new_h)) | ||||||
|  |     img = cv2.copyMakeBorder(img_resize,top,bottom,left,right,borderType=cv2.BORDER_CONSTANT,value=(114,114,114)) | ||||||
|  |     return img,r,left,top | ||||||
|  | 
 | ||||||
|  | def xywh2xyxy(boxes):  # convert center-format xywh boxes to corner format x1,y1,x2,y2 | ||||||
|  |     xywh =copy.deepcopy(boxes) | ||||||
|  |     xywh[:,0]=boxes[:,0]-boxes[:,2]/2 | ||||||
|  |     xywh[:,1]=boxes[:,1]-boxes[:,3]/2 | ||||||
|  |     xywh[:,2]=boxes[:,0]+boxes[:,2]/2 | ||||||
|  |     xywh[:,3]=boxes[:,1]+boxes[:,3]/2 | ||||||
|  |     return xywh | ||||||
|  |   | ||||||
|  | def my_nms(boxes,iou_thresh):  # non-maximum suppression over [x1,y1,x2,y2,score,...] rows | ||||||
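|  |     # Boxes are visited in descending score order (column 4); note union_area below is | ||||||
|  |     # actually areaA+areaB, so the iou line subtracts inter_area to get the true union. | ||||||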
|  |     index = np.argsort(boxes[:,4])[::-1] | ||||||
|  |     keep = [] | ||||||
|  |     while index.size >0: | ||||||
|  |         i = index[0] | ||||||
|  |         keep.append(i) | ||||||
|  |         x1=np.maximum(boxes[i,0],boxes[index[1:],0]) | ||||||
|  |         y1=np.maximum(boxes[i,1],boxes[index[1:],1]) | ||||||
|  |         x2=np.minimum(boxes[i,2],boxes[index[1:],2]) | ||||||
|  |         y2=np.minimum(boxes[i,3],boxes[index[1:],3]) | ||||||
|  |          | ||||||
|  |         w = np.maximum(0,x2-x1) | ||||||
|  |         h = np.maximum(0,y2-y1) | ||||||
|  | 
 | ||||||
|  |         inter_area = w*h | ||||||
|  |         union_area = (boxes[i,2]-boxes[i,0])*(boxes[i,3]-boxes[i,1])+(boxes[index[1:],2]-boxes[index[1:],0])*(boxes[index[1:],3]-boxes[index[1:],1]) | ||||||
|  |         iou = inter_area/(union_area-inter_area) | ||||||
|  |         idx = np.where(iou<=iou_thresh)[0] | ||||||
|  |         index = index[idx+1] | ||||||
|  |     return keep | ||||||
|  | 
 | ||||||
|  | def restore_box(boxes,r,left,top):  # map boxes and landmarks back to original-image coordinates | ||||||
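|  |     # Columns [0,2] are box x's and [5,7,9,11] landmark x's; [1,3] box y's and | ||||||
|  |     # [6,8,10,12] landmark y's: undo the letterbox padding, then divide by the ratio r. | ||||||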
|  |     boxes[:,[0,2,5,7,9,11]]-=left | ||||||
|  |     boxes[:,[1,3,6,8,10,12]]-=top | ||||||
|  | 
 | ||||||
|  |     boxes[:,[0,2,5,7,9,11]]/=r | ||||||
|  |     boxes[:,[1,3,6,8,10,12]]/=r | ||||||
|  |     return boxes | ||||||
|  | 
 | ||||||
|  | def detect_pre_precessing(img,img_size):  # detection pre-processing: letterbox, BGR->RGB, CHW, scale to [0,1], add batch dim | ||||||
|  |     img,r,left,top=my_letter_box(img,img_size) | ||||||
|  |     # cv2.imwrite("1.jpg",img) | ||||||
|  |     img =img[:,:,::-1].transpose(2,0,1).copy().astype(np.float32) | ||||||
|  |     img=img/255 | ||||||
|  |     img=img.reshape(1,*img.shape) | ||||||
|  |     return img,r,left,top | ||||||
|  | 
 | ||||||
|  | def post_precessing(dets,r,left,top,conf_thresh=0.3,iou_thresh=0.5):  # detection post-processing | ||||||
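|  |     # Each raw row is [cx,cy,w,h,objectness, 8 landmark coords, 2 class scores]; rows | ||||||
|  |     # are filtered by objectness, class scores are multiplied by objectness, and the | ||||||
|  |     # kept rows become [x1,y1,x2,y2,score, landmarks..., class index] before NMS. | ||||||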
|  |     choice = dets[:,:,4]>conf_thresh | ||||||
|  |     dets=dets[choice] | ||||||
|  |     dets[:,13:15]*=dets[:,4:5] | ||||||
|  |     box = dets[:,:4] | ||||||
|  |     boxes = xywh2xyxy(box) | ||||||
|  |     score= np.max(dets[:,13:15],axis=-1,keepdims=True) | ||||||
|  |     index = np.argmax(dets[:,13:15],axis=-1).reshape(-1,1) | ||||||
|  |     output = np.concatenate((boxes,score,dets[:,5:13],index),axis=1)  | ||||||
|  |     reserve_=my_nms(output,iou_thresh)  | ||||||
|  |     output=output[reserve_]  | ||||||
|  |     output = restore_box(output,r,left,top) | ||||||
|  |     return output | ||||||
|  | 
 | ||||||
|  | def rec_plate(outputs,img0,session_rec):  # crop, rectify and recognize each detected plate | ||||||
|  |     dict_list=[] | ||||||
|  |     for output in outputs: | ||||||
|  |         result_dict={} | ||||||
|  |         rect=output[:4].tolist() | ||||||
|  |         land_marks = output[5:13].reshape(4,2) | ||||||
|  |         roi_img = four_point_transform(img0,land_marks) | ||||||
|  |         label = int(output[-1]) | ||||||
|  |         score = output[4] | ||||||
|  |         if label==1:  # class 1 is a double-layer plate | ||||||
|  |             roi_img = get_split_merge(roi_img) | ||||||
|  |         plate_no = get_plate_result(roi_img,session_rec) | ||||||
|  |         result_dict['rect']=rect | ||||||
|  |         result_dict['landmarks']=land_marks.tolist() | ||||||
|  |         result_dict['plate_no']=plate_no | ||||||
|  |         result_dict['roi_height']=roi_img.shape[0] | ||||||
|  |         dict_list.append(result_dict) | ||||||
|  |     return dict_list | ||||||
|  | 
 | ||||||
|  | def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):  # draw text (incl. Chinese) on the image via PIL | ||||||
|  |     if (isinstance(img, np.ndarray)):  # convert an OpenCV ndarray to a PIL image | ||||||
|  |         img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) | ||||||
|  |     draw = ImageDraw.Draw(img) | ||||||
|  |     fontText = ImageFont.truetype( | ||||||
|  |         "fonts/platech.ttf", textSize, encoding="utf-8") | ||||||
|  |     draw.text((left, top), text, textColor, font=fontText) | ||||||
|  |     return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) | ||||||
|  | 
 | ||||||
|  | def draw_result(orgimg,dict_list): | ||||||
|  |     result_str ="" | ||||||
|  |     for result in dict_list: | ||||||
|  |         rect_area = result['rect'] | ||||||
|  |          | ||||||
|  |         x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] | ||||||
|  |         padding_w = 0.05*w | ||||||
|  |         padding_h = 0.11*h | ||||||
|  |         rect_area[0]=max(0,int(x-padding_w)) | ||||||
|  |         rect_area[1]=max(0,int(y-padding_h))  # was min(shape[1],...): the top edge must be clamped at 0 | ||||||
|  |         rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w))  # was max(0,...): the right edge clamps to image width | ||||||
|  |         rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) | ||||||
|  | 
 | ||||||
|  |         height_area = result['roi_height'] | ||||||
|  |         landmarks=result['landmarks'] | ||||||
|  |         result = result['plate_no'] | ||||||
|  |         result_str+=result+" " | ||||||
|  |         for i in range(4):  # draw the four landmark keypoints | ||||||
|  |             cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) | ||||||
|  |         cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2)  # draw the box | ||||||
|  |         if len(result)>=1: | ||||||
|  |             orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(255,0,0),height_area) | ||||||
|  |     print(result_str) | ||||||
|  |     return orgimg | ||||||
|  | 
 | ||||||
|  | if __name__ == "__main__": | ||||||
|  |     begin = time.time() | ||||||
|  |     parser = argparse.ArgumentParser() | ||||||
|  |     parser.add_argument('--detect_model',type=str, default=r'weights/plate_detect.onnx', help='model.pt path(s)')  # detection model | ||||||
|  |     parser.add_argument('--rec_model', type=str, default='weights/plate_rec.onnx', help='model.pt path(s)')  # recognition model | ||||||
|  |     parser.add_argument('--image_path', type=str, default='imgs', help='source')  | ||||||
|  |     parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') | ||||||
|  |     parser.add_argument('--output', type=str, default='result1', help='source')  | ||||||
|  |     opt = parser.parse_args() | ||||||
|  |     file_list = [] | ||||||
|  |     allFilePath(opt.image_path,file_list) | ||||||
|  |     providers =  ['CPUExecutionProvider'] | ||||||
|  |     clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] | ||||||
|  |     img_size = (opt.img_size,opt.img_size) | ||||||
|  |     session_detect = onnxruntime.InferenceSession(opt.detect_model, providers=providers ) | ||||||
|  |     session_rec = onnxruntime.InferenceSession(opt.rec_model, providers=providers ) | ||||||
|  |     if not os.path.exists(opt.output): | ||||||
|  |         os.mkdir(opt.output) | ||||||
|  |     save_path = opt.output | ||||||
|  |     count = 0 | ||||||
|  |     for pic_ in file_list: | ||||||
|  |         count+=1 | ||||||
|  |         print(count,pic_,end=" ") | ||||||
|  |         img=cv2.imread(pic_) | ||||||
|  |         img0 = copy.deepcopy(img) | ||||||
|  |         img,r,left,top = detect_pre_precessing(img,img_size)  # detection pre-processing | ||||||
|  |         # print(img.shape) | ||||||
|  |         y_onnx = session_detect.run([session_detect.get_outputs()[0].name], {session_detect.get_inputs()[0].name: img})[0] | ||||||
|  |         outputs = post_precessing(y_onnx,r,left,top)  # detection post-processing | ||||||
|  |         result_list=rec_plate(outputs,img0,session_rec) | ||||||
|  |         ori_img = draw_result(img0,result_list) | ||||||
|  |         img_name = os.path.basename(pic_) | ||||||
|  |         save_img_path = os.path.join(save_path,img_name) | ||||||
|  |         cv2.imwrite(save_img_path,ori_img) | ||||||
|  |     print(f"总共耗时{time.time()-begin} s") | ||||||
|  |      | ||||||
|  | 
 | ||||||
|  |          | ||||||
							
								
								
									
342  algorithm/Car_recognition/openvino_infer.py  (Normal file)
						| @ -0,0 +1,342 @@ | |||||||
|  | import cv2 | ||||||
|  | import matplotlib.pyplot as plt | ||||||
|  | import numpy as np | ||||||
|  | from openvino.runtime import Core | ||||||
|  | import os | ||||||
|  | import time | ||||||
|  | import copy | ||||||
|  | from PIL import Image, ImageDraw, ImageFont | ||||||
|  | import argparse | ||||||
|  | 
 | ||||||
|  | def cv_imread(path): | ||||||
|  |     img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def allFilePath(rootPath,allFIleList): | ||||||
|  |     fileList = os.listdir(rootPath) | ||||||
|  |     for temp in fileList: | ||||||
|  |         if os.path.isfile(os.path.join(rootPath,temp)): | ||||||
|  |             # if temp.endswith("jpg"): | ||||||
|  |             allFIleList.append(os.path.join(rootPath,temp)) | ||||||
|  |         else: | ||||||
|  |             allFilePath(os.path.join(rootPath,temp),allFIleList) | ||||||
|  | 
 | ||||||
|  | mean_value, std_value = 0.588, 0.193  # mean and std used by the recognition model | ||||||
|  | plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" | ||||||
|  | 
 | ||||||
|  | def rec_pre_precessing(img,size=(48,168)):  # recognition pre-processing | ||||||
|  |     img =cv2.resize(img,(168,48)) | ||||||
|  |     img = img.astype(np.float32) | ||||||
|  |     img = (img/255-mean_value)/std_value | ||||||
|  |     img = img.transpose(2,0,1) | ||||||
|  |     img = img.reshape(1,*img.shape) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def decodePlate(preds):  # recognition post-processing: CTC-style greedy decode | ||||||
|  |     pre=0 | ||||||
|  |     newPreds=[] | ||||||
|  |     preds=preds.astype(np.int8)[0] | ||||||
|  |     for i in range(len(preds)): | ||||||
|  |         if preds[i]!=0 and preds[i]!=pre: | ||||||
|  |             newPreds.append(preds[i]) | ||||||
|  |         pre=preds[i] | ||||||
|  |     plate="" | ||||||
|  |     for i in newPreds: | ||||||
|  |         plate+=plateName[int(i)] | ||||||
|  |     return plate | ||||||
|  | 
 | ||||||
|  | def load_model(onnx_path): | ||||||
|  |     ie = Core() | ||||||
|  |     model_onnx = ie.read_model(model=onnx_path) | ||||||
|  |     compiled_model_onnx = ie.compile_model(model=model_onnx, device_name="CPU") | ||||||
|  |     output_layer_onnx = compiled_model_onnx.output(0) | ||||||
|  |     return compiled_model_onnx,output_layer_onnx | ||||||
|  | 
 | ||||||
|  | def get_plate_result(img,rec_model,rec_output): | ||||||
|  |     img =rec_pre_precessing(img) | ||||||
|  |     # time_b = time.time() | ||||||
|  |     res_onnx = rec_model([img])[rec_output] | ||||||
|  |     # time_e= time.time() | ||||||
|  |     index =np.argmax(res_onnx,axis=-1)  # index of the most probable character at each position | ||||||
|  |     plate_no = decodePlate(index) | ||||||
|  |     # print(f'{plate_no},time is {time_e-time_b}') | ||||||
|  |     return plate_no | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def get_split_merge(img):  # split a double-layer plate and merge it into a single text line | ||||||
|  |     h,w,c = img.shape | ||||||
|  |     img_upper = img[0:int(5/12*h),:] | ||||||
|  |     img_lower = img[int(1/3*h):,:] | ||||||
|  |     img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) | ||||||
|  |     new_img = np.hstack((img_upper,img_lower)) | ||||||
|  |     return new_img | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def order_points(pts): | ||||||
|  |     rect = np.zeros((4, 2), dtype = "float32") | ||||||
|  |     s = pts.sum(axis = 1) | ||||||
|  |     rect[0] = pts[np.argmin(s)] | ||||||
|  |     rect[2] = pts[np.argmax(s)] | ||||||
|  |     diff = np.diff(pts, axis = 1) | ||||||
|  |     rect[1] = pts[np.argmin(diff)] | ||||||
|  |     rect[3] = pts[np.argmax(diff)] | ||||||
|  |     return rect | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def four_point_transform(image, pts): | ||||||
|  |     rect = order_points(pts) | ||||||
|  |     (tl, tr, br, bl) = rect | ||||||
|  |     widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2)) | ||||||
|  |     widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2)) | ||||||
|  |     maxWidth = max(int(widthA), int(widthB)) | ||||||
|  |     heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2)) | ||||||
|  |     heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2)) | ||||||
|  |     maxHeight = max(int(heightA), int(heightB)) | ||||||
|  |     dst = np.array([ | ||||||
|  |         [0, 0], | ||||||
|  |         [maxWidth - 1, 0], | ||||||
|  |         [maxWidth - 1, maxHeight - 1], | ||||||
|  |         [0, maxHeight - 1]], dtype = "float32") | ||||||
|  |     M = cv2.getPerspectiveTransform(rect, dst) | ||||||
|  |     warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) | ||||||
|  |   | ||||||
|  |     # return the warped image | ||||||
|  |     return warped | ||||||
|  | 
 | ||||||
|  | def my_letter_box(img,size=(640,640)): | ||||||
|  |     h,w,c = img.shape | ||||||
|  |     r = min(size[0]/h,size[1]/w) | ||||||
|  |     new_h,new_w = int(h*r),int(w*r) | ||||||
|  |     top = int((size[0]-new_h)/2) | ||||||
|  |     left = int((size[1]-new_w)/2) | ||||||
|  |      | ||||||
|  |     bottom = size[0]-new_h-top | ||||||
|  |     right = size[1]-new_w-left | ||||||
|  |     img_resize = cv2.resize(img,(new_w,new_h)) | ||||||
|  |     img = cv2.copyMakeBorder(img_resize,top,bottom,left,right,borderType=cv2.BORDER_CONSTANT,value=(114,114,114)) | ||||||
|  |     return img,r,left,top | ||||||
|  | 
 | ||||||
|  | def xywh2xyxy(boxes): | ||||||
|  |     xywh =copy.deepcopy(boxes) | ||||||
|  |     xywh[:,0]=boxes[:,0]-boxes[:,2]/2 | ||||||
|  |     xywh[:,1]=boxes[:,1]-boxes[:,3]/2 | ||||||
|  |     xywh[:,2]=boxes[:,0]+boxes[:,2]/2 | ||||||
|  |     xywh[:,3]=boxes[:,1]+boxes[:,3]/2 | ||||||
|  |     return xywh | ||||||
|  | 
 | ||||||
|  | def my_nms(boxes,iou_thresh): | ||||||
|  |     index = np.argsort(boxes[:,4])[::-1] | ||||||
|  |     keep = [] | ||||||
|  |     while index.size >0: | ||||||
|  |         i = index[0] | ||||||
|  |         keep.append(i) | ||||||
|  |         x1=np.maximum(boxes[i,0],boxes[index[1:],0]) | ||||||
|  |         y1=np.maximum(boxes[i,1],boxes[index[1:],1]) | ||||||
|  |         x2=np.minimum(boxes[i,2],boxes[index[1:],2]) | ||||||
|  |         y2=np.minimum(boxes[i,3],boxes[index[1:],3]) | ||||||
|  |          | ||||||
|  |         w = np.maximum(0,x2-x1) | ||||||
|  |         h = np.maximum(0,y2-y1) | ||||||
|  | 
 | ||||||
|  |         inter_area = w*h | ||||||
|  |         union_area = (boxes[i,2]-boxes[i,0])*(boxes[i,3]-boxes[i,1])+(boxes[index[1:],2]-boxes[index[1:],0])*(boxes[index[1:],3]-boxes[index[1:],1]) | ||||||
|  |         iou = inter_area/(union_area-inter_area) | ||||||
|  |         idx = np.where(iou<=iou_thresh)[0] | ||||||
|  |         index = index[idx+1] | ||||||
|  |     return keep | ||||||
|  | 
 | ||||||
|  | def restore_box(boxes,r,left,top): | ||||||
|  |     boxes[:,[0,2,5,7,9,11]]-=left | ||||||
|  |     boxes[:,[1,3,6,8,10,12]]-=top | ||||||
|  | 
 | ||||||
|  |     boxes[:,[0,2,5,7,9,11]]/=r | ||||||
|  |     boxes[:,[1,3,6,8,10,12]]/=r | ||||||
|  |     return boxes | ||||||
|  | 
 | ||||||
|  | def detect_pre_precessing(img,img_size): | ||||||
|  |     img,r,left,top=my_letter_box(img,img_size) | ||||||
|  |     # cv2.imwrite("1.jpg",img) | ||||||
|  |     img =img[:,:,::-1].transpose(2,0,1).copy().astype(np.float32) | ||||||
|  |     img=img/255 | ||||||
|  |     img=img.reshape(1,*img.shape) | ||||||
|  |     return img,r,left,top | ||||||
|  | 
 | ||||||
|  | def post_precessing(dets,r,left,top,conf_thresh=0.3,iou_thresh=0.5):  # detection post-processing | ||||||
|  |     choice = dets[:,:,4]>conf_thresh | ||||||
|  |     dets=dets[choice] | ||||||
|  |     dets[:,13:15]*=dets[:,4:5] | ||||||
|  |     box = dets[:,:4] | ||||||
|  |     boxes = xywh2xyxy(box) | ||||||
|  |     score= np.max(dets[:,13:15],axis=-1,keepdims=True) | ||||||
|  |     index = np.argmax(dets[:,13:15],axis=-1).reshape(-1,1) | ||||||
|  |     output = np.concatenate((boxes,score,dets[:,5:13],index),axis=1)  | ||||||
|  |     reserve_=my_nms(output,iou_thresh)  | ||||||
|  |     output=output[reserve_]  | ||||||
|  |     output = restore_box(output,r,left,top) | ||||||
|  |     return output | ||||||
|  | 
 | ||||||
|  | def rec_plate(outputs,img0,rec_model,rec_output): | ||||||
|  |     dict_list=[] | ||||||
|  |     for output in outputs: | ||||||
|  |         result_dict={} | ||||||
|  |         rect=output[:4].tolist() | ||||||
|  |         land_marks = output[5:13].reshape(4,2) | ||||||
|  |         roi_img = four_point_transform(img0,land_marks) | ||||||
|  |         label = int(output[-1]) | ||||||
|  |         if label==1:  # class 1 is a double-layer plate | ||||||
|  |             roi_img = get_split_merge(roi_img) | ||||||
|  |         plate_no = get_plate_result(roi_img,rec_model,rec_output)  # get the plate recognition result | ||||||
|  |         result_dict['rect']=rect | ||||||
|  |         result_dict['landmarks']=land_marks.tolist() | ||||||
|  |         result_dict['plate_no']=plate_no | ||||||
|  |         result_dict['roi_height']=roi_img.shape[0] | ||||||
|  |         dict_list.append(result_dict) | ||||||
|  |     return dict_list | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20): | ||||||
|  |     if (isinstance(img, np.ndarray)):  # convert an OpenCV ndarray to a PIL image | ||||||
|  |         img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) | ||||||
|  |     draw = ImageDraw.Draw(img) | ||||||
|  |     fontText = ImageFont.truetype( | ||||||
|  |         "fonts/platech.ttf", textSize, encoding="utf-8") | ||||||
|  |     draw.text((left, top), text, textColor, font=fontText) | ||||||
|  |     return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) | ||||||
|  | 
 | ||||||
|  | def draw_result(orgimg,dict_list): | ||||||
|  |     result_str ="" | ||||||
|  |     for result in dict_list: | ||||||
|  |         rect_area = result['rect'] | ||||||
|  |          | ||||||
|  |         x,y,w,h = rect_area[0],rect_area[1],rect_area[2]-rect_area[0],rect_area[3]-rect_area[1] | ||||||
|  |         padding_w = 0.05*w | ||||||
|  |         padding_h = 0.11*h | ||||||
|  |         rect_area[0]=max(0,int(x-padding_w)) | ||||||
|  |         rect_area[1]=max(0,int(y-padding_h))  # was min(shape[1],...): the top edge must be clamped at 0 | ||||||
|  |         rect_area[2]=min(orgimg.shape[1],int(rect_area[2]+padding_w))  # was max(0,...): the right edge clamps to image width | ||||||
|  |         rect_area[3]=min(orgimg.shape[0],int(rect_area[3]+padding_h)) | ||||||
|  | 
 | ||||||
|  |         height_area = result['roi_height'] | ||||||
|  |         landmarks=result['landmarks'] | ||||||
|  |         result = result['plate_no'] | ||||||
|  |         result_str+=result+" " | ||||||
|  |         # for i in range(4):  # landmark keypoints | ||||||
|  |         #     cv2.circle(orgimg, (int(landmarks[i][0]), int(landmarks[i][1])), 5, clors[i], -1) | ||||||
|  |          | ||||||
|  |         if len(result)>=6: | ||||||
|  |             cv2.rectangle(orgimg,(rect_area[0],rect_area[1]),(rect_area[2],rect_area[3]),(0,0,255),2)  # draw the box | ||||||
|  |             orgimg=cv2ImgAddText(orgimg,result,rect_area[0]-height_area,rect_area[1]-height_area-10,(0,255,0),height_area) | ||||||
|  |     # print(result_str) | ||||||
|  |     return orgimg | ||||||
|  | 
 | ||||||
|  | def get_second(capture): | ||||||
|  |     if capture.isOpened(): | ||||||
|  |         rate = capture.get(5)   # frame rate | ||||||
|  |         FrameNumber = capture.get(7)  # total number of frames | ||||||
|  |         duration = FrameNumber/rate  # total frames / frame rate = duration in seconds | ||||||
|  |         return int(rate),int(FrameNumber),int(duration)     | ||||||
|  | 
 | ||||||
|  | if __name__=="__main__": | ||||||
|  |     parser = argparse.ArgumentParser() | ||||||
|  |     parser.add_argument('--detect_model',type=str, default=r'weights/plate_detect.onnx', help='model.pt path(s)')  # detection model | ||||||
|  |     parser.add_argument('--rec_model', type=str, default='weights/plate_rec.onnx', help='model.pt path(s)')  # recognition model | ||||||
|  |     parser.add_argument('--image_path', type=str, default='imgs', help='source')  | ||||||
|  |     parser.add_argument('--img_size', type=int, default=640, help='inference size (pixels)') | ||||||
|  |     parser.add_argument('--output', type=str, default='result1', help='source')  | ||||||
|  |     opt = parser.parse_args() | ||||||
|  |     file_list=[] | ||||||
|  |     file_folder=opt.image_path | ||||||
|  |     allFilePath(file_folder,file_list) | ||||||
|  |     rec_onnx_path =opt.rec_model | ||||||
|  |     detect_onnx_path=opt.detect_model | ||||||
|  |     rec_model,rec_output=load_model(rec_onnx_path) | ||||||
|  |     detect_model,detect_output=load_model(detect_onnx_path) | ||||||
|  |     count=0 | ||||||
|  |     img_size=(opt.img_size,opt.img_size) | ||||||
|  |     begin=time.time() | ||||||
|  |     save_path=opt.output | ||||||
|  |     if not os.path.exists(save_path): | ||||||
|  |         os.mkdir(save_path) | ||||||
|  |     for pic_ in file_list: | ||||||
|  |      | ||||||
|  |         count+=1 | ||||||
|  |         print(count,pic_,end=" ") | ||||||
|  |         img=cv2.imread(pic_) | ||||||
|  |         time_b = time.time() | ||||||
|  |         if img.shape[-1]==4: | ||||||
|  |             img = cv2.cvtColor(img,cv2.COLOR_BGRA2BGR) | ||||||
|  |         img0 = copy.deepcopy(img) | ||||||
|  |         img,r,left,top = detect_pre_precessing(img,img_size)  # detection pre-processing | ||||||
|  |         # print(img.shape) | ||||||
|  |         det_result = detect_model([img])[detect_output] | ||||||
|  |         outputs = post_precessing(det_result,r,left,top)  # detection post-processing | ||||||
|  |         time_1 = time.time() | ||||||
|  |         result_list=rec_plate(outputs,img0,rec_model,rec_output) | ||||||
|  |         time_e= time.time() | ||||||
|  |         print(f'elapsed {time_e-time_b} s') | ||||||
|  |         ori_img = draw_result(img0,result_list) | ||||||
|  |         img_name = os.path.basename(pic_) | ||||||
|  |         save_img_path = os.path.join(save_path,img_name) | ||||||
|  |          | ||||||
|  |         cv2.imwrite(save_img_path,ori_img) | ||||||
|  | print(f"总共耗时{time.time()-begin} s") | ||||||
|  | 
 | ||||||
|  |     # video_name = r"plate.mp4" | ||||||
|  |     # capture=cv2.VideoCapture(video_name) | ||||||
|  |     # fourcc = cv2.VideoWriter_fourcc(*'MP4V')  | ||||||
|  |     # fps = capture.get(cv2.CAP_PROP_FPS)  # frame rate | ||||||
|  |     # width, height = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)), int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))  # width and height | ||||||
|  |     # out = cv2.VideoWriter('2result.mp4', fourcc, fps, (width, height))  # video writer | ||||||
|  |     # frame_count = 0 | ||||||
|  |     # fps_all=0 | ||||||
|  |     # rate,FrameNumber,duration=get_second(capture) | ||||||
|  |     # # with open("example.csv",mode='w',newline='') as example_file: | ||||||
|  |     #     # fieldnames = ['车牌', '时间'] | ||||||
|  |     #     # writer = csv.DictWriter(example_file, fieldnames=fieldnames, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL) | ||||||
|  |     #     # writer.writeheader() | ||||||
|  |     # if capture.isOpened(): | ||||||
|  |     #     while True: | ||||||
|  |     #         t1 = cv2.getTickCount() | ||||||
|  |     #         frame_count+=1 | ||||||
|  |     #         ret,img=capture.read() | ||||||
|  |     #         if not ret: | ||||||
|  |     #             break | ||||||
|  |     #         # if frame_count%rate==0: | ||||||
|  |     #         img0 = copy.deepcopy(img) | ||||||
|  |     #         img,r,left,top = detect_pre_precessing(img,img_size)  # detection pre-processing | ||||||
|  |     #         # print(img.shape) | ||||||
|  |     #         det_result = detect_model([img])[detect_output] | ||||||
|  |     #         outputs = post_precessing(det_result,r,left,top)  # detection post-processing | ||||||
|  |     #         result_list=rec_plate(outputs,img0,rec_model,rec_output) | ||||||
|  |     #         ori_img = draw_result(img0,result_list) | ||||||
|  |     #         t2 =cv2.getTickCount() | ||||||
|  |     #         infer_time =(t2-t1)/cv2.getTickFrequency() | ||||||
|  |     #         fps=1.0/infer_time | ||||||
|  |     #         fps_all+=fps | ||||||
|  |     #         str_fps = f'fps:{fps:.4f}' | ||||||
|  |     #         out.write(ori_img) | ||||||
|  |     #         cv2.putText(ori_img,str_fps,(20,20),cv2.FONT_HERSHEY_SIMPLEX,1,(0,255,0),2) | ||||||
|  |     #         cv2.imshow("haha",ori_img) | ||||||
|  |     #         cv2.waitKey(1) | ||||||
|  | 
 | ||||||
|  |     #         # current_time = int(frame_count/FrameNumber*duration) | ||||||
|  |     #         # sec = current_time%60 | ||||||
|  |     #         # minute = current_time//60 | ||||||
|  |     #         # for result_ in result_list: | ||||||
|  |     #         #     plate_no = result_['plate_no'] | ||||||
|  |     #         #     if not is_car_number(pattern_str,plate_no): | ||||||
|  |     #         #         continue | ||||||
|  |     #         #     print(f'车牌号:{plate_no},时间:{minute}分{sec}秒') | ||||||
|  |     #         #     time_str =f'{minute}分{sec}秒' | ||||||
|  |     #         #     writer.writerow({"车牌":plate_no,"时间":time_str}) | ||||||
|  |     #         # out.write(ori_img) | ||||||
|  |              | ||||||
|  |              | ||||||
|  |     # else: | ||||||
|  |     #     print("失败") | ||||||
|  |     # capture.release() | ||||||
|  |     # out.release() | ||||||
|  |     # cv2.destroyAllWindows() | ||||||
|  |     # print(f"all frame is {frame_count},average fps is {fps_all/frame_count}") | ||||||
|  | 
 | ||||||
							
								
								
									
74  algorithm/Car_recognition/plate_recognition/color_rec.py  (Normal file)
						| @ -0,0 +1,74 @@ | |||||||
|  | import warnings | ||||||
|  | import cv2 | ||||||
|  | import torch | ||||||
|  | import numpy as np | ||||||
|  | import torch.nn as nn | ||||||
|  | from torchvision import transforms | ||||||
|  | from algorithm.Car_recognition.plate_recognition.plateNet import MyNet_color | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class MyNet(nn.Module):  # unused local duplicate of plateNet.MyNet_color (init_color_model below uses the imported class) | ||||||
|  |     def __init__(self, class_num=6): | ||||||
|  |         super(MyNet, self).__init__() | ||||||
|  |         self.class_num = class_num | ||||||
|  |         self.backbone = nn.Sequential( | ||||||
|  |             nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=(1, 1)),  # 0 | ||||||
|  |             torch.nn.BatchNorm2d(16), | ||||||
|  |             nn.ReLU(), | ||||||
|  |             nn.MaxPool2d(kernel_size=(2, 2)), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.Flatten(), | ||||||
|  |             nn.Linear(480, 64), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.ReLU(), | ||||||
|  |             nn.Linear(64, class_num), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.Softmax(1) | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         logits = self.backbone(x) | ||||||
|  | 
 | ||||||
|  |         return logits | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def init_color_model(model_path,device): | ||||||
|  | 
 | ||||||
|  |     # device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | ||||||
|  |     # print("color_rec_device:", device) | ||||||
|  |     # PATH = 'E:\study\plate\Chinese_license_plate_detection_recognition-main\weights\color_classify.pth'  # example model path | ||||||
|  |     class_num = 6 | ||||||
|  |     warnings.filterwarnings('ignore') | ||||||
|  |     net = MyNet_color(class_num) | ||||||
|  |     net.load_state_dict(torch.load(model_path, map_location=torch.device(device))) | ||||||
|  |     net.eval().to(device) | ||||||
|  |     modelc = net | ||||||
|  |      | ||||||
|  |     return modelc | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def plate_color_rec(img,model,device): | ||||||
|  |     class_name = ['黑色', '蓝色', '', '绿色', '白色', '黄色']  # black, blue, <empty slot: 'danger' in the English list below>, green, white, yellow | ||||||
|  |     data_input = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) | ||||||
|  |     image = cv2.resize(data_input, (34, 9)) | ||||||
|  |     image = np.transpose(image, (2, 0, 1)) | ||||||
|  |     img = image / 255 | ||||||
|  |     img = torch.tensor(img) | ||||||
|  | 
 | ||||||
|  |     normalize = transforms.Normalize(mean=[0.4243, 0.4947, 0.434], | ||||||
|  |                                      std=[0.2569, 0.2478, 0.2174]) | ||||||
|  |     img = normalize(img) | ||||||
|  |     img = torch.unsqueeze(img, dim=0).to(device).float() | ||||||
|  |     xx = model(img) | ||||||
|  |      | ||||||
|  |     return class_name[int(torch.argmax(xx, dim=1)[0])] | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     class_name = ['black', 'blue', 'danger', 'green', 'white', 'yellow'] | ||||||
|  |     data_input = cv2.imread("/mnt/Gpan/Mydata/pytorchPorject/myCrnnPlate/images/test.jpg")  # (height, width, channels (B,G,R)), i.e. (H,W,C) | ||||||
|  |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # was is_available without the call parentheses | ||||||
|  |     model = init_color_model("/mnt/Gpan/Mydata/pytorchPorject/Chinese_license_plate_detection_recognition/weights/color_classify.pth",device) | ||||||
|  |     color_code = plate_color_rec(data_input,model,device)  # returns the color label string itself | ||||||
|  |     print(color_code) | ||||||
|  |     # print(class_name[color_code]) would raise a TypeError: color_code is a string, not an index | ||||||
| @ -0,0 +1,15 @@ | |||||||
|  | import os | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | def get_split_merge(img): | ||||||
|  |     h,w,c = img.shape | ||||||
|  |     img_upper = img[0:int(5/12*h),:] | ||||||
|  |     img_lower = img[int(1/3*h):,:] | ||||||
|  |     img_upper = cv2.resize(img_upper,(img_lower.shape[1],img_lower.shape[0])) | ||||||
|  |     new_img = np.hstack((img_upper,img_lower)) | ||||||
|  |     return new_img | ||||||
|  | 
 | ||||||
|  | if __name__=="__main__": | ||||||
|  |     img = cv2.imread("double_plate/tmp8078.png") | ||||||
|  |     new_img =get_split_merge(img) | ||||||
|  |     cv2.imwrite("double_plate/new.jpg",new_img) | ||||||
							
								
								
									
210  algorithm/Car_recognition/plate_recognition/plateNet.py  (Normal file)
						| @ -0,0 +1,210 @@ | |||||||
|  | import torch.nn as nn | ||||||
|  | import torch | ||||||
|  | import torch.nn.functional as F | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class myNet_ocr(nn.Module): | ||||||
|  |     def __init__(self,cfg=None,num_classes=78,export=False): | ||||||
|  |         super(myNet_ocr, self).__init__() | ||||||
|  |         if cfg is None: | ||||||
|  |             cfg =[32,32,64,64,'M',128,128,'M',196,196,'M',256,256] | ||||||
|  |             # cfg =[32,32,'M',64,64,'M',128,128,'M',256,256] | ||||||
|  |         self.feature = self.make_layers(cfg, True) | ||||||
|  |         self.export = export | ||||||
|  |         # self.classifier = nn.Linear(cfg[-1], num_classes) | ||||||
|  |         # self.loc =  nn.MaxPool2d((2, 2), (5, 1), (0, 1),ceil_mode=True) | ||||||
|  |         # self.loc =  nn.AvgPool2d((2, 2), (5, 2), (0, 1),ceil_mode=False) | ||||||
|  |         self.loc =  nn.MaxPool2d((5, 2), (1, 1),(0,1),ceil_mode=False) | ||||||
|  |         self.newCnn=nn.Conv2d(cfg[-1],num_classes,1,1) | ||||||
|  |         # self.newBn=nn.BatchNorm2d(num_classes) | ||||||
|  |     def make_layers(self, cfg, batch_norm=False): | ||||||
|  |         layers = [] | ||||||
|  |         in_channels = 3 | ||||||
|  |         for i in range(len(cfg)): | ||||||
|  |             if i == 0: | ||||||
|  |                 conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) | ||||||
|  |                 if batch_norm: | ||||||
|  |                     layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                 else: | ||||||
|  |                     layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                 in_channels = cfg[i] | ||||||
|  |             else : | ||||||
|  |                 if cfg[i] == 'M': | ||||||
|  |                     layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] | ||||||
|  |                 else: | ||||||
|  |                     conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=(1,1),stride =1) | ||||||
|  |                     if batch_norm: | ||||||
|  |                         layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                     else: | ||||||
|  |                         layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                     in_channels = cfg[i] | ||||||
|  |         return nn.Sequential(*layers) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         x = self.feature(x) | ||||||
|  |         x=self.loc(x) | ||||||
|  |         x=self.newCnn(x) | ||||||
|  |         # x=self.newBn(x) | ||||||
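|  |         # export mode: return per-timestep argmax indices (handy for ONNX deployment); | ||||||
|  |         # otherwise return [w, b, c] softmax probabilities for CTC-style decoding. | ||||||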
|  |         if self.export: | ||||||
|  |             conv = x.squeeze(2)  # [b, num_classes, w] | ||||||
|  |             conv = conv.transpose(2,1)  # [b, w, num_classes] | ||||||
|  |             conv =conv.argmax(dim=2) | ||||||
|  |             return conv | ||||||
|  |         else: | ||||||
|  |             b, c, h, w = x.size() | ||||||
|  |             assert h == 1, "the height of conv must be 1" | ||||||
|  |             conv = x.squeeze(2)  # [b, num_classes, w] | ||||||
|  |             conv = conv.permute(2, 0, 1)  # [w, b, c] | ||||||
|  |             # output = F.log_softmax(self.rnn(conv), dim=2) | ||||||
|  |             output = torch.softmax(conv, dim=2) | ||||||
|  |             return output | ||||||
|  | 
 | ||||||
|  | myCfg = [32,'M',64,'M',96,'M',128,'M',256] | ||||||
|  | class myNet(nn.Module): | ||||||
|  |     def __init__(self,cfg=None,num_classes=3): | ||||||
|  |         super(myNet, self).__init__() | ||||||
|  |         if cfg is None: | ||||||
|  |             cfg = myCfg | ||||||
|  |         self.feature = self.make_layers(cfg, True) | ||||||
|  |         self.classifier = nn.Linear(cfg[-1], num_classes) | ||||||
|  |     def make_layers(self, cfg, batch_norm=False): | ||||||
|  |         layers = [] | ||||||
|  |         in_channels = 3 | ||||||
|  |         for i in range(len(cfg)): | ||||||
|  |             if i == 0: | ||||||
|  |                 conv2d =nn.Conv2d(in_channels, cfg[i], kernel_size=5,stride =1) | ||||||
|  |                 if batch_norm: | ||||||
|  |                     layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                 else: | ||||||
|  |                     layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                 in_channels = cfg[i] | ||||||
|  |             else : | ||||||
|  |                 if cfg[i] == 'M': | ||||||
|  |                     layers += [nn.MaxPool2d(kernel_size=3, stride=2,ceil_mode=True)] | ||||||
|  |                 else: | ||||||
|  |                     conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1,stride =1) | ||||||
|  |                     if batch_norm: | ||||||
|  |                         layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                     else: | ||||||
|  |                         layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                     in_channels = cfg[i] | ||||||
|  |         return nn.Sequential(*layers) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         x = self.feature(x) | ||||||
|  |         x = nn.AvgPool2d(kernel_size=3, stride=1)(x) | ||||||
|  |         x = x.view(x.size(0), -1) | ||||||
|  |         y = self.classifier(x) | ||||||
|  |         return y | ||||||
|  |      | ||||||
|  |      | ||||||
|  | class MyNet_color(nn.Module): | ||||||
|  |     def __init__(self, class_num=6): | ||||||
|  |         super(MyNet_color, self).__init__() | ||||||
|  |         self.class_num = class_num | ||||||
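|  |         # Expects a 3x9x34 input (H=9, W=34, as produced by color_rec.plate_color_rec): | ||||||
|  |         # conv5x5 -> 16x5x30, maxpool2 -> 16x2x15, flatten -> 480 = the Linear's in_features. | ||||||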
|  |         self.backbone = nn.Sequential( | ||||||
|  |             nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(5, 5), stride=(1, 1)),  # 0 | ||||||
|  |             torch.nn.BatchNorm2d(16), | ||||||
|  |             nn.ReLU(), | ||||||
|  |             nn.MaxPool2d(kernel_size=(2, 2)), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.Flatten(), | ||||||
|  |             nn.Linear(480, 64), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.ReLU(), | ||||||
|  |             nn.Linear(64, class_num), | ||||||
|  |             nn.Dropout(0), | ||||||
|  |             nn.Softmax(1) | ||||||
|  |         ) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         logits = self.backbone(x) | ||||||
|  | 
 | ||||||
|  |         return logits | ||||||
|  |      | ||||||
|  |      | ||||||
|  | class myNet_ocr_color(nn.Module): | ||||||
|  |     def __init__(self, cfg=None, num_classes=78, export=False, color_num=None): | ||||||
|  |         super(myNet_ocr_color, self).__init__() | ||||||
|  |         if cfg is None: | ||||||
|  |             cfg = [32,32,64,64,'M',128,128,'M',196,196,'M',256,256] | ||||||
|  |             # cfg = [32,32,'M',64,64,'M',128,128,'M',256,256] | ||||||
|  |         self.feature = self.make_layers(cfg, True) | ||||||
|  |         self.export = export | ||||||
|  |         self.color_num = color_num | ||||||
|  |         self.conv_out_num = 12  # the colour branch's first conv layer outputs 12 channels | ||||||
|  |         if self.color_num: | ||||||
|  |             self.conv1 = nn.Conv2d(cfg[-1], self.conv_out_num, kernel_size=3, stride=2) | ||||||
|  |             self.bn1 = nn.BatchNorm2d(self.conv_out_num) | ||||||
|  |             self.relu1 = nn.ReLU(inplace=True) | ||||||
|  |             self.gap = nn.AdaptiveAvgPool2d(output_size=1) | ||||||
|  |             self.color_classifier = nn.Conv2d(self.conv_out_num, self.color_num, kernel_size=1, stride=1) | ||||||
|  |             self.color_bn = nn.BatchNorm2d(self.color_num) | ||||||
|  |             self.flatten = nn.Flatten() | ||||||
|  |             # self.relu = nn.ReLU(inplace=True) | ||||||
|  |         # self.classifier = nn.Linear(cfg[-1], num_classes) | ||||||
|  |         # self.loc = nn.MaxPool2d((2, 2), (5, 1), (0, 1), ceil_mode=True) | ||||||
|  |         # self.loc = nn.AvgPool2d((2, 2), (5, 2), (0, 1), ceil_mode=False) | ||||||
|  |         self.loc = nn.MaxPool2d((5, 2), (1, 1), (0, 1), ceil_mode=False) | ||||||
|  |         self.newCnn = nn.Conv2d(cfg[-1], num_classes, 1, 1) | ||||||
|  |         # self.newBn = nn.BatchNorm2d(num_classes) | ||||||
|  |     def make_layers(self, cfg, batch_norm=False): | ||||||
|  |         layers = [] | ||||||
|  |         in_channels = 3 | ||||||
|  |         for i in range(len(cfg)): | ||||||
|  |             if i == 0: | ||||||
|  |                 conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=5, stride=1) | ||||||
|  |                 if batch_norm: | ||||||
|  |                     layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                 else: | ||||||
|  |                     layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                 in_channels = cfg[i] | ||||||
|  |             elif cfg[i] == 'M': | ||||||
|  |                 layers += [nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)] | ||||||
|  |             else: | ||||||
|  |                 conv2d = nn.Conv2d(in_channels, cfg[i], kernel_size=3, padding=1, stride=1) | ||||||
|  |                 if batch_norm: | ||||||
|  |                     layers += [conv2d, nn.BatchNorm2d(cfg[i]), nn.ReLU(inplace=True)] | ||||||
|  |                 else: | ||||||
|  |                     layers += [conv2d, nn.ReLU(inplace=True)] | ||||||
|  |                 in_channels = cfg[i] | ||||||
|  |         return nn.Sequential(*layers) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         x = self.feature(x) | ||||||
|  |         if self.color_num: | ||||||
|  |             x_color = self.conv1(x) | ||||||
|  |             x_color = self.bn1(x_color) | ||||||
|  |             x_color = self.relu1(x_color) | ||||||
|  |             x_color = self.color_classifier(x_color) | ||||||
|  |             x_color = self.color_bn(x_color) | ||||||
|  |             x_color = self.gap(x_color) | ||||||
|  |             x_color = self.flatten(x_color) | ||||||
|  |         x = self.loc(x) | ||||||
|  |         x = self.newCnn(x) | ||||||
|  |  | ||||||
|  |         if self.export: | ||||||
|  |             conv = x.squeeze(2)  # (b, num_classes, w) | ||||||
|  |             conv = conv.transpose(2, 1)  # (b, w, num_classes) | ||||||
|  |             if self.color_num: | ||||||
|  |                 return conv, x_color | ||||||
|  |             return conv | ||||||
|  |         else: | ||||||
|  |             b, c, h, w = x.size() | ||||||
|  |             assert h == 1, "the height of conv must be 1" | ||||||
|  |             conv = x.squeeze(2)  # (b, num_classes, w) | ||||||
|  |             conv = conv.permute(2, 0, 1)  # (w, b, num_classes) | ||||||
|  |             output = F.log_softmax(conv, dim=2) | ||||||
|  |             if self.color_num: | ||||||
|  |                 return output, x_color | ||||||
|  |             return output | ||||||
|  |      | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     x = torch.randn(1, 3, 48, 216) | ||||||
|  |     model = myNet_ocr(num_classes=78, export=True) | ||||||
|  |     out = model(x) | ||||||
|  |     print(out.shape) | ||||||
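|  |  | ||||||
|  |     # Quick check of the joint OCR + colour head as well; the expected shapes | ||||||
|  |     # assume the default cfg above and the 48x168 input used by plate_rec.py. | ||||||
|  |     color_model = myNet_ocr_color(num_classes=78, export=True, color_num=5) | ||||||
|  |     seq_out, color_out = color_model(torch.randn(1, 3, 48, 168)) | ||||||
|  |     print(seq_out.shape, color_out.shape)  # expected: (1, 21, 78) and (1, 5) | ||||||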
							
								
								
									
103  algorithm/Car_recognition/plate_recognition/plate_rec.py  Normal file
						| @ -0,0 +1,103 @@ | |||||||
|  | from algorithm.Car_recognition.plate_recognition.plateNet import myNet_ocr,myNet_ocr_color | ||||||
|  | import torch | ||||||
|  | import torch.nn as nn | ||||||
|  | import cv2 | ||||||
|  | import numpy as np | ||||||
|  | import os | ||||||
|  | import time | ||||||
|  | import sys | ||||||
|  | 
 | ||||||
|  | def cv_imread(path):  # reads images whose path may contain non-ASCII (e.g. Chinese) characters | ||||||
|  |     img=cv2.imdecode(np.fromfile(path,dtype=np.uint8),-1) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def allFilePath(rootPath,allFIleList): | ||||||
|  |     fileList = os.listdir(rootPath) | ||||||
|  |     for temp in fileList: | ||||||
|  |         if os.path.isfile(os.path.join(rootPath,temp)): | ||||||
|  |             if temp.endswith('.jpg') or temp.endswith('.png') or temp.endswith('.JPG'): | ||||||
|  |                 allFIleList.append(os.path.join(rootPath,temp)) | ||||||
|  |         else: | ||||||
|  |             allFilePath(os.path.join(rootPath,temp),allFIleList) | ||||||
|  | device = torch.device('cuda') if torch.cuda.is_available() else torch.device("cpu") | ||||||
|  | plateName=r"#京沪津渝冀晋蒙辽吉黑苏浙皖闽赣鲁豫鄂湘粤桂琼川贵云藏陕甘青宁新学警港澳挂使领民航危0123456789ABCDEFGHJKLMNPQRSTUVWXYZ险品" | ||||||
|  | color_list=['黑色','蓝色','绿色','白色','黄色'] | ||||||
|  | mean_value,std_value=(0.588,0.193) | ||||||
|  | def decodePlate(preds): | ||||||
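|  |     # CTC-style greedy collapse: index 0 is the blank ('#' in plateName) and | ||||||
|  |     # consecutive repeats are merged, e.g. [0, 12, 12, 0, 5, 5, 0] -> [12, 5] | ||||||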
|  |     pre=0 | ||||||
|  |     newPreds=[] | ||||||
|  |     for i in range(len(preds)): | ||||||
|  |         if preds[i]!=0 and preds[i]!=pre: | ||||||
|  |             newPreds.append(preds[i]) | ||||||
|  |         pre=preds[i] | ||||||
|  |     return newPreds | ||||||
|  | 
 | ||||||
|  | def image_processing(img,device): | ||||||
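|  |     # resize to the fixed 168x48 (w x h) OCR input, normalise with the shared | ||||||
|  |     # mean/std above, and return a (1, 3, 48, 168) float tensor on `device` | ||||||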
|  |     img = cv2.resize(img, (168,48)) | ||||||
|  |     img = np.reshape(img, (48, 168, 3)) | ||||||
|  | 
 | ||||||
|  |     # normalize | ||||||
|  |     img = img.astype(np.float32) | ||||||
|  |     img = (img / 255. - mean_value) / std_value | ||||||
|  |     img = img.transpose([2, 0, 1]) | ||||||
|  |     img = torch.from_numpy(img) | ||||||
|  | 
 | ||||||
|  |     img = img.to(device) | ||||||
|  |     img = img.view(1, *img.size()) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def get_plate_result(img, device, model): | ||||||
|  |     input = image_processing(img, device) | ||||||
|  |     preds, color_preds = model(input) | ||||||
|  |     preds = preds.argmax(dim=2)  # pick the most probable character at each position | ||||||
|  |     color_preds = color_preds.argmax(dim=-1) | ||||||
|  |     # print(preds) | ||||||
|  |     preds = preds.view(-1).detach().cpu().numpy() | ||||||
|  |     color_preds = color_preds.item() | ||||||
|  |     newPreds = decodePlate(preds) | ||||||
|  |     plate = "" | ||||||
|  |     for i in newPreds: | ||||||
|  |         plate += plateName[i] | ||||||
|  |     # if not (plate[0] in plateName[1:44]): | ||||||
|  |     #     return "" | ||||||
|  |     return plate, color_list[color_preds] | ||||||
|  | 
 | ||||||
|  | def init_model(model_path): | ||||||
|  |     device = torch.device("cuda" if torch.cuda.is_available() else "cpu") | ||||||
|  |     # print(sys.path) | ||||||
|  |     # model_path = "plate_recognition/model/checkpoint_61_acc_0.9715.pth" | ||||||
|  |     check_point = torch.load(model_path, map_location=device) | ||||||
|  |     model_state = check_point['state_dict'] | ||||||
|  |     cfg = check_point['cfg'] | ||||||
|  |     model = myNet_ocr_color(num_classes=len(plateName), export=True, cfg=cfg, color_num=len(color_list)) | ||||||
|  |     model.load_state_dict(model_state) | ||||||
|  |     model.to(device) | ||||||
|  |     model.eval() | ||||||
|  |     return model | ||||||
|  | 
 | ||||||
|  | # model = init_model(model_path) | ||||||
|  | if __name__ == '__main__': | ||||||
|  |  | ||||||
|  |    model_path = "plate_recognition/model/checkpoint_61_acc_0.9715.pth"  # example checkpoint path; adjust to your weights | ||||||
|  |    image_path = "images/tmp2424.png" | ||||||
|  |    testPath = r"double_plate" | ||||||
|  |    fileList = [] | ||||||
|  |    allFilePath(testPath, fileList) | ||||||
|  | #    result = get_plate_result(image_path, device) | ||||||
|  | #    print(result) | ||||||
|  |    model = init_model(model_path) | ||||||
|  |    right = 0 | ||||||
|  |    begin = time.time() | ||||||
|  |    for imge_path in fileList: | ||||||
|  |         img = cv_imread(imge_path) | ||||||
|  |         plate, _ = get_plate_result(img, device, model) | ||||||
|  |         plate_ori = imge_path.split('/')[-1].split('_')[0] | ||||||
|  |         # print(plate, "---", plate_ori) | ||||||
|  |         if plate == plate_ori: | ||||||
|  |             right += 1 | ||||||
|  |         else: | ||||||
|  |             print(plate_ori, "--->", plate, imge_path) | ||||||
|  |    end = time.time() | ||||||
|  |    print("sum:%d ,right:%d , accuracy: %f, time: %f" % (len(fileList), right, right / len(fileList), end - begin)) | ||||||
							
								
								
									
47  algorithm/Car_recognition/requirements.txt  Normal file
						| @ -0,0 +1,47 @@ | |||||||
|  | asttokens | ||||||
|  | backcall | ||||||
|  | charset-normalizer | ||||||
|  | cycler | ||||||
|  | dataclasses | ||||||
|  | debugpy | ||||||
|  | decorator | ||||||
|  | executing | ||||||
|  | fonttools | ||||||
|  | idna | ||||||
|  | ipykernel | ||||||
|  | ipython | ||||||
|  | jedi | ||||||
|  | jupyter-client | ||||||
|  | jupyter-core | ||||||
|  | kiwisolver | ||||||
|  | matplotlib | ||||||
|  | matplotlib-inline | ||||||
|  | nest-asyncio | ||||||
|  | numpy | ||||||
|  | opencv-python | ||||||
|  | packaging | ||||||
|  | pandas | ||||||
|  | parso | ||||||
|  | pickleshare | ||||||
|  | Pillow | ||||||
|  | prompt-toolkit | ||||||
|  | psutil | ||||||
|  | pure-eval | ||||||
|  | Pygments | ||||||
|  | pyparsing | ||||||
|  | python-dateutil | ||||||
|  | pytz | ||||||
|  | PyYAML | ||||||
|  | pyzmq | ||||||
|  | requests | ||||||
|  | scipy | ||||||
|  | seaborn | ||||||
|  | six | ||||||
|  | stack-data | ||||||
|  | thop | ||||||
|  | tornado | ||||||
|  | tqdm | ||||||
|  | traitlets | ||||||
|  | typing-extensions | ||||||
|  | urllib3 | ||||||
|  | wcwidth | ||||||
							
								
								
									
336  algorithm/Car_recognition/test.py  Normal file
						| @ -0,0 +1,336 @@ | |||||||
|  | import argparse | ||||||
|  | import json | ||||||
|  | import os | ||||||
|  | from pathlib import Path | ||||||
|  | from threading import Thread | ||||||
|  | 
 | ||||||
|  | import numpy as np | ||||||
|  | import torch | ||||||
|  | import yaml | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | from models.experimental import attempt_load | ||||||
|  | from utils.datasets import create_dataloader | ||||||
|  | from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, box_iou, \ | ||||||
|  |     non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, non_max_suppression_face | ||||||
|  | from utils.loss import compute_loss | ||||||
|  | from utils.metrics import ap_per_class, ConfusionMatrix | ||||||
|  | from utils.plots import plot_images, output_to_target, plot_study_txt | ||||||
|  | from utils.torch_utils import select_device, time_synchronized | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def test(data, | ||||||
|  |          weights=None, | ||||||
|  |          batch_size=32, | ||||||
|  |          imgsz=640, | ||||||
|  |          conf_thres=0.001, | ||||||
|  |          iou_thres=0.6,  # for NMS | ||||||
|  |          save_json=False, | ||||||
|  |          single_cls=False, | ||||||
|  |          augment=False, | ||||||
|  |          verbose=False, | ||||||
|  |          model=None, | ||||||
|  |          dataloader=None, | ||||||
|  |          save_dir=Path(''),  # for saving images | ||||||
|  |          save_txt=False,  # for auto-labelling | ||||||
|  |          save_hybrid=False,  # for hybrid auto-labelling | ||||||
|  |          save_conf=False,  # save auto-label confidences | ||||||
|  |          plots=True, | ||||||
|  |          log_imgs=0):  # number of logged images | ||||||
|  | 
 | ||||||
|  |     # Initialize/load model and set device | ||||||
|  |     training = model is not None | ||||||
|  |     if training:  # called by train.py | ||||||
|  |         device = next(model.parameters()).device  # get model device | ||||||
|  | 
 | ||||||
|  |     else:  # called directly | ||||||
|  |         set_logging() | ||||||
|  |         device = select_device(opt.device, batch_size=batch_size) | ||||||
|  | 
 | ||||||
|  |         # Directories | ||||||
|  |         save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run | ||||||
|  |         (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir | ||||||
|  | 
 | ||||||
|  |         # Load model | ||||||
|  |         model = attempt_load(weights, map_location=device)  # load FP32 model | ||||||
|  |         imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size | ||||||
|  | 
 | ||||||
|  |         # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 | ||||||
|  |         # if device.type != 'cpu' and torch.cuda.device_count() > 1: | ||||||
|  |         #     model = nn.DataParallel(model) | ||||||
|  | 
 | ||||||
|  |     # Half | ||||||
|  |     half = device.type != 'cpu'  # half precision only supported on CUDA | ||||||
|  |     if half: | ||||||
|  |         model.half() | ||||||
|  | 
 | ||||||
|  |     # Configure | ||||||
|  |     model.eval() | ||||||
|  |     is_coco = data.endswith('coco.yaml')  # is COCO dataset | ||||||
|  |     with open(data) as f: | ||||||
|  |         data = yaml.load(f, Loader=yaml.FullLoader)  # model dict | ||||||
|  |     check_dataset(data)  # check | ||||||
|  |     nc = 1 if single_cls else int(data['nc'])  # number of classes | ||||||
|  |     iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95 | ||||||
|  |     niou = iouv.numel() | ||||||
|  | 
 | ||||||
|  |     # Logging | ||||||
|  |     log_imgs, wandb = min(log_imgs, 100), None  # cap logged images at 100 | ||||||
|  |     try: | ||||||
|  |         import wandb  # Weights & Biases | ||||||
|  |     except ImportError: | ||||||
|  |         log_imgs = 0 | ||||||
|  | 
 | ||||||
|  |     # Dataloader | ||||||
|  |     if not training: | ||||||
|  |         img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img | ||||||
|  |         _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once | ||||||
|  |         path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images | ||||||
|  |         dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0] | ||||||
|  | 
 | ||||||
|  |     seen = 0 | ||||||
|  |     confusion_matrix = ConfusionMatrix(nc=nc) | ||||||
|  |     names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} | ||||||
|  |     coco91class = coco80_to_coco91_class() | ||||||
|  |     s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') | ||||||
|  |     p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. | ||||||
|  |     loss = torch.zeros(3, device=device) | ||||||
|  |     jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] | ||||||
|  |     for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): | ||||||
|  |         img = img.to(device, non_blocking=True) | ||||||
|  |         img = img.half() if half else img.float()  # uint8 to fp16/32 | ||||||
|  |         img /= 255.0  # 0 - 255 to 0.0 - 1.0 | ||||||
|  |         targets = targets.to(device) | ||||||
|  |         nb, _, height, width = img.shape  # batch size, channels, height, width | ||||||
|  | 
 | ||||||
|  |         with torch.no_grad(): | ||||||
|  |             # Run model | ||||||
|  |             t = time_synchronized() | ||||||
|  |             inf_out, train_out = model(img, augment=augment)  # inference and training outputs | ||||||
|  |             t0 += time_synchronized() - t | ||||||
|  | 
 | ||||||
|  |             # Compute loss | ||||||
|  |             if training: | ||||||
|  |                 loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3]  # box, obj, cls | ||||||
|  | 
 | ||||||
|  |             # Run NMS | ||||||
|  |             targets[:, 2:6] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels | ||||||
|  |             lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling | ||||||
|  |             t = time_synchronized() | ||||||
|  |             #output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) | ||||||
|  |             output = non_max_suppression_face(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) | ||||||
|  |             t1 += time_synchronized() - t | ||||||
|  | 
 | ||||||
|  |         # Statistics per image | ||||||
|  |         for si, pred in enumerate(output): | ||||||
|  |             pred = torch.cat((pred[:, :5], pred[:, 13:]), 1)  # drop the landmark columns, keep box+conf and class | ||||||
|  |             labels = targets[targets[:, 0] == si, 1:] | ||||||
|  |             nl = len(labels) | ||||||
|  |             tcls = labels[:, 0].tolist() if nl else []  # target class | ||||||
|  |             path = Path(paths[si]) | ||||||
|  |             seen += 1 | ||||||
|  | 
 | ||||||
|  |             if len(pred) == 0: | ||||||
|  |                 if nl: | ||||||
|  |                     stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) | ||||||
|  |                 continue | ||||||
|  | 
 | ||||||
|  |             # Predictions | ||||||
|  |             predn = pred.clone() | ||||||
|  |             scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred | ||||||
|  | 
 | ||||||
|  |             # Append to text file | ||||||
|  |             if save_txt: | ||||||
|  |                 gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]]  # normalization gain whwh | ||||||
|  |                 for *xyxy, conf, cls in predn.tolist(): | ||||||
|  |                     xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh | ||||||
|  |                     line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format | ||||||
|  |                     with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: | ||||||
|  |                         f.write(('%g ' * len(line)).rstrip() % line + '\n') | ||||||
|  | 
 | ||||||
|  |             # W&B logging | ||||||
|  |             if plots and len(wandb_images) < log_imgs: | ||||||
|  |                 box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, | ||||||
|  |                              "class_id": int(cls), | ||||||
|  |                              "box_caption": "%s %.3f" % (names[cls], conf), | ||||||
|  |                              "scores": {"class_score": conf}, | ||||||
|  |                              "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] | ||||||
|  |                 boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space | ||||||
|  |                 wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) | ||||||
|  | 
 | ||||||
|  |             # Append to pycocotools JSON dictionary | ||||||
|  |             if save_json: | ||||||
|  |                 # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... | ||||||
|  |                 image_id = int(path.stem) if path.stem.isnumeric() else path.stem | ||||||
|  |                 box = xyxy2xywh(predn[:, :4])  # xywh | ||||||
|  |                 box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner | ||||||
|  |                 for p, b in zip(pred.tolist(), box.tolist()): | ||||||
|  |                     jdict.append({'image_id': image_id, | ||||||
|  |                                   'category_id': coco91class[int(p[15])] if is_coco else int(p[15]), | ||||||
|  |                                   'bbox': [round(x, 3) for x in b], | ||||||
|  |                                   'score': round(p[4], 5)}) | ||||||
|  | 
 | ||||||
|  |             # Assign all predictions as incorrect | ||||||
|  |             correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) | ||||||
|  |             if nl: | ||||||
|  |                 detected = []  # target indices | ||||||
|  |                 tcls_tensor = labels[:, 0] | ||||||
|  | 
 | ||||||
|  |                 # target boxes | ||||||
|  |                 tbox = xywh2xyxy(labels[:, 1:5]) | ||||||
|  |                 scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels | ||||||
|  |                 if plots: | ||||||
|  |                     confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) | ||||||
|  | 
 | ||||||
|  |                 # Per target class | ||||||
|  |                 for cls in torch.unique(tcls_tensor): | ||||||
|  |                     ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # target indices | ||||||
|  |                     pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # prediction indices | ||||||
|  | 
 | ||||||
|  |                     # Search for detections | ||||||
|  |                     if pi.shape[0]: | ||||||
|  |                         # Prediction to target ious | ||||||
|  |                         ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices | ||||||
|  | 
 | ||||||
|  |                         # Append detections | ||||||
|  |                         detected_set = set() | ||||||
|  |                         for j in (ious > iouv[0]).nonzero(as_tuple=False): | ||||||
|  |                             d = ti[i[j]]  # detected target | ||||||
|  |                             if d.item() not in detected_set: | ||||||
|  |                                 detected_set.add(d.item()) | ||||||
|  |                                 detected.append(d) | ||||||
|  |                                 correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn | ||||||
|  |                                 if len(detected) == nl:  # all targets already located in image | ||||||
|  |                                     break | ||||||
|  | 
 | ||||||
|  |             # Append statistics (correct, conf, pcls, tcls) | ||||||
|  |             stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) | ||||||
|  | 
 | ||||||
|  |         # Plot images | ||||||
|  |         if plots and batch_i < 3: | ||||||
|  |             f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels | ||||||
|  |             Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() | ||||||
|  |             f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions | ||||||
|  |             Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() | ||||||
|  | 
 | ||||||
|  |     # Compute statistics | ||||||
|  |     stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy | ||||||
|  |     if len(stats) and stats[0].any(): | ||||||
|  |         p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) | ||||||
|  |         p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, AP@0.5, AP@0.5:0.95] | ||||||
|  |         mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() | ||||||
|  |         nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class | ||||||
|  |     else: | ||||||
|  |         nt = torch.zeros(1) | ||||||
|  | 
 | ||||||
|  |     # Print results | ||||||
|  |     pf = '%20s' + '%12.3g' * 6  # print format | ||||||
|  |     print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) | ||||||
|  | 
 | ||||||
|  |     # Print results per class | ||||||
|  |     if verbose and nc > 1 and len(stats): | ||||||
|  |         for i, c in enumerate(ap_class): | ||||||
|  |             print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) | ||||||
|  | 
 | ||||||
|  |     # Print speeds | ||||||
|  |     t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple | ||||||
|  |     if not training: | ||||||
|  |         print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) | ||||||
|  | 
 | ||||||
|  |     # Plots | ||||||
|  |     if plots: | ||||||
|  |         confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) | ||||||
|  |         if wandb and wandb.run: | ||||||
|  |             wandb.log({"Images": wandb_images}) | ||||||
|  |             wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]}) | ||||||
|  | 
 | ||||||
|  |     # Save JSON | ||||||
|  |     if save_json and len(jdict): | ||||||
|  |         w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else ''  # weights | ||||||
|  |         anno_json = '../coco/annotations/instances_val2017.json'  # annotations json | ||||||
|  |         pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json | ||||||
|  |         print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) | ||||||
|  |         with open(pred_json, 'w') as f: | ||||||
|  |             json.dump(jdict, f) | ||||||
|  | 
 | ||||||
|  |         try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb | ||||||
|  |             from pycocotools.coco import COCO | ||||||
|  |             from pycocotools.cocoeval import COCOeval | ||||||
|  | 
 | ||||||
|  |             anno = COCO(anno_json)  # init annotations api | ||||||
|  |             pred = anno.loadRes(pred_json)  # init predictions api | ||||||
|  |             eval = COCOeval(anno, pred, 'bbox') | ||||||
|  |             if is_coco: | ||||||
|  |                 eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]  # image IDs to evaluate | ||||||
|  |             eval.evaluate() | ||||||
|  |             eval.accumulate() | ||||||
|  |             eval.summarize() | ||||||
|  |             map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5) | ||||||
|  |         except Exception as e: | ||||||
|  |             print(f'pycocotools unable to run: {e}') | ||||||
|  | 
 | ||||||
|  |     # Return results | ||||||
|  |     if not training: | ||||||
|  |         s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' | ||||||
|  |         print(f"Results saved to {save_dir}{s}") | ||||||
|  |     model.float()  # for training | ||||||
|  |     maps = np.zeros(nc) + map | ||||||
|  |     for i, c in enumerate(ap_class): | ||||||
|  |         maps[c] = ap[i] | ||||||
|  |     return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     parser = argparse.ArgumentParser(prog='test.py') | ||||||
|  |     parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)') | ||||||
|  |     parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path') | ||||||
|  |     parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch') | ||||||
|  |     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') | ||||||
|  |     parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold') | ||||||
|  |     parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS') | ||||||
|  |     parser.add_argument('--task', default='val', help="'val', 'test', 'study'") | ||||||
|  |     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') | ||||||
|  |     parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') | ||||||
|  |     parser.add_argument('--augment', action='store_true', help='augmented inference') | ||||||
|  |     parser.add_argument('--verbose', action='store_true', help='report mAP by class') | ||||||
|  |     parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') | ||||||
|  |     parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') | ||||||
|  |     parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') | ||||||
|  |     parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file') | ||||||
|  |     parser.add_argument('--project', default='runs/test', help='save to project/name') | ||||||
|  |     parser.add_argument('--name', default='exp', help='save to project/name') | ||||||
|  |     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') | ||||||
|  |     opt = parser.parse_args() | ||||||
|  |     opt.save_json |= opt.data.endswith('coco.yaml') | ||||||
|  |     opt.data = check_file(opt.data)  # check file | ||||||
|  |     print(opt) | ||||||
|  | 
 | ||||||
|  |     if opt.task in ['val', 'test']:  # run normally | ||||||
|  |         test(opt.data, | ||||||
|  |              opt.weights, | ||||||
|  |              opt.batch_size, | ||||||
|  |              opt.img_size, | ||||||
|  |              opt.conf_thres, | ||||||
|  |              opt.iou_thres, | ||||||
|  |              opt.save_json, | ||||||
|  |              opt.single_cls, | ||||||
|  |              opt.augment, | ||||||
|  |              opt.verbose, | ||||||
|  |              save_txt=opt.save_txt | opt.save_hybrid, | ||||||
|  |              save_hybrid=opt.save_hybrid, | ||||||
|  |              save_conf=opt.save_conf, | ||||||
|  |              ) | ||||||
|  | 
 | ||||||
|  |     elif opt.task == 'study':  # run over a range of settings and save/plot | ||||||
|  |         for weights in ['yolov5s.pt', 'yolov5m.pt', 'yolov5l.pt', 'yolov5x.pt']: | ||||||
|  |             f = 'study_%s_%s.txt' % (Path(opt.data).stem, Path(weights).stem)  # filename to save to | ||||||
|  |             x = list(range(320, 800, 64))  # x axis | ||||||
|  |             y = []  # y axis | ||||||
|  |             for i in x:  # img-size | ||||||
|  |                 print('\nRunning %s point %s...' % (f, i)) | ||||||
|  |                 r, _, t = test(opt.data, weights, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json, | ||||||
|  |                                plots=False) | ||||||
|  |                 y.append(r + t)  # results and times | ||||||
|  |             np.savetxt(f, y, fmt='%10.4g')  # save | ||||||
|  |         os.system('zip -r study.zip study_*.txt') | ||||||
|  |         plot_study_txt(f, x)  # plot | ||||||
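|  |  | ||||||
|  | # Example invocations using the defaults above (weight/data paths are placeholders): | ||||||
|  | #   python test.py --data data/coco128.yaml --weights yolov5s.pt --img-size 640 | ||||||
|  | #   python test.py --task study --data data/coco128.yaml   # img-size sweep | ||||||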
							
								
								
									
170  algorithm/Car_recognition/test_widerface.py  Normal file
						| @ -0,0 +1,170 @@ | |||||||
|  | import argparse | ||||||
|  | import glob | ||||||
|  | import time | ||||||
|  | from pathlib import Path | ||||||
|  | 
 | ||||||
|  | import os | ||||||
|  | import cv2 | ||||||
|  | import torch | ||||||
|  | import torch.backends.cudnn as cudnn | ||||||
|  | from numpy import random | ||||||
|  | import numpy as np | ||||||
|  | from models.experimental import attempt_load | ||||||
|  | from utils.datasets import letterbox | ||||||
|  | from utils.general import check_img_size, check_requirements, non_max_suppression_face, apply_classifier, \ | ||||||
|  |     scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path | ||||||
|  | from utils.plots import plot_one_box | ||||||
|  | from utils.torch_utils import select_device, load_classifier, time_synchronized | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | def dynamic_resize(shape, stride=64): | ||||||
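|  |     # round max(h, w) up to the next multiple of `stride`, | ||||||
|  |     # e.g. a 500x375 frame with stride 64 gives 512 | ||||||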
|  |     max_size = max(shape[0], shape[1]) | ||||||
|  |     if max_size % stride != 0: | ||||||
|  |         max_size = (int(max_size / stride) + 1) * stride  | ||||||
|  |     return max_size | ||||||
|  | 
 | ||||||
|  | def scale_coords_landmarks(img1_shape, coords, img0_shape, ratio_pad=None): | ||||||
|  |     # Rescale coords (xyxy) from img1_shape to img0_shape | ||||||
|  |     if ratio_pad is None:  # calculate from img0_shape | ||||||
|  |         gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain  = old / new | ||||||
|  |         pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding | ||||||
|  |     else: | ||||||
|  |         gain = ratio_pad[0][0] | ||||||
|  |         pad = ratio_pad[1] | ||||||
|  | 
 | ||||||
|  |     coords[:, [0, 2, 4, 6, 8]] -= pad[0]  # x padding | ||||||
|  |     coords[:, [1, 3, 5, 7, 9]] -= pad[1]  # y padding | ||||||
|  |     coords[:, :10] /= gain | ||||||
|  |     #clip_coords(coords, img0_shape) | ||||||
|  |     coords[:, 0].clamp_(0, img0_shape[1])  # x1 | ||||||
|  |     coords[:, 1].clamp_(0, img0_shape[0])  # y1 | ||||||
|  |     coords[:, 2].clamp_(0, img0_shape[1])  # x2 | ||||||
|  |     coords[:, 3].clamp_(0, img0_shape[0])  # y2 | ||||||
|  |     coords[:, 4].clamp_(0, img0_shape[1])  # x3 | ||||||
|  |     coords[:, 5].clamp_(0, img0_shape[0])  # y3 | ||||||
|  |     coords[:, 6].clamp_(0, img0_shape[1])  # x4 | ||||||
|  |     coords[:, 7].clamp_(0, img0_shape[0])  # y4 | ||||||
|  |     coords[:, 8].clamp_(0, img0_shape[1])  # x5 | ||||||
|  |     coords[:, 9].clamp_(0, img0_shape[0])  # y5 | ||||||
|  |     return coords | ||||||
|  | 
 | ||||||
|  | def show_results(img, xywh, conf, landmarks, class_num): | ||||||
|  |     h,w,c = img.shape | ||||||
|  |     tl = 1  # line/font thickness | ||||||
|  |     x1 = int(xywh[0] * w - 0.5 * xywh[2] * w) | ||||||
|  |     y1 = int(xywh[1] * h - 0.5 * xywh[3] * h) | ||||||
|  |     x2 = int(xywh[0] * w + 0.5 * xywh[2] * w) | ||||||
|  |     y2 = int(xywh[1] * h + 0.5 * xywh[3] * h) | ||||||
|  |     cv2.rectangle(img, (x1,y1), (x2, y2), (0,255,0), thickness=tl, lineType=cv2.LINE_AA) | ||||||
|  | 
 | ||||||
|  |     clors = [(255,0,0),(0,255,0),(0,0,255),(255,255,0),(0,255,255)] | ||||||
|  | 
 | ||||||
|  |     for i in range(5): | ||||||
|  |         point_x = int(landmarks[2 * i] * w) | ||||||
|  |         point_y = int(landmarks[2 * i + 1] * h) | ||||||
|  |         cv2.circle(img, (point_x, point_y), tl+1, clors[i], -1) | ||||||
|  | 
 | ||||||
|  |     tf = max(tl - 1, 1)  # font thickness | ||||||
|  |     label = str(int(class_num)) + ': ' + str(conf)[:5] | ||||||
|  |     cv2.putText(img, label, (x1, y1 - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) | ||||||
|  |     return img | ||||||
|  | 
 | ||||||
|  | def detect(model, img0): | ||||||
|  |     stride = int(model.stride.max())  # model stride | ||||||
|  |     imgsz = opt.img_size | ||||||
|  |     if imgsz <= 0:                    # original size     | ||||||
|  |         imgsz = dynamic_resize(img0.shape) | ||||||
|  |     imgsz = check_img_size(imgsz, s=64)  # check img_size | ||||||
|  |     img = letterbox(img0, imgsz)[0] | ||||||
|  |     # Convert | ||||||
|  |     img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416 | ||||||
|  |     img = np.ascontiguousarray(img) | ||||||
|  |     img = torch.from_numpy(img).to(device) | ||||||
|  |     img = img.float()  # uint8 to fp16/32 | ||||||
|  |     img /= 255.0  # 0 - 255 to 0.0 - 1.0 | ||||||
|  |     if img.ndimension() == 3: | ||||||
|  |         img = img.unsqueeze(0) | ||||||
|  | 
 | ||||||
|  |     # Inference | ||||||
|  |     pred = model(img, augment=opt.augment)[0] | ||||||
|  |     # Apply NMS | ||||||
|  |     pred = non_max_suppression_face(pred, opt.conf_thres, opt.iou_thres)[0] | ||||||
|  |     gn = torch.tensor(img0.shape)[[1, 0, 1, 0]].to(device)  # normalization gain whwh | ||||||
|  |     gn_lks = torch.tensor(img0.shape)[[1, 0, 1, 0, 1, 0, 1, 0, 1, 0]].to(device)  # normalization gain landmarks | ||||||
|  |     boxes = [] | ||||||
|  |     h, w, c = img0.shape | ||||||
|  |     if pred is not None: | ||||||
|  |         pred[:, :4] = scale_coords(img.shape[2:], pred[:, :4], img0.shape).round() | ||||||
|  |         pred[:, 5:15] = scale_coords_landmarks(img.shape[2:], pred[:, 5:15], img0.shape).round() | ||||||
|  |         for j in range(pred.size()[0]): | ||||||
|  |             xywh = (xyxy2xywh(pred[j, :4].view(1, 4)) / gn).view(-1) | ||||||
|  |             xywh = xywh.data.cpu().numpy() | ||||||
|  |             conf = pred[j, 4].cpu().numpy() | ||||||
|  |             landmarks = (pred[j, 5:15].view(1, 10) / gn_lks).view(-1).tolist() | ||||||
|  |             class_num = pred[j, 15].cpu().numpy() | ||||||
|  |             x1 = int(xywh[0] * w - 0.5 * xywh[2] * w) | ||||||
|  |             y1 = int(xywh[1] * h - 0.5 * xywh[3] * h) | ||||||
|  |             x2 = int(xywh[0] * w + 0.5 * xywh[2] * w) | ||||||
|  |             y2 = int(xywh[1] * h + 0.5 * xywh[3] * h) | ||||||
|  |             boxes.append([x1, y1, x2-x1, y2-y1, conf]) | ||||||
|  |     return boxes | ||||||
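|  |  | ||||||
|  | # Usage sketch for detect() (it reads the globals `opt` and `device` set in the | ||||||
|  | # __main__ block below; the image path is a placeholder): | ||||||
|  | #   boxes = detect(model, cv2.imread('sample.jpg'))  # [[x, y, w, h, conf], ...] | ||||||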
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     parser = argparse.ArgumentParser() | ||||||
|  |     parser.add_argument('--weights', nargs='+', type=str, default='runs/train/exp5/weights/last.pt', help='model.pt path(s)') | ||||||
|  |     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)') | ||||||
|  |     parser.add_argument('--conf-thres', type=float, default=0.02, help='object confidence threshold') | ||||||
|  |     parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS') | ||||||
|  |     parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') | ||||||
|  |     parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS') | ||||||
|  |     parser.add_argument('--augment', action='store_true', help='augmented inference') | ||||||
|  |     parser.add_argument('--update', action='store_true', help='update all models') | ||||||
|  |     parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --class 0, or --class 0 2 3') | ||||||
|  |     parser.add_argument('--project', default='runs/detect', help='save results to project/name') | ||||||
|  |     parser.add_argument('--name', default='exp', help='save results to project/name') | ||||||
|  |     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') | ||||||
|  |     parser.add_argument('--save_folder', default='./widerface_evaluate/widerface_txt/', type=str, help='Dir to save txt results') | ||||||
|  |     parser.add_argument('--dataset_folder', default='../WiderFace/val/images/', type=str, help='dataset path') | ||||||
|  |     parser.add_argument('--folder_pict', default='/yolov5-face/data/widerface/val/wider_val.txt', type=str, help='folder_pict') | ||||||
|  |     opt = parser.parse_args() | ||||||
|  |     print(opt) | ||||||
|  | 
 | ||||||
|  |     # changhy : read folder_pict | ||||||
|  |     pict_folder = {} | ||||||
|  |     with open(opt.folder_pict, 'r') as f: | ||||||
|  |         lines = f.readlines() | ||||||
|  |         for line in lines: | ||||||
|  |             line = line.strip().split('/') | ||||||
|  |             pict_folder[line[-1]] = line[-2] | ||||||
|  | 
 | ||||||
|  |     # Load model | ||||||
|  |     device = select_device(opt.device) | ||||||
|  |     model = attempt_load(opt.weights, map_location=device)  # load FP32 model | ||||||
|  |     with torch.no_grad(): | ||||||
|  |         # testing dataset | ||||||
|  |         testset_folder = opt.dataset_folder | ||||||
|  | 
 | ||||||
|  |         for image_path in tqdm(glob.glob(os.path.join(testset_folder, '*'))): | ||||||
|  |             if image_path.endswith('.txt'): | ||||||
|  |                 continue | ||||||
|  |             img0 = cv2.imread(image_path)  # BGR | ||||||
|  |             if img0 is None: | ||||||
|  |                 print(f'ignore : {image_path}') | ||||||
|  |                 continue | ||||||
|  |             boxes = detect(model, img0) | ||||||
|  |             # -------------------------------------------------------------------- | ||||||
|  |             image_name = os.path.basename(image_path) | ||||||
|  |             txt_name = os.path.splitext(image_name)[0] + ".txt" | ||||||
|  |             save_name = os.path.join(opt.save_folder, pict_folder[image_name], txt_name) | ||||||
|  |             dirname = os.path.dirname(save_name) | ||||||
|  |             if not os.path.isdir(dirname): | ||||||
|  |                 os.makedirs(dirname) | ||||||
|  |             with open(save_name, "w") as fd: | ||||||
|  |                 file_name = os.path.basename(save_name)[:-4] + "\n"             | ||||||
|  |                 bboxs_num = str(len(boxes)) + "\n" | ||||||
|  |                 fd.write(file_name) | ||||||
|  |                 fd.write(bboxs_num) | ||||||
|  |                 for box in boxes: | ||||||
|  |                     fd.write('%d %d %d %d %.03f' % (box[0], box[1], box[2], box[3], box[4] if box[4] <= 1 else 1) + '\n') | ||||||
|  |         print('done.') | ||||||
							
								
								
									
602  algorithm/Car_recognition/train.py  Normal file
						| @ -0,0 +1,602 @@ | |||||||
|  | import argparse | ||||||
|  | import logging | ||||||
|  | import math | ||||||
|  | import os | ||||||
|  | import random | ||||||
|  | import time | ||||||
|  | from pathlib import Path | ||||||
|  | from threading import Thread | ||||||
|  | from warnings import warn | ||||||
|  | 
 | ||||||
|  | import numpy as np | ||||||
|  | import torch.distributed as dist | ||||||
|  | import torch.nn as nn | ||||||
|  | import torch.nn.functional as F | ||||||
|  | import torch.optim as optim | ||||||
|  | import torch.optim.lr_scheduler as lr_scheduler | ||||||
|  | import torch.utils.data | ||||||
|  | import yaml | ||||||
|  | from torch.cuda import amp | ||||||
|  | from torch.nn.parallel import DistributedDataParallel as DDP | ||||||
|  | from torch.utils.tensorboard import SummaryWriter | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | import test  # import test.py to get mAP after each epoch | ||||||
|  | from models.experimental import attempt_load | ||||||
|  | from models.yolo import Model | ||||||
|  | from utils.autoanchor import check_anchors | ||||||
|  | from utils.face_datasets import create_dataloader | ||||||
|  | from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ | ||||||
|  |     fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ | ||||||
|  |     print_mutation, set_logging | ||||||
|  | from utils.google_utils import attempt_download | ||||||
|  | from utils.loss import compute_loss | ||||||
|  | from utils.plots import plot_images, plot_labels, plot_results, plot_evolution | ||||||
|  | from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first | ||||||
|  | 
 | ||||||
|  | logger = logging.getLogger(__name__) | ||||||
|  | begin_save = 1 | ||||||
|  | try: | ||||||
|  |     import wandb | ||||||
|  | except ImportError: | ||||||
|  |     wandb = None | ||||||
|  |     logger.info("Install Weights & Biases for experiment logging via 'pip install wandb' (recommended)") | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def train(hyp, opt, device, tb_writer=None, wandb=None): | ||||||
|  |     logger.info(f'Hyperparameters {hyp}') | ||||||
|  |     save_dir, epochs, batch_size, total_batch_size, weights, rank = \ | ||||||
|  |         Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank | ||||||
|  | 
 | ||||||
|  |     # Directories | ||||||
|  |     wdir = save_dir / 'weights' | ||||||
|  |     wdir.mkdir(parents=True, exist_ok=True)  # make dir | ||||||
|  |     last = wdir / 'last.pt' | ||||||
|  |     best = wdir / 'best.pt' | ||||||
|  |     results_file = save_dir / 'results.txt' | ||||||
|  | 
 | ||||||
|  |     # Save run settings | ||||||
|  |     with open(save_dir / 'hyp.yaml', 'w') as f: | ||||||
|  |         yaml.dump(hyp, f, sort_keys=False) | ||||||
|  |     with open(save_dir / 'opt.yaml', 'w') as f: | ||||||
|  |         yaml.dump(vars(opt), f, sort_keys=False) | ||||||
|  | 
 | ||||||
|  |     # Configure | ||||||
|  |     plots = not opt.evolve  # create plots | ||||||
|  |     cuda = device.type != 'cpu' | ||||||
|  |     init_seeds(2 + rank) | ||||||
|  |     with open(opt.data) as f: | ||||||
|  |         data_dict = yaml.load(f, Loader=yaml.FullLoader)  # data dict | ||||||
|  |     with torch_distributed_zero_first(rank): | ||||||
|  |         check_dataset(data_dict)  # check | ||||||
|  |     train_path = data_dict['train'] | ||||||
|  |     test_path = data_dict['val'] | ||||||
|  |     nc = 1 if opt.single_cls else int(data_dict['nc'])  # number of classes | ||||||
|  |     names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names']  # class names | ||||||
|  |     assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data)  # check | ||||||
|  | 
 | ||||||
|  |     # Model | ||||||
|  |     pretrained = weights.endswith('.pt') | ||||||
|  |     if pretrained: | ||||||
|  |         with torch_distributed_zero_first(rank): | ||||||
|  |             attempt_download(weights)  # download if not found locally | ||||||
|  |         ckpt = torch.load(weights, map_location=device)  # load checkpoint | ||||||
|  |         if hyp.get('anchors'): | ||||||
|  |             ckpt['model'].yaml['anchors'] = round(hyp['anchors'])  # force autoanchor | ||||||
|  |         model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device)  # create | ||||||
|  |         exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else []  # exclude keys | ||||||
|  |         state_dict = ckpt['model'].float().state_dict()  # to FP32 | ||||||
|  |         state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect | ||||||
|  |         model.load_state_dict(state_dict, strict=False)  # load | ||||||
|  |         logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights))  # report | ||||||
|  |     else: | ||||||
|  |         model = Model(opt.cfg, ch=3, nc=nc).to(device)  # create | ||||||
|  | 
 | ||||||
|  |     # Freeze | ||||||
|  |     freeze = []  # parameter names to freeze (full or partial) | ||||||
|  |     for k, v in model.named_parameters(): | ||||||
|  |         v.requires_grad = True  # train all layers | ||||||
|  |         if any(x in k for x in freeze): | ||||||
|  |             print('freezing %s' % k) | ||||||
|  |             v.requires_grad = False | ||||||
|  | 
 | ||||||
|  |     # Optimizer | ||||||
|  |     nbs = 64  # nominal batch size | ||||||
|  |     accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing | ||||||
|  |     hyp['weight_decay'] *= total_batch_size * accumulate / nbs  # scale weight_decay | ||||||
|  | 
 | ||||||
|  |     pg0, pg1, pg2 = [], [], []  # optimizer parameter groups | ||||||
|  |     for k, v in model.named_modules(): | ||||||
|  |         if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): | ||||||
|  |             pg2.append(v.bias)  # biases | ||||||
|  |         if isinstance(v, nn.BatchNorm2d): | ||||||
|  |             pg0.append(v.weight)  # no decay | ||||||
|  |         elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): | ||||||
|  |             pg1.append(v.weight)  # apply decay | ||||||
|  | 
 | ||||||
|  |     if opt.adam: | ||||||
|  |         optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999))  # adjust beta1 to momentum | ||||||
|  |     else: | ||||||
|  |         optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) | ||||||
|  | 
 | ||||||
|  |     optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay | ||||||
|  |     optimizer.add_param_group({'params': pg2})  # add pg2 (biases) | ||||||
|  |     logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) | ||||||
|  |     del pg0, pg1, pg2 | ||||||
|  | 
 | ||||||
|  |     # Scheduler https://arxiv.org/pdf/1812.01187.pdf | ||||||
|  |     # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR | ||||||
|  |     lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']  # cosine | ||||||
|  |     scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) | ||||||
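|  |     # sanity check on `lf` above: lf(0) == 1.0 and lf(epochs) == hyp['lrf'], i.e. | ||||||
|  |     # the lr decays cosine-style from lr0 down to lr0 * hyp['lrf'] over the run | ||||||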
|  |     # plot_lr_scheduler(optimizer, scheduler, epochs) | ||||||
|  | 
 | ||||||
|  |     # Logging | ||||||
|  |     if wandb and wandb.run is None: | ||||||
|  |         opt.hyp = hyp  # add hyperparameters | ||||||
|  |         wandb_run = wandb.init(config=opt, resume="allow", | ||||||
|  |                                project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, | ||||||
|  |                                name=save_dir.stem, | ||||||
|  |                                id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) | ||||||
|  |     loggers = {'wandb': wandb}  # loggers dict | ||||||
|  | 
 | ||||||
|  |     # Resume | ||||||
|  |     start_epoch, best_fitness = 0, 0.0 | ||||||
|  |     if pretrained: | ||||||
|  |         # Optimizer | ||||||
|  |         if ckpt['optimizer'] is not None: | ||||||
|  |             optimizer.load_state_dict(ckpt['optimizer']) | ||||||
|  |             best_fitness = 0 | ||||||
|  | 
 | ||||||
|  |         # Results | ||||||
|  |         if ckpt.get('training_results') is not None: | ||||||
|  |             with open(results_file, 'w') as file: | ||||||
|  |                 file.write(ckpt['training_results'])  # write results.txt | ||||||
|  | 
 | ||||||
|  |         # Epochs | ||||||
|  |         # start_epoch = ckpt['epoch'] + 1 | ||||||
|  |         if opt.resume: | ||||||
|  |             assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) | ||||||
|  |         if epochs < start_epoch: | ||||||
|  |             logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % | ||||||
|  |                         (weights, ckpt['epoch'], epochs)) | ||||||
|  |             epochs += ckpt['epoch']  # finetune additional epochs | ||||||
|  | 
 | ||||||
|  |         del ckpt, state_dict | ||||||
|  | 
 | ||||||
|  |     # Image sizes | ||||||
|  |     gs = int(max(model.stride))  # grid size (max stride) | ||||||
|  |     imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples | ||||||
|  | 
 | ||||||
|  |     # DP mode | ||||||
|  |     if cuda and rank == -1 and torch.cuda.device_count() > 1: | ||||||
|  |         model = torch.nn.DataParallel(model) | ||||||
|  | 
 | ||||||
|  |     # SyncBatchNorm | ||||||
|  |     if opt.sync_bn and cuda and rank != -1: | ||||||
|  |         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) | ||||||
|  |         logger.info('Using SyncBatchNorm()') | ||||||
|  | 
 | ||||||
|  |     # EMA | ||||||
|  |     ema = ModelEMA(model) if rank in [-1, 0] else None | ||||||
|  | 
 | ||||||
|  |     # DDP mode | ||||||
|  |     if cuda and rank != -1: | ||||||
|  |         model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) | ||||||
|  | 
 | ||||||
|  |     # Trainloader | ||||||
|  |     dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, | ||||||
|  |                                             hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, | ||||||
|  |                                             world_size=opt.world_size, workers=opt.workers, | ||||||
|  |                                             image_weights=opt.image_weights) | ||||||
|  |     mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class | ||||||
|  |     nb = len(dataloader)  # number of batches | ||||||
|  |     assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1) | ||||||
|  | 
 | ||||||
|  |     # Process 0 | ||||||
|  |     if rank in [-1, 0]: | ||||||
|  |         ema.updates = start_epoch * nb // accumulate  # set EMA updates | ||||||
|  |         testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt,  # testloader | ||||||
|  |                                        hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, | ||||||
|  |                                        rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0] | ||||||
|  | 
 | ||||||
|  |         if not opt.resume: | ||||||
|  |             labels = np.concatenate(dataset.labels, 0) | ||||||
|  |             c = torch.tensor(labels[:, 0])  # classes | ||||||
|  |             # cf = torch.bincount(c.long(), minlength=nc) + 1.  # frequency | ||||||
|  |             # model._initialize_biases(cf.to(device)) | ||||||
|  |             if plots: | ||||||
|  |                 plot_labels(labels, save_dir, loggers) | ||||||
|  |                 if tb_writer: | ||||||
|  |                     tb_writer.add_histogram('classes', c, 0) | ||||||
|  | 
 | ||||||
|  |             # Anchors | ||||||
|  |             if not opt.noautoanchor: | ||||||
|  |                 check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) | ||||||
|  | 
 | ||||||
|  |     # Model parameters | ||||||
|  |     hyp['cls'] *= nc / 80.  # scale coco-tuned hyp['cls'] to current dataset | ||||||
|  |     model.nc = nc  # attach number of classes to model | ||||||
|  |     model.hyp = hyp  # attach hyperparameters to model | ||||||
|  |     model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou) | ||||||
|  |     model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights | ||||||
|  |     model.names = names | ||||||
|  | 
 | ||||||
|  |     # Start training | ||||||
|  |     t0 = time.time() | ||||||
|  |     nw = max(round(hyp['warmup_epochs'] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations) | ||||||
|  |     # nw = min(nw, (epochs - start_epoch) / 2 * nb)  # limit warmup to < 1/2 of training | ||||||
|  |     maps = np.zeros(nc)  # mAP per class | ||||||
|  |     results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) | ||||||
|  |     scheduler.last_epoch = start_epoch - 1  # do not move | ||||||
|  |     scaler = amp.GradScaler(enabled=cuda) | ||||||
|  |     logger.info('Image sizes %g train, %g test\n' | ||||||
|  |                 'Using %g dataloader workers\nLogging results to %s\n' | ||||||
|  |                 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs)) | ||||||
|  |     for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------ | ||||||
|  |         model.train() | ||||||
|  | 
 | ||||||
|  |         # Update image weights (optional) | ||||||
|  |         if opt.image_weights: | ||||||
|  |             # Generate indices | ||||||
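|  |             # weight each class by its squared mAP deficit, (1 - mAP)^2, on top of the | ||||||
|  |             # frequency-based class weights, so images containing under-performing classes | ||||||
|  |             # are drawn more often in the coming epoch | ||||||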
|  |             if rank in [-1, 0]: | ||||||
|  |                 cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights | ||||||
|  |                 iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights | ||||||
|  |                 dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx | ||||||
|  |             # Broadcast if DDP | ||||||
|  |             if rank != -1: | ||||||
|  |                 indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() | ||||||
|  |                 dist.broadcast(indices, 0) | ||||||
|  |                 if rank != 0: | ||||||
|  |                     dataset.indices = indices.cpu().numpy() | ||||||
|  | 
 | ||||||
|  |         # Update mosaic border | ||||||
|  |         # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) | ||||||
|  |         # dataset.mosaic_border = [b - imgsz, -b]  # height, width borders | ||||||
|  | 
 | ||||||
|  |         mloss = torch.zeros(5, device=device)  # mean losses | ||||||
|  |         if rank != -1: | ||||||
|  |             dataloader.sampler.set_epoch(epoch) | ||||||
|  |         pbar = enumerate(dataloader) | ||||||
|  |         logger.info(('\n' + '%10s' * 9) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'landmark', 'total', 'targets', 'img_size')) | ||||||
|  |         if rank in [-1, 0]: | ||||||
|  |             pbar = tqdm(pbar, total=nb)  # progress bar | ||||||
|  |         optimizer.zero_grad() | ||||||
|  |         for i, (imgs, targets, paths, _) in pbar:  # batch ------------------------------------------------------------- | ||||||
|  |             ni = i + nb * epoch  # number integrated batches (since train start) | ||||||
|  |             imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0 | ||||||
|  | 
 | ||||||
|  |             # Warmup | ||||||
|  |             if ni <= nw: | ||||||
|  |                 xi = [0, nw]  # x interp | ||||||
|  |                 # model.gr = np.interp(ni, xi, [0.0, 1.0])  # iou loss ratio (obj_loss = 1.0 or iou) | ||||||
|  |                 accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) | ||||||
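|  |                 # e.g. with a nominal batch size nbs of 64 (assumed) and total_batch_size of 32, | ||||||
|  |                 # accumulate ramps from 1 to 2, so ~64 images are seen per optimizer step | ||||||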
|  |                 for j, x in enumerate(optimizer.param_groups): | ||||||
|  |                     # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 | ||||||
|  |                     x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) | ||||||
|  |                     if 'momentum' in x: | ||||||
|  |                         x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) | ||||||
|  | 
 | ||||||
|  |             # Multi-scale | ||||||
|  |             if opt.multi_scale: | ||||||
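|  |                 # e.g. with imgsz=640 and gs=32, sz is a multiple of 32 drawn from [320, 960], | ||||||
|  |                 # and the whole batch is resized once per iteration | ||||||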
|  |                 sz = random.randrange(int(imgsz * 0.5), int(imgsz * 1.5 + gs)) // gs * gs  # size (randrange needs ints) | ||||||
|  |                 sf = sz / max(imgs.shape[2:])  # scale factor | ||||||
|  |                 if sf != 1: | ||||||
|  |                     ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple) | ||||||
|  |                     imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) | ||||||
|  | 
 | ||||||
|  |             # Forward | ||||||
|  |             with amp.autocast(enabled=cuda): | ||||||
|  |                 pred = model(imgs)  # forward | ||||||
|  |                 loss, loss_items = compute_loss(pred, targets.to(device), model)  # loss scaled by batch_size | ||||||
|  |                 if rank != -1: | ||||||
|  |                     loss *= opt.world_size  # gradient averaged between devices in DDP mode | ||||||
|  | 
 | ||||||
|  |             # Backward | ||||||
|  |             scaler.scale(loss).backward() | ||||||
|  | 
 | ||||||
|  |             # Optimize | ||||||
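|  |             # mixed precision: scaler.step() first unscales the fp16 gradients and skips the | ||||||
|  |             # update if any are inf/nan; scaler.update() then adapts the loss scale | ||||||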
|  |             if ni % accumulate == 0: | ||||||
|  |                 scaler.step(optimizer)  # optimizer.step | ||||||
|  |                 scaler.update() | ||||||
|  |                 optimizer.zero_grad() | ||||||
|  |                 if ema: | ||||||
|  |                     ema.update(model) | ||||||
|  | 
 | ||||||
|  |             # Print | ||||||
|  |             if rank in [-1, 0]: | ||||||
|  |                 mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses | ||||||
|  |                 mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0)  # (GB) | ||||||
|  |                 s = ('%10s' * 2 + '%10.4g' * 7) % ( | ||||||
|  |                     '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) | ||||||
|  |                 pbar.set_description(s) | ||||||
|  | 
 | ||||||
|  |                 # Plot | ||||||
|  |                 if plots and ni < 3: | ||||||
|  |                     f = save_dir / f'train_batch{ni}.jpg'  # filename | ||||||
|  |                     Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() | ||||||
|  |                     # if tb_writer: | ||||||
|  |                     #     tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) | ||||||
|  |                     #     tb_writer.add_graph(model, imgs)  # add model to tensorboard | ||||||
|  |                 elif plots and ni == 3 and wandb: | ||||||
|  |                     wandb.log({"Mosaics": [wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg')]}) | ||||||
|  | 
 | ||||||
|  |             # end batch ------------------------------------------------------------------------------------------------ | ||||||
|  |         # end epoch ---------------------------------------------------------------------------------------------------- | ||||||
|  | 
 | ||||||
|  |         # Scheduler | ||||||
|  |         lr = [x['lr'] for x in optimizer.param_groups]  # for tensorboard | ||||||
|  |         scheduler.step() | ||||||
|  | 
 | ||||||
|  |         # DDP process 0 or single-GPU | ||||||
|  |         if rank in [-1, 0] and epoch > begin_save: | ||||||
|  |             # mAP | ||||||
|  |             if ema: | ||||||
|  |                 ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights']) | ||||||
|  |             final_epoch = epoch + 1 == epochs | ||||||
|  |             if not opt.notest or final_epoch:  # Calculate mAP | ||||||
|  |                 results, maps, times = test.test(opt.data, | ||||||
|  |                                                  batch_size=total_batch_size, | ||||||
|  |                                                  imgsz=imgsz_test, | ||||||
|  |                                                  model=ema.ema, | ||||||
|  |                                                  single_cls=opt.single_cls, | ||||||
|  |                                                  dataloader=testloader, | ||||||
|  |                                                  save_dir=save_dir, | ||||||
|  |                                                  plots=False, | ||||||
|  |                                                  log_imgs=opt.log_imgs if wandb else 0) | ||||||
|  | 
 | ||||||
|  |             # Write | ||||||
|  |             with open(results_file, 'a') as f: | ||||||
|  |                 f.write(s + '%10.4g' * 7 % results + '\n')  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls) | ||||||
|  |             if len(opt.name) and opt.bucket: | ||||||
|  |                 os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) | ||||||
|  | 
 | ||||||
|  |             # Log | ||||||
|  |             tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', 'train/landmark_loss',  # train losses | ||||||
|  |                     'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', | ||||||
|  |                     'val/box_loss', 'val/obj_loss', 'val/cls_loss',  # val loss | ||||||
|  |                     'x/lr0', 'x/lr1', 'x/lr2']  # params | ||||||
|  |             for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): | ||||||
|  |                 if tb_writer: | ||||||
|  |                     tb_writer.add_scalar(tag, x, epoch)  # tensorboard | ||||||
|  |                 if wandb: | ||||||
|  |                     wandb.log({tag: x})  # W&B | ||||||
|  | 
 | ||||||
|  |             # Update best mAP | ||||||
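|  |             # fitness() is a weighted sum of the four metrics; upstream YOLOv5 typically | ||||||
|  |             # weights them [0.0, 0.0, 0.1, 0.9] for [P, R, mAP@.5, mAP@.5-.95] (assumed here) | ||||||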
|  |             fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95] | ||||||
|  |             if fi > best_fitness: | ||||||
|  |                 best_fitness = fi | ||||||
|  | 
 | ||||||
|  |             # Save model | ||||||
|  |             save = (not opt.nosave) or (final_epoch and not opt.evolve) | ||||||
|  |             if save: | ||||||
|  |                 with open(results_file, 'r') as f:  # create checkpoint | ||||||
|  |                     ckpt = {'epoch': epoch, | ||||||
|  |                             'best_fitness': best_fitness, | ||||||
|  |                             'training_results': f.read(), | ||||||
|  |                             'model': ema.ema, | ||||||
|  |                             'optimizer': None if final_epoch else optimizer.state_dict(), | ||||||
|  |                             'wandb_id': wandb_run.id if wandb else None} | ||||||
|  | 
 | ||||||
|  |                 # Save last, best and delete | ||||||
|  |                 torch.save(ckpt, last) | ||||||
|  |                 if best_fitness == fi: | ||||||
|  |                     ckpt_best = { | ||||||
|  |                             'epoch': epoch, | ||||||
|  |                             'best_fitness': best_fitness, | ||||||
|  |                             # 'training_results': f.read(), | ||||||
|  |                             'model': ema.ema, | ||||||
|  |                             # 'optimizer': None if final_epoch else optimizer.state_dict(), | ||||||
|  |                             # 'wandb_id': wandb_run.id if wandb else None | ||||||
|  |                             } | ||||||
|  |                     torch.save(ckpt_best, best) | ||||||
|  |                 del ckpt | ||||||
|  |         # end epoch ---------------------------------------------------------------------------------------------------- | ||||||
|  |     # end training | ||||||
|  | 
 | ||||||
|  |     if rank in [-1, 0]: | ||||||
|  |         # Strip optimizers | ||||||
|  |         final = best if best.exists() else last  # final model | ||||||
|  |         for f in [last, best]: | ||||||
|  |             if f.exists(): | ||||||
|  |                 strip_optimizer(f)  # strip optimizers | ||||||
|  |         if opt.bucket: | ||||||
|  |             os.system(f'gsutil cp {final} gs://{opt.bucket}/weights')  # upload | ||||||
|  | 
 | ||||||
|  |         # Plots | ||||||
|  |         if plots: | ||||||
|  |             plot_results(save_dir=save_dir)  # save as results.png | ||||||
|  |             if wandb: | ||||||
|  |                 files = ['results.png', 'precision_recall_curve.png', 'confusion_matrix.png'] | ||||||
|  |                 wandb.log({"Results": [wandb.Image(str(save_dir / f), caption=f) for f in files | ||||||
|  |                                        if (save_dir / f).exists()]}) | ||||||
|  |                 if opt.log_artifacts: | ||||||
|  |                     wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) | ||||||
|  | 
 | ||||||
|  |         # Test best.pt | ||||||
|  |         logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) | ||||||
|  |         if opt.data.endswith('coco.yaml') and nc == 80:  # if COCO | ||||||
|  |             for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]):  # speed, mAP tests | ||||||
|  |                 results, _, _ = test.test(opt.data, | ||||||
|  |                                           batch_size=total_batch_size, | ||||||
|  |                                           imgsz=imgsz_test, | ||||||
|  |                                           conf_thres=conf, | ||||||
|  |                                           iou_thres=iou, | ||||||
|  |                                           model=attempt_load(final, device).half(), | ||||||
|  |                                           single_cls=opt.single_cls, | ||||||
|  |                                           dataloader=testloader, | ||||||
|  |                                           save_dir=save_dir, | ||||||
|  |                                           save_json=save_json, | ||||||
|  |                                           plots=False) | ||||||
|  | 
 | ||||||
|  |     else: | ||||||
|  |         dist.destroy_process_group() | ||||||
|  | 
 | ||||||
|  |     wandb.run.finish() if wandb and wandb.run else None | ||||||
|  |     torch.cuda.empty_cache() | ||||||
|  |     return results | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | if __name__ == '__main__': | ||||||
|  |     parser = argparse.ArgumentParser() | ||||||
|  |     parser.add_argument('--weights', type=str, default='weights/yolov5s.pt', help='initial weights path') | ||||||
|  |     parser.add_argument('--cfg', type=str, default='models/yolov5s.yaml', help='model.yaml path') | ||||||
|  |     parser.add_argument('--data', type=str, default='data/widerface.yaml', help='data.yaml path') | ||||||
|  |     parser.add_argument('--hyp', type=str, default='data/hyp.scratch.yaml', help='hyperparameters path') | ||||||
|  |     parser.add_argument('--epochs', type=int, default=120) | ||||||
|  |     parser.add_argument('--batch-size', type=int, default=32, help='total batch size for all GPUs') | ||||||
|  |     parser.add_argument('--img-size', nargs='+', type=int, default=[640, 640], help='[train, test] image sizes') | ||||||
|  |     parser.add_argument('--rect', action='store_true', help='rectangular training') | ||||||
|  |     parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') | ||||||
|  |     parser.add_argument('--nosave', action='store_true', help='only save final checkpoint') | ||||||
|  |     parser.add_argument('--notest', action='store_true', help='only test final epoch') | ||||||
|  |     parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check') | ||||||
|  |     parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters') | ||||||
|  |     parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') | ||||||
|  |     parser.add_argument('--cache-images', action='store_true', help='cache images for faster training') | ||||||
|  |     parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') | ||||||
|  |     parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') | ||||||
|  |     parser.add_argument('--multi-scale', action='store_true', default=True, help='vary img-size +/- 50%%') | ||||||
|  |     parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') | ||||||
|  |     parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') | ||||||
|  |     parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode') | ||||||
|  |     parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify') | ||||||
|  |     parser.add_argument('--log-imgs', type=int, default=16, help='number of images for W&B logging, max 100') | ||||||
|  |     parser.add_argument('--log-artifacts', action='store_true', help='log artifacts, i.e. final trained model') | ||||||
|  |     parser.add_argument('--workers', type=int, default=4, help='maximum number of dataloader workers') | ||||||
|  |     parser.add_argument('--project', default='runs/train', help='save to project/name') | ||||||
|  |     parser.add_argument('--name', default='exp', help='save to project/name') | ||||||
|  |     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') | ||||||
|  |     opt = parser.parse_args() | ||||||
|  | 
 | ||||||
|  |     # Set DDP variables | ||||||
|  |     opt.total_batch_size = opt.batch_size | ||||||
|  |     opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 | ||||||
|  |     opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1 | ||||||
|  |     set_logging(opt.global_rank) | ||||||
|  |     if opt.global_rank in [-1, 0]: | ||||||
|  |         check_git_status() | ||||||
|  | 
 | ||||||
|  |     # Resume | ||||||
|  |     if opt.resume:  # resume an interrupted run | ||||||
|  |         ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run()  # specified or most recent path | ||||||
|  |         assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist' | ||||||
|  |         with open(Path(ckpt).parent.parent / 'opt.yaml') as f: | ||||||
|  |             opt = argparse.Namespace(**yaml.load(f, Loader=yaml.FullLoader))  # replace | ||||||
|  |         opt.cfg, opt.weights, opt.resume = '', ckpt, True | ||||||
|  |         logger.info('Resuming training from %s' % ckpt) | ||||||
|  |     else: | ||||||
|  |         # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml') | ||||||
|  |         opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp)  # check files | ||||||
|  |         assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified' | ||||||
|  |         opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size)))  # extend to 2 sizes (train, test) | ||||||
|  |         opt.name = 'evolve' if opt.evolve else opt.name | ||||||
|  |         opt.save_dir = increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve)  # increment run | ||||||
|  | 
 | ||||||
|  |     # DDP mode | ||||||
|  |     device = select_device(opt.device, batch_size=opt.batch_size) | ||||||
|  |     if opt.local_rank != -1: | ||||||
|  |         assert torch.cuda.device_count() > opt.local_rank | ||||||
|  |         torch.cuda.set_device(opt.local_rank) | ||||||
|  |         device = torch.device('cuda', opt.local_rank) | ||||||
|  |         dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend | ||||||
|  |         assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count' | ||||||
|  |         opt.batch_size = opt.total_batch_size // opt.world_size | ||||||
|  | 
 | ||||||
|  |     # Hyperparameters | ||||||
|  |     with open(opt.hyp) as f: | ||||||
|  |         hyp = yaml.load(f, Loader=yaml.FullLoader)  # load hyps | ||||||
|  |         if 'box' not in hyp: | ||||||
|  |             warn('Compatibility: %s missing "box" which was renamed from "giou" in %s' % | ||||||
|  |                  (opt.hyp, 'https://github.com/ultralytics/yolov5/pull/1120')) | ||||||
|  |             hyp['box'] = hyp.pop('giou') | ||||||
|  | 
 | ||||||
|  |     # Train | ||||||
|  |     logger.info(opt) | ||||||
|  |     if not opt.evolve: | ||||||
|  |         tb_writer = None  # init loggers | ||||||
|  |         if opt.global_rank in [-1, 0]: | ||||||
|  |             logger.info(f'Start Tensorboard with "tensorboard --logdir {opt.project}", view at http://localhost:6006/') | ||||||
|  |             tb_writer = SummaryWriter(opt.save_dir)  # Tensorboard | ||||||
|  |         train(hyp, opt, device, tb_writer, wandb) | ||||||
|  | 
 | ||||||
|  |     # Evolve hyperparameters (optional) | ||||||
|  |     else: | ||||||
|  |         # Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit) | ||||||
|  |         meta = {'lr0': (1, 1e-5, 1e-1),  # initial learning rate (SGD=1E-2, Adam=1E-3) | ||||||
|  |                 'lrf': (1, 0.01, 1.0),  # final OneCycleLR learning rate (lr0 * lrf) | ||||||
|  |                 'momentum': (0.3, 0.6, 0.98),  # SGD momentum/Adam beta1 | ||||||
|  |                 'weight_decay': (1, 0.0, 0.001),  # optimizer weight decay | ||||||
|  |                 'warmup_epochs': (1, 0.0, 5.0),  # warmup epochs (fractions ok) | ||||||
|  |                 'warmup_momentum': (1, 0.0, 0.95),  # warmup initial momentum | ||||||
|  |                 'warmup_bias_lr': (1, 0.0, 0.2),  # warmup initial bias lr | ||||||
|  |                 'box': (1, 0.02, 0.2),  # box loss gain | ||||||
|  |                 'cls': (1, 0.2, 4.0),  # cls loss gain | ||||||
|  |                 'cls_pw': (1, 0.5, 2.0),  # cls BCELoss positive_weight | ||||||
|  |                 'obj': (1, 0.2, 4.0),  # obj loss gain (scale with pixels) | ||||||
|  |                 'obj_pw': (1, 0.5, 2.0),  # obj BCELoss positive_weight | ||||||
|  |                 'iou_t': (0, 0.1, 0.7),  # IoU training threshold | ||||||
|  |                 'anchor_t': (1, 2.0, 8.0),  # anchor-multiple threshold | ||||||
|  |                 'anchors': (2, 2.0, 10.0),  # anchors per output grid (0 to ignore) | ||||||
|  |                 'fl_gamma': (0, 0.0, 2.0),  # focal loss gamma (efficientDet default gamma=1.5) | ||||||
|  |                 'hsv_h': (1, 0.0, 0.1),  # image HSV-Hue augmentation (fraction) | ||||||
|  |                 'hsv_s': (1, 0.0, 0.9),  # image HSV-Saturation augmentation (fraction) | ||||||
|  |                 'hsv_v': (1, 0.0, 0.9),  # image HSV-Value augmentation (fraction) | ||||||
|  |                 'degrees': (1, 0.0, 45.0),  # image rotation (+/- deg) | ||||||
|  |                 'translate': (1, 0.0, 0.9),  # image translation (+/- fraction) | ||||||
|  |                 'scale': (1, 0.0, 0.9),  # image scale (+/- gain) | ||||||
|  |                 'shear': (1, 0.0, 10.0),  # image shear (+/- deg) | ||||||
|  |                 'perspective': (0, 0.0, 0.001),  # image perspective (+/- fraction), range 0-0.001 | ||||||
|  |                 'flipud': (1, 0.0, 1.0),  # image flip up-down (probability) | ||||||
|  |                 'fliplr': (0, 0.0, 1.0),  # image flip left-right (probability) | ||||||
|  |                 'mosaic': (1, 0.0, 1.0),  # image mosaic (probability) | ||||||
|  |                 'mixup': (1, 0.0, 1.0)}  # image mixup (probability) | ||||||
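|  |         # each tuple is (mutation gain, lower limit, upper limit); a gain of 0 (e.g. 'iou_t', | ||||||
|  |         # 'fl_gamma', 'perspective', 'fliplr' above) zeroes the perturbation, so that | ||||||
|  |         # hyperparameter is carried along but never mutated | ||||||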
|  | 
 | ||||||
|  |         assert opt.local_rank == -1, 'DDP mode not implemented for --evolve' | ||||||
|  |         opt.notest, opt.nosave = True, True  # only test/save final epoch | ||||||
|  |         # ei = [isinstance(x, (int, float)) for x in hyp.values()]  # evolvable indices | ||||||
|  |         yaml_file = Path(opt.save_dir) / 'hyp_evolved.yaml'  # save best result here | ||||||
|  |         if opt.bucket: | ||||||
|  |             os.system('gsutil cp gs://%s/evolve.txt .' % opt.bucket)  # download evolve.txt if exists | ||||||
|  | 
 | ||||||
|  |         for _ in range(300):  # generations to evolve | ||||||
|  |             if Path('evolve.txt').exists():  # if evolve.txt exists: select best hyps and mutate | ||||||
|  |                 # Select parent(s) | ||||||
|  |                 parent = 'single'  # parent selection method: 'single' or 'weighted' | ||||||
|  |                 x = np.loadtxt('evolve.txt', ndmin=2) | ||||||
|  |                 n = min(5, len(x))  # number of previous results to consider | ||||||
|  |                 x = x[np.argsort(-fitness(x))][:n]  # top n mutations | ||||||
|  |                 w = fitness(x) - fitness(x).min() + 1E-6  # weights (kept > 0 so random.choices never sees an all-zero total) | ||||||
|  |                 if parent == 'single' or len(x) == 1: | ||||||
|  |                     # x = x[random.randint(0, n - 1)]  # random selection | ||||||
|  |                     x = x[random.choices(range(n), weights=w)[0]]  # weighted selection | ||||||
|  |                 elif parent == 'weighted': | ||||||
|  |                     x = (x * w.reshape(n, 1)).sum(0) / w.sum()  # weighted combination | ||||||
|  | 
 | ||||||
|  |                 # Mutate | ||||||
|  |                 mp, s = 0.8, 0.2  # mutation probability, sigma | ||||||
|  |                 npr = np.random | ||||||
|  |                 npr.seed(int(time.time())) | ||||||
|  |                 g = np.array([x[0] for x in meta.values()])  # gains 0-1 | ||||||
|  |                 ng = len(meta) | ||||||
|  |                 v = np.ones(ng) | ||||||
|  |                 while all(v == 1):  # mutate until a change occurs (prevent duplicates) | ||||||
|  |                     v = (g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1).clip(0.3, 3.0) | ||||||
|  |                 for i, k in enumerate(hyp.keys()):  # plt.hist(v.ravel(), 300) | ||||||
|  |                     hyp[k] = float(x[i + 7] * v[i])  # mutate | ||||||
|  | 
 | ||||||
|  |             # Constrain to limits | ||||||
|  |             for k, v in meta.items(): | ||||||
|  |                 hyp[k] = max(hyp[k], v[1])  # lower limit | ||||||
|  |                 hyp[k] = min(hyp[k], v[2])  # upper limit | ||||||
|  |                 hyp[k] = round(hyp[k], 5)  # significant digits | ||||||
|  | 
 | ||||||
|  |             # Train mutation | ||||||
|  |             results = train(hyp.copy(), opt, device, wandb=wandb) | ||||||
|  | 
 | ||||||
|  |             # Write mutation results | ||||||
|  |             print_mutation(hyp.copy(), results, yaml_file, opt.bucket) | ||||||
|  | 
 | ||||||
|  |         # Plot results | ||||||
|  |         plot_evolution(yaml_file) | ||||||
|  |         print(f'Hyperparameter evolution complete. Best results saved as: {yaml_file}\n' | ||||||
|  |               f'Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}') | ||||||
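|  | 
 | ||||||
|  | # Usage (sketch, using the flags defined above): | ||||||
|  | #   single GPU: python train.py --data data/widerface.yaml --cfg models/yolov5s.yaml --weights weights/yolov5s.pt | ||||||
|  | #   DDP:        python -m torch.distributed.launch --nproc_per_node 2 train.py --batch-size 64 | ||||||
|  | #               (--local_rank is supplied by the launcher; batch size must divide across GPUs) | ||||||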
algorithm/Car_recognition/utils/__init__.py (new file, 0 lines)

algorithm/Car_recognition/utils/activations.py (new file, 72 lines)
						| @ -0,0 +1,72 @@ | |||||||
|  | # Activation functions | ||||||
|  | 
 | ||||||
|  | import torch | ||||||
|  | import torch.nn as nn | ||||||
|  | import torch.nn.functional as F | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # SiLU https://arxiv.org/pdf/1606.08415.pdf ---------------------------------------------------------------------------- | ||||||
|  | class SiLU(nn.Module):  # export-friendly version of nn.SiLU() | ||||||
|  |     @staticmethod | ||||||
|  |     def forward(x): | ||||||
|  |         return x * torch.sigmoid(x) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class Hardswish(nn.Module):  # export-friendly version of nn.Hardswish() | ||||||
|  |     @staticmethod | ||||||
|  |     def forward(x): | ||||||
|  |         # return x * F.hardsigmoid(x)  # for torchscript and CoreML | ||||||
|  |         return x * F.hardtanh(x + 3, 0., 6.) / 6.  # for torchscript, CoreML and ONNX | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class MemoryEfficientSwish(nn.Module): | ||||||
|  |     class F(torch.autograd.Function): | ||||||
|  |         @staticmethod | ||||||
|  |         def forward(ctx, x): | ||||||
|  |             ctx.save_for_backward(x) | ||||||
|  |             return x * torch.sigmoid(x) | ||||||
|  | 
 | ||||||
|  |         @staticmethod | ||||||
|  |         def backward(ctx, grad_output): | ||||||
|  |             x = ctx.saved_tensors[0] | ||||||
|  |             sx = torch.sigmoid(x) | ||||||
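|  |             # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x))) | ||||||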
|  |             return grad_output * (sx * (1 + x * (1 - sx))) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         return self.F.apply(x) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # Mish https://github.com/digantamisra98/Mish -------------------------------------------------------------------------- | ||||||
|  | class Mish(nn.Module): | ||||||
|  |     @staticmethod | ||||||
|  |     def forward(x): | ||||||
|  |         return x * F.softplus(x).tanh() | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | class MemoryEfficientMish(nn.Module): | ||||||
|  |     class F(torch.autograd.Function): | ||||||
|  |         @staticmethod | ||||||
|  |         def forward(ctx, x): | ||||||
|  |             ctx.save_for_backward(x) | ||||||
|  |             return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x))) | ||||||
|  | 
 | ||||||
|  |         @staticmethod | ||||||
|  |         def backward(ctx, grad_output): | ||||||
|  |             x = ctx.saved_tensors[0] | ||||||
|  |             sx = torch.sigmoid(x) | ||||||
|  |             fx = F.softplus(x).tanh() | ||||||
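|  |             # d/dx [x * tanh(softplus(x))] = fx + x * sigmoid(x) * (1 - fx^2) | ||||||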
|  |             return grad_output * (fx + x * sx * (1 - fx * fx)) | ||||||
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         return self.F.apply(x) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | # FReLU https://arxiv.org/abs/2007.11824 ------------------------------------------------------------------------------- | ||||||
|  | class FReLU(nn.Module): | ||||||
|  |     def __init__(self, c1, k=3):  # ch_in, kernel | ||||||
|  |         super().__init__() | ||||||
|  |         self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) | ||||||
|  |         self.bn = nn.BatchNorm2d(c1) | ||||||
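|  |         # the depthwise 3x3 conv + BN is the spatial "funnel condition" T(x); | ||||||
|  |         # forward returns the elementwise max(x, T(x)) | ||||||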
|  | 
 | ||||||
|  |     def forward(self, x): | ||||||
|  |         return torch.max(x, self.bn(self.conv(x))) | ||||||
algorithm/Car_recognition/utils/autoanchor.py (new file, 155 lines)
						| @ -0,0 +1,155 @@ | |||||||
|  | # Auto-anchor utils | ||||||
|  | 
 | ||||||
|  | import numpy as np | ||||||
|  | import torch | ||||||
|  | import yaml | ||||||
|  | from scipy.cluster.vq import kmeans | ||||||
|  | from tqdm import tqdm | ||||||
|  | 
 | ||||||
|  | from utils.general import colorstr | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def check_anchor_order(m): | ||||||
|  |     # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary | ||||||
|  |     a = m.anchor_grid.prod(-1).view(-1)  # anchor area | ||||||
|  |     da = a[-1] - a[0]  # delta a | ||||||
|  |     ds = m.stride[-1] - m.stride[0]  # delta s | ||||||
|  |     if da.sign() != ds.sign():  # anchor and stride order disagree, so flip | ||||||
|  |         print('Reversing anchor order') | ||||||
|  |         m.anchors[:] = m.anchors.flip(0) | ||||||
|  |         m.anchor_grid[:] = m.anchor_grid.flip(0) | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def check_anchors(dataset, model, thr=4.0, imgsz=640): | ||||||
|  |     # Check anchor fit to data, recompute if necessary | ||||||
|  |     prefix = colorstr('autoanchor: ') | ||||||
|  |     print(f'\n{prefix}Analyzing anchors... ', end='') | ||||||
|  |     m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1]  # Detect() | ||||||
|  |     shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True) | ||||||
|  |     scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1))  # augment scale | ||||||
|  |     wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float()  # wh | ||||||
|  | 
 | ||||||
|  |     def metric(k):  # compute metric | ||||||
|  |         r = wh[:, None] / k[None] | ||||||
|  |         x = torch.min(r, 1. / r).min(2)[0]  # ratio metric | ||||||
|  |         best = x.max(1)[0]  # best_x | ||||||
|  |         aat = (x > 1. / thr).float().sum(1).mean()  # anchors above threshold | ||||||
|  |         bpr = (best > 1. / thr).float().mean()  # best possible recall | ||||||
|  |         return bpr, aat | ||||||
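|  |     # an anchor "fits" a label when both wh ratios are within thr (default 4x); bpr is the | ||||||
|  |     # fraction of labels whose best anchor fits, aat the mean number of fitting anchors | ||||||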
|  | 
 | ||||||
|  |     bpr, aat = metric(m.anchor_grid.clone().cpu().view(-1, 2)) | ||||||
|  |     print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='') | ||||||
|  |     if bpr < 0.98:  # threshold to recompute | ||||||
|  |         print('. Attempting to improve anchors, please wait...') | ||||||
|  |         na = m.anchor_grid.numel() // 2  # number of anchors | ||||||
|  |         new_anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False) | ||||||
|  |         new_bpr = metric(new_anchors.reshape(-1, 2))[0] | ||||||
|  |         if new_bpr > bpr:  # replace anchors | ||||||
|  |             new_anchors = torch.tensor(new_anchors, device=m.anchors.device).type_as(m.anchors) | ||||||
|  |             m.anchor_grid[:] = new_anchors.clone().view_as(m.anchor_grid)  # for inference | ||||||
|  |             m.anchors[:] = new_anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1)  # loss | ||||||
|  |             check_anchor_order(m) | ||||||
|  |             print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.') | ||||||
|  |         else: | ||||||
|  |             print(f'{prefix}Original anchors better than new anchors. Proceeding with original anchors.') | ||||||
|  |     print('')  # newline | ||||||
|  | 
 | ||||||
|  | 
 | ||||||
|  | def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True): | ||||||
|  |     """ Creates kmeans-evolved anchors from training dataset | ||||||
|  | 
 | ||||||
|  |         Arguments: | ||||||
|  |             path: path to dataset *.yaml, or a loaded dataset | ||||||
|  |             n: number of anchors | ||||||
|  |             img_size: image size used for training | ||||||
|  |             thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0 | ||||||
|  |             gen: generations to evolve anchors using genetic algorithm | ||||||
|  |             verbose: print all results | ||||||
|  | 
 | ||||||
|  |         Return: | ||||||
|  |             k: kmeans evolved anchors | ||||||
|  | 
 | ||||||
|  |         Usage: | ||||||
|  |             from utils.autoanchor import *; _ = kmean_anchors() | ||||||
|  |     """ | ||||||
|  |     thr = 1. / thr | ||||||
|  |     prefix = colorstr('autoanchor: ') | ||||||
|  | 
 | ||||||
|  |     def metric(k, wh):  # compute metrics | ||||||
|  |         r = wh[:, None] / k[None] | ||||||
|  |         x = torch.min(r, 1. / r).min(2)[0]  # ratio metric | ||||||
|  |         # x = wh_iou(wh, torch.tensor(k))  # iou metric | ||||||
|  |         return x, x.max(1)[0]  # x, best_x | ||||||
|  | 
 | ||||||
|  |     def anchor_fitness(k):  # mutation fitness | ||||||
|  |         _, best = metric(torch.tensor(k, dtype=torch.float32), wh) | ||||||
|  |         return (best * (best > thr).float()).mean()  # fitness | ||||||
|  | 
 | ||||||
|  |     def print_results(k): | ||||||
|  |         k = k[np.argsort(k.prod(1))]  # sort small to large | ||||||
|  |         x, best = metric(k, wh0) | ||||||
|  |         bpr, aat = (best > thr).float().mean(), (x > thr).float().mean() * n  # best possible recall, anch > thr | ||||||
|  |         print(f'{prefix}thr={thr:.2f}: {bpr:.4f} best possible recall, {aat:.2f} anchors past thr') | ||||||
|  |         print(f'{prefix}n={n}, img_size={img_size}, metric_all={x.mean():.3f}/{best.mean():.3f}-mean/best, ' | ||||||
|  |               f'past_thr={x[x > thr].mean():.3f}-mean: ', end='') | ||||||
|  |         for i, x in enumerate(k): | ||||||
|  |             print('%i,%i' % (round(x[0]), round(x[1])), end=',  ' if i < len(k) - 1 else '\n')  # use in *.cfg | ||||||
|  |         return k | ||||||
|  | 
 | ||||||
|  |     if isinstance(path, str):  # *.yaml file | ||||||
|  |         with open(path) as f: | ||||||
|  |             data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # model dict | ||||||
|  |         from utils.datasets import LoadImagesAndLabels | ||||||
|  |         dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True) | ||||||
|  |     else: | ||||||
|  |         dataset = path  # dataset | ||||||
|  | 
 | ||||||
|  |     # Get label wh | ||||||
|  |     shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True) | ||||||
|  |     wh0 = np.concatenate([l[:, 3:5] * s for s, l in zip(shapes, dataset.labels)])  # wh | ||||||
|  | 
 | ||||||
|  |     # Filter | ||||||
|  |     i = (wh0 < 3.0).any(1).sum() | ||||||
|  |     if i: | ||||||
|  |         print(f'{prefix}WARNING: Extremely small objects found. {i} of {len(wh0)} labels are < 3 pixels in size.') | ||||||
|  |     wh = wh0[(wh0 >= 2.0).any(1)]  # keep labels with at least one side >= 2 pixels | ||||||
|  |     # wh = wh * (np.random.rand(wh.shape[0], 1) * 0.9 + 0.1)  # multiply by random scale 0-1 | ||||||
|  | 
 | ||||||
|  |     # Kmeans calculation | ||||||
|  |     print(f'{prefix}Running kmeans for {n} anchors on {len(wh)} points...') | ||||||
|  |     s = wh.std(0)  # sigmas for whitening | ||||||
|  |     k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance | ||||||
|  |     k *= s | ||||||
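|  |     # scipy's kmeans expects whitened (unit-variance) features, hence the divide-by-std | ||||||
|  |     # before clustering and the rescaling of the centroids afterwards | ||||||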
|  |     wh = torch.tensor(wh, dtype=torch.float32)  # filtered | ||||||
|  |     wh0 = torch.tensor(wh0, dtype=torch.float32)  # unfiltered | ||||||
|  |     k = print_results(k) | ||||||
|  | 
 | ||||||
|  |     # Plot | ||||||
|  |     # k, d = [None] * 20, [None] * 20 | ||||||
|  |     # for i in tqdm(range(1, 21)): | ||||||
|  |     #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance | ||||||
|  |     # fig, ax = plt.subplots(1, 2, figsize=(14, 7), tight_layout=True) | ||||||
|  |     # ax = ax.ravel() | ||||||
|  |     # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.') | ||||||
|  |     # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh | ||||||
|  |     # ax[0].hist(wh[wh[:, 0]<100, 0],400) | ||||||
|  |     # ax[1].hist(wh[wh[:, 1]<100, 1],400) | ||||||
|  |     # fig.savefig('wh.png', dpi=200) | ||||||
|  | 
 | ||||||
|  |     # Evolve | ||||||
|  |     npr = np.random | ||||||
|  |     f, sh, mp, s = anchor_fitness(k), k.shape, 0.9, 0.1  # fitness, anchor shape, mutation prob, sigma | ||||||
|  |     pbar = tqdm(range(gen), desc=f'{prefix}Evolving anchors with Genetic Algorithm:')  # progress bar | ||||||
|  |     for _ in pbar: | ||||||
|  |         v = np.ones(sh) | ||||||
|  |         while (v == 1).all():  # mutate until a change occurs (prevent duplicates) | ||||||
|  |             v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0) | ||||||
|  |         kg = (k.copy() * v).clip(min=2.0) | ||||||
|  |         fg = anchor_fitness(kg) | ||||||
|  |         if fg > f: | ||||||
|  |             f, k = fg, kg.copy() | ||||||
|  |             pbar.desc = f'{prefix}Evolving anchors with Genetic Algorithm: fitness = {f:.4f}' | ||||||
|  |             if verbose: | ||||||
|  |                 print_results(k) | ||||||
|  | 
 | ||||||
|  |     return print_results(k) | ||||||
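|  | 
 | ||||||
|  | # Example (sketch, mirroring the docstring usage above): | ||||||
|  | #   from utils.autoanchor import kmean_anchors | ||||||
|  | #   k = kmean_anchors('./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000) | ||||||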
algorithm/Car_recognition/utils/aws/__init__.py (new file, 0 lines)

algorithm/Car_recognition/utils/aws/mime.sh (new file, 26 lines)
						| @ -0,0 +1,26 @@ | |||||||
|  | # AWS EC2 instance startup 'MIME' script https://aws.amazon.com/premiumsupport/knowledge-center/execute-user-data-ec2/ | ||||||
|  | # This script will run on every instance restart, not only on first start | ||||||
|  | # --- DO NOT COPY ABOVE COMMENTS WHEN PASTING INTO USERDATA --- | ||||||
|  | 
 | ||||||
|  | Content-Type: multipart/mixed; boundary="//" | ||||||
|  | MIME-Version: 1.0 | ||||||
|  | 
 | ||||||
|  | --// | ||||||
|  | Content-Type: text/cloud-config; charset="us-ascii" | ||||||
|  | MIME-Version: 1.0 | ||||||
|  | Content-Transfer-Encoding: 7bit | ||||||
|  | Content-Disposition: attachment; filename="cloud-config.txt" | ||||||
|  | 
 | ||||||
|  | #cloud-config | ||||||
|  | cloud_final_modules: | ||||||
|  | - [scripts-user, always] | ||||||
|  | 
 | ||||||
|  | --// | ||||||
|  | Content-Type: text/x-shellscript; charset="us-ascii" | ||||||
|  | MIME-Version: 1.0 | ||||||
|  | Content-Transfer-Encoding: 7bit | ||||||
|  | Content-Disposition: attachment; filename="userdata.txt" | ||||||
|  | 
 | ||||||
|  | #!/bin/bash | ||||||
|  | # --- paste contents of userdata.sh here --- | ||||||
|  | --// | ||||||
algorithm/Car_recognition/utils/aws/resume.py (new file, 37 lines)
						| @ -0,0 +1,37 @@ | |||||||
|  | # Resume all interrupted trainings in yolov5/ dir including DDP trainings | ||||||
|  | # Usage: $ python utils/aws/resume.py | ||||||
|  | 
 | ||||||
|  | import os | ||||||
|  | import sys | ||||||
|  | from pathlib import Path | ||||||
|  | 
 | ||||||
|  | import torch | ||||||
|  | import yaml | ||||||
|  | 
 | ||||||
|  | sys.path.append('./')  # to run '$ python *.py' files in subdirectories | ||||||
|  | 
 | ||||||
|  | port = 0  # --master_port | ||||||
|  | path = Path('').resolve() | ||||||
|  | for last in path.rglob('*/**/last.pt'): | ||||||
|  |     ckpt = torch.load(last) | ||||||
|  |     if ckpt['optimizer'] is None: | ||||||
|  |         continue | ||||||
|  | 
 | ||||||
|  |     # Load opt.yaml | ||||||
|  |     with open(last.parent.parent / 'opt.yaml') as f: | ||||||
|  |         opt = yaml.load(f, Loader=yaml.SafeLoader) | ||||||
|  | 
 | ||||||
|  |     # Get device count | ||||||
|  |     d = opt['device'].split(',')  # devices | ||||||
|  |     nd = len(d)  # number of devices | ||||||
|  |     ddp = nd > 1 or (nd == 0 and torch.cuda.device_count() > 1)  # distributed data parallel | ||||||
|  | 
 | ||||||
|  |     if ddp:  # multi-GPU | ||||||
|  |         port += 1 | ||||||
|  |         cmd = f'python -m torch.distributed.launch --nproc_per_node {nd} --master_port {port} train.py --resume {last}' | ||||||
|  |     else:  # single-GPU | ||||||
|  |         cmd = f'python train.py --resume {last}' | ||||||
|  | 
 | ||||||
|  |     cmd += ' > /dev/null 2>&1 &'  # redirect output to /dev/null and run in the background | ||||||
|  |     print(cmd) | ||||||
|  |     os.system(cmd) | ||||||
algorithm/Car_recognition/utils/aws/userdata.sh (new file, 27 lines)
						| @ -0,0 +1,27 @@ | |||||||
|  | #!/bin/bash | ||||||
|  | # AWS EC2 instance startup script https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html | ||||||
|  | # This script will run only once on first instance start (for a re-start script see mime.sh) | ||||||
|  | # /home/ubuntu (ubuntu) or /home/ec2-user (amazon-linux) is working dir | ||||||
|  | # Use >300 GB SSD | ||||||
|  | 
 | ||||||
|  | cd /home/ubuntu | ||||||
|  | if [ ! -d yolov5 ]; then | ||||||
|  |   echo "Running first-time script." # install dependencies, download COCO, pull Docker | ||||||
|  |   git clone https://github.com/ultralytics/yolov5 && sudo chmod -R 777 yolov5 | ||||||
|  |   cd yolov5 | ||||||
|  |   bash data/scripts/get_coco.sh && echo "Data done." & | ||||||
|  |   sudo docker pull ultralytics/yolov5:latest && echo "Docker done." & | ||||||
|  |   python -m pip install --upgrade pip && pip install -r requirements.txt && python detect.py && echo "Requirements done." & | ||||||
|  |   wait && echo "All tasks done." # finish background tasks | ||||||
|  | else | ||||||
|  |   echo "Running re-start script." # resume interrupted runs | ||||||
|  |   i=0 | ||||||
|  |   list=$(sudo docker ps -qa) # container list i.e. $'one\ntwo\nthree\nfour' | ||||||
|  |   while IFS= read -r id; do | ||||||
|  |     ((i++)) | ||||||
|  |     echo "restarting container $i: $id" | ||||||
|  |     sudo docker start $id | ||||||
|  |     # sudo docker exec -it $id python train.py --resume # single-GPU | ||||||
|  |     sudo docker exec -d $id python utils/aws/resume.py # multi-scenario | ||||||
|  |   done <<<"$list" | ||||||
|  | fi | ||||||