diff --git a/cmake/QtCreatorIDEBranding.cmake b/cmake/QtCreatorIDEBranding.cmake
index 5cdff672dd73..ab9314416a7f 100644
--- a/cmake/QtCreatorIDEBranding.cmake
+++ b/cmake/QtCreatorIDEBranding.cmake
@@ -25,15 +25,15 @@ set(IDE_ICON_PATH "")
 set(IDE_LOGO_PATH "")
 # OPENMV-DIFF #
-set(IDE_VERSION "4.0.6")
+set(IDE_VERSION "4.0.7")
 set(IDE_VERSION_COMPAT "${IDE_VERSION}")
 set(IDE_VERSION_DISPLAY "${IDE_VERSION}")
 set(IDE_AUTHOR "Canaan Inc.")
 set(IDE_COPYRIGHT_YEAR_FOUNDED "2013")
 set(IDE_COPYRIGHT_YEAR "2024")
 set(IDE_SETTINGSVARIANT "CanMV")
-set(IDE_DISPLAY_NAME "CanMV IDE 4")
+set(IDE_DISPLAY_NAME "CanMV IDE K230")
 set(IDE_ID "canmvide")
-set(IDE_CASED_ID "CanMVIDE4")
+set(IDE_CASED_ID "CanMVIDEK230")
 set(IDE_BUNDLE_IDENTIFIER "com.canaan-creative.${IDE_ID}")
 # OPENMV-DIFF #
diff --git a/dist/installer/ifw/config/config-windows.xml.in b/dist/installer/ifw/config/config-windows.xml.in
index d832f67eeb7e..98949ab9ca51 100644
--- a/dist/installer/ifw/config/config-windows.xml.in
+++ b/dist/installer/ifw/config/config-windows.xml.in
@@ -1,13 +1,13 @@
-    CanMV IDE 4
+    CanMV IDE K230
     {version}
-    CanMV IDE 4
+    CanMV IDE K230
     Canaan Inc.
     https://www.canaan.io
     CanMVIDEUninst
-    @ApplicationsDir@/CanMV IDE 4
-    CanMV IDE 4
+    @ApplicationsDir@/CanMV IDE K230
+    CanMV IDE K230
     controlscript.qs
diff --git a/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/installscript.qs b/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/installscript.qs
index 551c7f8b594f..e3b8d7d67eaf 100644
--- a/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/installscript.qs
+++ b/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/installscript.qs
@@ -83,11 +83,11 @@ Component.prototype.createOperations = function()
 {
     component.addOperation( "CreateShortcut",
                             component.qtCreatorBinaryPath,
-                            "@StartMenuDir@/CanMV K230 IDE.lnk",
+                            "@StartMenuDir@/CanMV IDE K230.lnk",
                             "workingDirectory=@homeDir@" );
     component.addOperation( "CreateShortcut",
                             component.qtCreatorBinaryPath,
-                            "@DesktopDir@/CanMV K230 IDE.lnk",
+                            "@DesktopDir@/CanMV IDE K230.lnk",
                             "workingDirectory=@homeDir@" );
     component.addOperation( "CreateShortcut",
                             "@TargetDir@/CanMVIDEUninst.exe",
@@ -104,8 +104,8 @@ Component.prototype.createOperations = function()
     component.addOperation( "CreateDesktopEntry",
                             "CanMV-openmvide.desktop",
                             "Type=Application\n" +
-                            "Name=CanMV IDE 4\n" +
-                            "GenericName=CanMV IDE 4\n" +
+                            "Name=CanMV IDE K230\n" +
+                            "GenericName=CanMV IDE K230\n" +
                             "Comment=The IDE of choice for CanMV Cam Development.\n" +
                             "Exec=" + component.qtCreatorBinaryPath + " %F\n" +
                             "Icon=CanMV-canmvide\n" +
diff --git a/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/launchopenmvidecheckboxform.ui b/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/launchopenmvidecheckboxform.ui
index 155574d687a2..5dbe7b696398 100644
--- a/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/launchopenmvidecheckboxform.ui
+++ b/dist/installer/ifw/packages/io.openmv.openmvide.application/meta/launchopenmvidecheckboxform.ui
@@ -17,7 +17,7 @@
-    Launch CanMV IDE
+    Launch CanMV IDE K230
     true
diff --git a/share/qtcreator/examples/01-Media/acodec.py b/share/qtcreator/examples/01-Media/acodec.py
old mode 100644
new mode 100755
index ff4906c9e8bf..9269ad01146f
--- a/share/qtcreator/examples/01-Media/acodec.py
+++ b/share/qtcreator/examples/01-Media/acodec.py
@@ -4,12 +4,11 @@
 #
 # You can collect raw data and encode it into g711 or decode it into raw data output.
- -from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 +import os +from mpp.payload_struct import * #导入payload模块,用于获取音视频编解码类型 from media.media import * #导入media模块,用于初始化vb buffer +from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 import media.g711 as g711 #导入g711模块,用于g711编解码 -from mpp.payload_struct import * #导入payload模块,用于获取音视频编解码类型 -import os def exit_check(): try: @@ -25,78 +24,82 @@ def encode_audio(filename, duration): CHANNELS = 2 #设置声道数 RATE = 44100 #设置采样率 - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 - media.buffer_init() #vb buffer初始化 - - enc.create() #创建编码器 - #创建音频输入流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - frames = [] - #采集音频数据编码并存入列表 - for i in range(0, int(RATE / CHUNK * duration)): - frame_data = stream.read() #从音频输入流中读取音频数据 - data = enc.encode(frame_data) #编码音频数据为g711 - frames.append(data) #将g711编码数据保存到列表中 - if exit_check(): - break - - stream.stop_stream() #停止音频输入流 - stream.close() #关闭音频输入流 - p.terminate() #释放音频对象 - enc.destroy() #销毁g711音频编码器 - - #将g711编码数据存入文件中 - with open(filename,mode='w') as wf: - wf.write(b''.join(frames)) - - media.buffer_deinit() #释放vb buffer + try: + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 + MediaManager.init() #vb buffer初始化 + + enc.create() #创建编码器 + #创建音频输入流 + stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + frames = [] + #采集音频数据编码并存入列表 + for i in range(0, int(RATE / CHUNK * duration)): + frame_data = stream.read() #从音频输入流中读取音频数据 + data = enc.encode(frame_data) #编码音频数据为g711 + frames.append(data) #将g711编码数据保存到列表中 + if exit_check(): + break + #将g711编码数据存入文件中 + with open(filename,mode='w') as wf: + wf.write(b''.join(frames)) + stream.stop_stream() #停止音频输入流 + stream.close() #关闭音频输入流 + p.terminate() #释放音频对象 + enc.destroy() #销毁g711音频编码器 + except BaseException as e: + print(f"Exception {e}") + finally: + MediaManager.deinit() #释放vb buffer def decode_audio(filename): - wf = open(filename,mode='rb') #打开g711文件 FORMAT = paInt16 #设置音频chunk值 CHANNELS = 2 #设置声道数 RATE = 44100 #设置采样率 CHUNK = int(RATE/25) #设置音频chunk值 - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 - media.buffer_init() #vb buffer初始化 - - dec.create() #创建解码器 + try: + wf = open(filename,mode='rb') #打开g711文件 + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 + MediaManager.init() #vb buffer初始化 - #创建音频输出流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True, - frames_per_buffer=CHUNK) + dec.create() #创建解码器 - stream_len = CHUNK*CHANNELS*2//2 #设置每次读取的g711数据流长度 - stream_data = wf.read(stream_len) #从g711文件中读取数据 + #创建音频输出流 + stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + output=True, + frames_per_buffer=CHUNK) - #解码g711文件并播放 - while stream_data: - frame_data = dec.decode(stream_data) #解码g711文件 - stream.write(frame_data) #播放raw数据 + stream_len = CHUNK*CHANNELS*2//2 #设置每次读取的g711数据流长度 stream_data = wf.read(stream_len) #从g711文件中读取数据 - if exit_check(): - break - - stream.stop_stream() #停止音频输入流 - stream.close() #关闭音频输入流 - p.terminate() #释放音频对象 - dec.destroy() #销毁解码器 - wf.close() #关闭g711文件 - media.buffer_deinit() #释放vb buffer + #解码g711文件并播放 + while stream_data: + frame_data = dec.decode(stream_data) #解码g711文件 + stream.write(frame_data) #播放raw数据 + stream_data = wf.read(stream_len) #从g711文件中读取数据 + if exit_check(): + 
break + stream.stop_stream() #停止音频输入流 + stream.close() #关闭音频输入流 + p.terminate() #释放音频对象 + dec.destroy() #销毁解码器 + wf.close() #关闭g711文件 + + except BaseException as e: + print(f"Exception {e}") + finally: + MediaManager.deinit() #释放vb buffer def loop_codec(duration): CHUNK = int(44100/25) #设置音频chunk值 @@ -104,47 +107,49 @@ def loop_codec(duration): CHANNELS = 2 #设置声道数 RATE = 44100 #设置采样率 - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 - enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 - media.buffer_init() #vb buffer初始化 - - dec.create() #创建g711解码器 - enc.create() #创建g711编码器 - - #创建音频输入流 - input_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - #创建音频输出流 - output_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True, - frames_per_buffer=CHUNK) - - #从音频输入流中获取数据->编码->解码->写入到音频输出流中 - for i in range(0, int(RATE / CHUNK * duration)): - frame_data = input_stream.read() #从音频输入流中获取raw音频数据 - stream_data = enc.encode(frame_data) #编码音频数据为g711 - frame_data = dec.decode(stream_data) #解码g711数据为raw数据 - output_stream.write(frame_data) #播放raw数据 - if exit_check(): - break - - input_stream.stop_stream() #停止音频输入流 - output_stream.stop_stream() #停止音频输出流 - input_stream.close() #关闭音频输入流 - output_stream.close() #关闭音频输出流 - p.terminate() #释放音频对象 - dec.destroy() #销毁g711解码器 - enc.destroy() #销毁g711编码器 - - media.buffer_deinit() #释放vb buffer + try: + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + dec = g711.Decoder(K_PT_G711A,CHUNK) #创建g711解码器对象 + enc = g711.Encoder(K_PT_G711A,CHUNK) #创建g711编码器对象 + MediaManager.init() #vb buffer初始化 + + dec.create() #创建g711解码器 + enc.create() #创建g711编码器 + + #创建音频输入流 + input_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + #创建音频输出流 + output_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + output=True, + frames_per_buffer=CHUNK) + + #从音频输入流中获取数据->编码->解码->写入到音频输出流中 + for i in range(0, int(RATE / CHUNK * duration)): + frame_data = input_stream.read() #从音频输入流中获取raw音频数据 + stream_data = enc.encode(frame_data) #编码音频数据为g711 + frame_data = dec.decode(stream_data) #解码g711数据为raw数据 + output_stream.write(frame_data) #播放raw数据 + if exit_check(): + break + input_stream.stop_stream() #停止音频输入流 + output_stream.stop_stream() #停止音频输出流 + input_stream.close() #关闭音频输入流 + output_stream.close() #关闭音频输出流 + p.terminate() #释放音频对象 + dec.destroy() #销毁g711解码器 + enc.destroy() #销毁g711编码器 + except BaseException as e: + print(f"Exception {e}") + finally: + MediaManager.deinit() #释放vb buffer if __name__ == "__main__": os.exitpoint(os.EXITPOINT_ENABLE) diff --git a/share/qtcreator/examples/01-Media/audio.py b/share/qtcreator/examples/01-Media/audio.py old mode 100644 new mode 100755 index 015d99758089..7fdc0b1a7790 --- a/share/qtcreator/examples/01-Media/audio.py +++ b/share/qtcreator/examples/01-Media/audio.py @@ -4,10 +4,10 @@ # # You can play wav files or capture audio to save as wav +import os +from media.media import * #导入media模块,用于初始化vb buffer from media.pyaudio import * #导入pyaudio模块,用于采集和播放音频 import media.wave as wave #导入wav模块,用于保存和加载wav音频文件 -from media.media import * #导入media模块,用于初始化vb buffer -import os def exit_check(): try: @@ -23,68 +23,71 @@ def record_audio(filename, duration): CHANNELS = 2 #设置声道数 RATE = 44100 #设置采样率 - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - media.buffer_init() #vb buffer初始化 - - #创建音频输入流 - stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - 
frames_per_buffer=CHUNK) - - frames = [] - #采集音频数据并存入列表 - for i in range(0, int(RATE / CHUNK * duration)): - data = stream.read() - frames.append(data) - if exit_check(): - break - - stream.stop_stream() #停止采集音频数据 - stream.close()#关闭音频输入流 - p.terminate()#释放音频对象 - - #将列表中的数据保存到wav文件中 - wf = wave.open(filename, 'wb') #创建wav 文件 - wf.set_channels(CHANNELS) #设置wav 声道数 - wf.set_sampwidth(p.get_sample_size(FORMAT)) #设置wav 采样精度 - wf.set_framerate(RATE) #设置wav 采样率 - wf.write_frames(b''.join(frames)) #存储wav音频数据 - wf.close() #关闭wav文件 - - media.buffer_deinit() #释放vb buffer + try: + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 + + #创建音频输入流 + stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + frames = [] + #采集音频数据并存入列表 + for i in range(0, int(RATE / CHUNK * duration)): + data = stream.read() + frames.append(data) + if exit_check(): + break + #将列表中的数据保存到wav文件中 + wf = wave.open(filename, 'wb') #创建wav 文件 + wf.set_channels(CHANNELS) #设置wav 声道数 + wf.set_sampwidth(p.get_sample_size(FORMAT)) #设置wav 采样精度 + wf.set_framerate(RATE) #设置wav 采样率 + wf.write_frames(b''.join(frames)) #存储wav音频数据 + wf.close() #关闭wav文件 + except BaseException as e: + print(f"Exception {e}") + finally: + stream.stop_stream() #停止采集音频数据 + stream.close()#关闭音频输入流 + p.terminate()#释放音频对象 + MediaManager.deinit() #释放vb buffer def play_audio(filename): + try: + wf = wave.open(filename, 'rb')#打开wav文件 + CHUNK = int(wf.get_framerate()/25)#设置音频chunk值 - wf = wave.open(filename, 'rb')#打开wav文件 - CHUNK = int(wf.get_framerate()/25)#设置音频chunk值 - - p = PyAudio() - p.initialize(CHUNK) #初始化PyAudio对象 - media.buffer_init()#vb buffer初始化 - - #创建音频输出流,设置的音频参数均为wave中获取到的参数 - stream = p.open(format=p.get_format_from_width(wf.get_sampwidth()), - channels=wf.get_channels(), - rate=wf.get_framerate(), - output=True,frames_per_buffer=CHUNK) + p = PyAudio() + p.initialize(CHUNK) #初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 - data = wf.read_frames(CHUNK)#从wav文件中读取数一帧数据 + #创建音频输出流,设置的音频参数均为wave中获取到的参数 + stream = p.open(format=p.get_format_from_width(wf.get_sampwidth()), + channels=wf.get_channels(), + rate=wf.get_framerate(), + output=True,frames_per_buffer=CHUNK) - while data: - stream.write(data) #将帧数据写入到音频输出流中 - data = wf.read_frames(CHUNK) #从wav文件中读取数一帧数据 - if exit_check(): - break + data = wf.read_frames(CHUNK)#从wav文件中读取数一帧数据 - stream.stop_stream() #停止音频输出流 - stream.close()#关闭音频输出流 - p.terminate()#释放音频对象 - wf.close()#关闭wav文件 + while data: + stream.write(data) #将帧数据写入到音频输出流中 + data = wf.read_frames(CHUNK) #从wav文件中读取数一帧数据 + if exit_check(): + break + except BaseException as e: + print(f"Exception {e}") + finally: + stream.stop_stream() #停止音频输出流 + stream.close()#关闭音频输出流 + p.terminate()#释放音频对象 + wf.close()#关闭wav文件 - media.buffer_deinit()#释放vb buffer + MediaManager.deinit() #释放vb buffer def loop_audio(duration): @@ -93,36 +96,39 @@ def loop_audio(duration): CHANNELS = 2 #设置音频声道数 RATE = 44100 #设置音频采样率 - p = PyAudio() - p.initialize(CHUNK)#初始化PyAudio对象 - media.buffer_init() #初始化vb buffer - - #创建音频输入流 - input_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - input=True, - frames_per_buffer=CHUNK) - - #创建音频输出流 - output_stream = p.open(format=FORMAT, - channels=CHANNELS, - rate=RATE, - output=True,frames_per_buffer=CHUNK) - - #从音频输入流中获取数据写入到音频输出流中 - for i in range(0, int(RATE / CHUNK * duration)): - output_stream.write(input_stream.read()) - if exit_check(): - break - - input_stream.stop_stream()#停止音频输入流 - output_stream.stop_stream()#停止音频输出流 - 
input_stream.close() #关闭音频输入流 - output_stream.close() #关闭音频输出流 - p.terminate() #释放音频对象 - - media.buffer_deinit() #释放vb buffer + try: + p = PyAudio() + p.initialize(CHUNK)#初始化PyAudio对象 + MediaManager.init() #vb buffer初始化 + + #创建音频输入流 + input_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + input=True, + frames_per_buffer=CHUNK) + + #创建音频输出流 + output_stream = p.open(format=FORMAT, + channels=CHANNELS, + rate=RATE, + output=True,frames_per_buffer=CHUNK) + + #从音频输入流中获取数据写入到音频输出流中 + for i in range(0, int(RATE / CHUNK * duration)): + output_stream.write(input_stream.read()) + if exit_check(): + break + except BaseException as e: + print(f"Exception {e}") + finally: + input_stream.stop_stream()#停止音频输入流 + output_stream.stop_stream()#停止音频输出流 + input_stream.close() #关闭音频输入流 + output_stream.close() #关闭音频输出流 + p.terminate() #释放音频对象 + + MediaManager.deinit() #释放vb buffer if __name__ == "__main__": os.exitpoint(os.EXITPOINT_ENABLE) diff --git a/share/qtcreator/examples/01-Media/camera.py b/share/qtcreator/examples/01-Media/camera.py deleted file mode 100755 index e02dd12c33ea..000000000000 --- a/share/qtcreator/examples/01-Media/camera.py +++ /dev/null @@ -1,95 +0,0 @@ -# Camera Example -# -# Note: You will need an SD card to run this example. -# -# You can start camera preview and capture yuv image. - -from media.camera import * -from media.display import * -from media.media import * -import time, os -import sys - -def camera_test(): - print("camera_test") - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - out_width = 1920 - out_height = 1080 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - out_width = 640 - out_height = 480 - out_width = ALIGN_UP(out_width, 16) - # set chn1 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, out_width, out_height) - # set chn1 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # set chn1 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_2, out_width, out_height) - # set chn2 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - # init meida buffer - media.buffer_init() - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - try: - while True: - os.exitpoint() - time.sleep(5) - for dev_num in range(CAM_DEV_ID_MAX): - if not camera.cam_dev[dev_num].dev_attr.dev_enable: - continue - - for chn_num in range(CAM_CHN_ID_MAX): - if not camera.cam_dev[dev_num].chn_attr[chn_num].chn_enable: - continue - - print(f"camera_test, dev({dev_num}) chn({chn_num}) capture frame.") - # capture image from dev and chn - img = camera.capture_image(dev_num, chn_num) - if img.format() == image.YUV420: - suffix = "yuv420sp" - elif img.format() == image.RGB888: - suffix = "rgb888" - elif img.format() == image.RGBP888: - suffix = 
"rgb888p" - else: - suffix = "unkown" - - filename = f"/sdcard/dev_{dev_num:02d}_chn_{chn_num:02d}_{img.width()}x{img.height()}.{suffix}" - print("save capture image to file:", filename) - img.save(filename) - # release image for dev and chn - camera.release_image(dev_num, chn_num, img) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - # destroy media link - media.destroy_link(meida_source, meida_sink) - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - camera_test() diff --git a/share/qtcreator/examples/01-Media/camera_2sensors.py b/share/qtcreator/examples/01-Media/camera_2sensors.py deleted file mode 100755 index ff252ce1ca21..000000000000 --- a/share/qtcreator/examples/01-Media/camera_2sensors.py +++ /dev/null @@ -1,83 +0,0 @@ -# Camera Example -# -# Note: You will need an SD card to run this example. -# -# You can start 2 camera preview. - -from media.camera import * -from media.display import * -from media.media import * -import time, os -import sys - -def camera_test(): - print("camera_test") - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(160, 160, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - - camera.sensor_init(CAM_DEV_ID_1, CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_1, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_1, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source1 = media_device(CAMERA_MOD_ID, CAM_DEV_ID_1, CAM_CHN_ID_0) - # create meida sink device - meida_sink1 = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO2) - # create meida link - media.create_link(meida_source1, meida_sink1) - # set display plane with video channel - display.set_plane(960, 480, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO2) - - # init meida buffer - media.buffer_init() - # start stream for camera device0 - camera.start_mcm_stream() - - try: - while True: - os.exitpoint() - time.sleep(5) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - # stop stream for camera device0 - camera.stop_mcm_stream() - - # deinit display - display.deinit() - # destroy media link - media.destroy_link(meida_source, meida_sink) - 
media.destroy_link(meida_source1, meida_sink1) - - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - camera_test() diff --git a/share/qtcreator/examples/01-Media/camera_3sensors.py b/share/qtcreator/examples/01-Media/camera_3sensors.py deleted file mode 100755 index 43810596dfc0..000000000000 --- a/share/qtcreator/examples/01-Media/camera_3sensors.py +++ /dev/null @@ -1,103 +0,0 @@ -# Camera Example -# -# Note: You will need an SD card to run this example. -# -# You can start 3 camera preview. - -from media.camera import * -from media.display import * -from media.media import * -import time, os -import sys - -def camera_test(): - print("camera_test") - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - - - camera.sensor_init(CAM_DEV_ID_1, CAM_OV5647_1920X1080_CSI1_30FPS_10BIT_USEMCLK_LINEAR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_1, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_1, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source1 = media_device(CAMERA_MOD_ID, CAM_DEV_ID_1, CAM_CHN_ID_0) - # create meida sink device - meida_sink1 = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO2) - # create meida link - media.create_link(meida_source1, meida_sink1) - # set display plane with video channel - display.set_plane(640, 320, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO2) - - - camera.sensor_init(CAM_DEV_ID_2, CAM_OV5647_1920X1080_CSI2_30FPS_10BIT_USEMCLK_LINEAR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_2, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_2, CAM_CHN_ID_0, PIXEL_FORMAT_RGB_888) - # create meida source device - meida_source2 = media_device(CAMERA_MOD_ID, CAM_DEV_ID_2, CAM_CHN_ID_0) - # create meida sink device - meida_sink2 = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_OSD0) - # create meida link1 - media.create_link(meida_source2, meida_sink2) - # set display plane with video channel - display.set_plane(1280, 600, out_width, out_height, PIXEL_FORMAT_RGB_888, DISPLAY_MIRROR_NONE, DISPLAY_CHN_OSD0) - - - # init meida buffer - media.buffer_init() - # start stream for camera device0 - camera.start_mcm_stream() - - try: - while True: - os.exitpoint() - time.sleep(5) - 
except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - # stop stream for camera device0 - camera.stop_mcm_stream() - - # deinit display - display.deinit() - # destroy media link - media.destroy_link(meida_source, meida_sink) - media.destroy_link(meida_source1, meida_sink1) - media.destroy_link(meida_source2, meida_sink2) - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - camera_test() diff --git a/share/qtcreator/examples/01-Media/camera_480p.py b/share/qtcreator/examples/01-Media/camera_480p.py deleted file mode 100644 index a51d8bf7401e..000000000000 --- a/share/qtcreator/examples/01-Media/camera_480p.py +++ /dev/null @@ -1,56 +0,0 @@ -# Camera Example - -from media.camera import * -from media.display import * -from media.media import * -import time, os -import sys -def camera_test(): - print("camera_test") - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - out_width = 640 - out_height = 480 - # set camera out width align up with 16Bytes - out_width = ALIGN_UP(out_width, 16) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, out_width, out_height) - # set chn0 out format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - meida_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - meida_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, out_width, out_height, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # init meida buffer - media.buffer_init() - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - try: - while True: - os.exitpoint() - img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_0) - img.compress_for_ide() - # release image for dev and chn - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_0, img) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - # destroy media link - media.destroy_link(meida_source, meida_sink) - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # deinit media buffer - media.buffer_deinit() -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - camera_test() diff --git a/share/qtcreator/examples/01-Media/display.py b/share/qtcreator/examples/01-Media/display.py deleted file mode 100755 index 2e0145e5199c..000000000000 --- a/share/qtcreator/examples/01-Media/display.py +++ /dev/null @@ -1,67 +0,0 @@ -from media.camera import * -from media.display import * -from media.media import * -import time, os, urandom, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -def display_test(): - print("display test") - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # media buffer init - 
media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - try: - while True: - img.clear() - for i in range(10): - x = (urandom.getrandbits(11) % img.width()) - y = (urandom.getrandbits(11) % img.height()) - r = (urandom.getrandbits(8)) - g = (urandom.getrandbits(8)) - b = (urandom.getrandbits(8)) - # If the first argument is a scaler then this method expects - # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple. - # Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees. - img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False, - char_rotation = 0, char_hmirror = False, char_vflip = False, - string_rotation = 0, string_hmirror = False, string_vflip = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # deinit media buffer - media.buffer_deinit() - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - display_test() diff --git a/share/qtcreator/examples/01-Media/media.py b/share/qtcreator/examples/01-Media/media.py deleted file mode 100755 index 805e2a06aa41..000000000000 --- a/share/qtcreator/examples/01-Media/media.py +++ /dev/null @@ -1,88 +0,0 @@ -# Meida Example -# -# Note: You will need an SD card to run this example. -# -# You can get how to use the meida api form this example. 
- -from media.media import * -import os - -def media_buf_test(): - print("media_buf_test start") - config = k_vb_config() - - config.max_pool_cnt = 10 - - config.comm_pool[0].blk_size = 1024*1024 - config.comm_pool[0].blk_cnt = 10 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 2*1024*1024 - config.comm_pool[1].blk_cnt = 10 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 3*1024*1024 - config.comm_pool[2].blk_cnt = 10 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - # config meida buffer - media.buffer_config(config) - - config.max_pool_cnt = 20 - - config.comm_pool[0].blk_size = 4*1024*1024 - config.comm_pool[0].blk_cnt = 3 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 5*1024*1024 - config.comm_pool[1].blk_cnt = 3 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 6*1024*1024 - config.comm_pool[2].blk_cnt = 3 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - # config meida buffer - media.buffer_config(config) - - config.max_pool_cnt = 30 - - config.comm_pool[0].blk_size = 4*1024*1024 - config.comm_pool[0].blk_cnt = 5 - config.comm_pool[0].mode = VB_REMAP_MODE_NONE - - config.comm_pool[1].blk_size = 4*1024*1024 - config.comm_pool[1].blk_cnt = 5 - config.comm_pool[1].mode = VB_REMAP_MODE_NOCACHE - - config.comm_pool[2].blk_size = 4*1024*1024 - config.comm_pool[2].blk_cnt = 5 - config.comm_pool[3].mode = VB_REMAP_MODE_CACHED - - # config meida buffer - media.buffer_config(config) - - print("media_buf_test buffer_init") - # init meida buffer - media.buffer_init() - - print("media_buf_test request_buffer") - # request meida buffer - buffer = media.request_buffer(4*1024*1024) - print(f"buffer handle({buffer.handle})") - print(f"buffer pool_id({buffer.pool_id})") - print(f"buffer phys_addr({buffer.phys_addr})") - print(f"buffer virt_addr({buffer.virt_addr})") - print(f"buffer size({buffer.size})") - # release meida buffer - media.release_buffer(buffer) - - print("media_buf_test buffer_deinit") - # deinit meida buffer - media.buffer_deinit() - - print("media_buf_test end") - -if __name__ == "__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - media_buf_test() diff --git a/share/qtcreator/examples/01-Media/mp4muxer.py b/share/qtcreator/examples/01-Media/mp4muxer.py old mode 100644 new mode 100755 index 978b65e90772..16ce6c34527b --- a/share/qtcreator/examples/01-Media/mp4muxer.py +++ b/share/qtcreator/examples/01-Media/mp4muxer.py @@ -29,7 +29,7 @@ def mp4_muxer_test(): # 处理音视频数据,按MP4格式写入文件 mp4_muxer.Process() frame_count += 1 - print("frame_coutn = ", frame_count) + print("frame_count = ", frame_count) if frame_count >= 200: break except BaseException as e: diff --git a/share/qtcreator/examples/01-Media/venc.py b/share/qtcreator/examples/01-Media/video_encoder.py old mode 100644 new mode 100755 similarity index 75% rename from share/qtcreator/examples/01-Media/venc.py rename to share/qtcreator/examples/01-Media/video_encoder.py index 1711083a3bdc..1cc4bfc2b073 --- a/share/qtcreator/examples/01-Media/venc.py +++ b/share/qtcreator/examples/01-Media/video_encoder.py @@ -5,10 +5,12 @@ # You can capture videos and encode them into 264 files from media.vencoder import * -from media.camera import * +from media.sensor import * from media.media import * import time, os +# NOT WORK NOW!!! 
+ def venc_test(): print("venc_test start") width = 1280 @@ -16,31 +18,37 @@ def venc_test(): venc_chn = VENC_CHN_ID_0 width = ALIGN_UP(width, 16) # 初始化sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + + sensor = Sensor() + sensor.reset() # 设置camera 输出buffer - camera.set_outbufs(CAM_DEV_ID_0, CAM_CHN_ID_0, 6) - # 设置camera 输出buffer size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, width, height) - # 设置camera 输出格式 - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) + # set chn0 output size + sensor.set_framesize(width = width, height = height, alignment=12) + # set chn0 output format + sensor.set_pixformat(Sensor.YUV420SP) + + # 实例化video encoder encoder = Encoder() # 设置video encoder 输出buffer encoder.SetOutBufs(venc_chn, 15, width, height) - # 初始化设置的buffer - media.buffer_init() + + # 绑定camera和venc + link = MediaManager.link(sensor.bind_info()['src'], (VIDEO_ENCODE_MOD_ID, VENC_DEV_ID, venc_chn)) + + # init media manager + MediaManager.init() + chnAttr = ChnAttrStr(encoder.PAYLOAD_TYPE_H265, encoder.H265_PROFILE_MAIN, width, height) streamData = StreamData() + # 创建编码器 encoder.Create(venc_chn, chnAttr) - # 绑定camera和venc - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(VIDEO_ENCODE_MOD_ID, VENC_DEV_ID, venc_chn) - media.create_link(media_source, media_sink) + # 开始编码 encoder.Start(venc_chn) # 启动camera - camera.start_stream(CAM_DEV_ID_0) + sensor.run() frame_count = 0 if chnAttr.payload_type == encoder.PAYLOAD_TYPE_H265: @@ -76,15 +84,15 @@ def venc_test(): sys.print_exception(e) # 停止camera - camera.stop_stream(CAM_DEV_ID_0) + sensor.stop() # 销毁camera和venc的绑定 - media.destroy_link(media_source, media_sink) + del link # 停止编码 encoder.Stop(venc_chn) # 销毁编码器 encoder.Destroy(venc_chn) # 清理buffer - media.buffer_deinit() + MediaManager.deinit() print("venc_test stop") if __name__ == "__main__": diff --git a/share/qtcreator/examples/01-Media/player.py b/share/qtcreator/examples/01-Media/video_player.py old mode 100644 new mode 100755 similarity index 100% rename from share/qtcreator/examples/01-Media/player.py rename to share/qtcreator/examples/01-Media/video_player.py diff --git a/share/qtcreator/examples/02-Machine/adc/adc.py b/share/qtcreator/examples/02-Machine/adc/adc.py deleted file mode 100644 index 9e8af2db91eb..000000000000 --- a/share/qtcreator/examples/02-Machine/adc/adc.py +++ /dev/null @@ -1,8 +0,0 @@ -from machine import ADC - -# 实例化ADC通道0 -adc = ADC(0) -# 获取ADC通道0采样值 -print(adc.read_u16()) -# 获取ADC通道0电压值 -print(adc.read_uv(), "uV") diff --git a/share/qtcreator/examples/02-Machine/fft/fft.py b/share/qtcreator/examples/02-Machine/fft/fft.py deleted file mode 100644 index c4584d6e31eb..000000000000 --- a/share/qtcreator/examples/02-Machine/fft/fft.py +++ /dev/null @@ -1,34 +0,0 @@ -# 基础示例 -# -# 欢迎使用CanMV IDE, 点击IDE左下角的绿色按钮开始执行脚本 - -from machine import FFT -import array -import math -from ulab import numpy as np -PI = 3.14159265358979323846264338327950288419716939937510 - -rx = [] -def input_data(): - for i in range(64): - data0 = 10 * math.cos(2 * PI * i / 64) - data1 = 20 * math.cos(2 * 2 * PI * i / 64) - data2 = 30 * math.cos(3 * 2 * PI * i / 64) - data3 = 0.2 * math.cos(4 * 2 * PI * i / 64) - data4 = 1000 * math.cos(5 * 2 * PI * i / 64) - rx.append((int(data0 + data1 + data2 + data3 + data4))) -input_data() #初始化需要进行FFT的数据,列表类型 -print(rx) -data = np.array(rx,dtype=np.uint16) #把列表数据转换成数组 -print(data) -fft1 = FFT(data, 64, 0x555) #创建一个FFT对象,运算点数为64,偏移是0x555 -res = fft1.run() #获取FFT转换后的数据 
-print(res) - -res = fft1.amplitude(res) #获取各个频率点的幅值 -print(res) - -res = fft1.freq(64,38400) #获取所有频率点的频率值 -print(res) - - diff --git a/share/qtcreator/examples/02-Machine/fpioa/fpioa.py b/share/qtcreator/examples/02-Machine/fpioa/fpioa.py deleted file mode 100644 index 28939456b177..000000000000 --- a/share/qtcreator/examples/02-Machine/fpioa/fpioa.py +++ /dev/null @@ -1,18 +0,0 @@ -from machine import FPIOA - -# 实例化FPIOA -fpioa = FPIOA() -# 打印所有引脚配置 -fpioa.help() -# 打印指定引脚详细配置 -fpioa.help(0) -# 打印指定功能所有可用的配置引脚 -fpioa.help(FPIOA.IIC0_SDA, func=True) -# 设置Pin0为GPIO0 -fpioa.set_function(0, FPIOA.GPIO0) -# 设置Pin2为GPIO2, 同时配置其它项 -fpioa.set_function(2, FPIOA.GPIO2, ie=1, oe=1, pu=0, pd=0, st=1, sl=0, ds=7) -# 获取指定功能当前所在的引脚 -fpioa.get_pin_num(FPIOA.UART0_TXD) -# 获取指定引脚当前功能 -fpioa.get_pin_func(0) diff --git a/share/qtcreator/examples/02-Machine/gpio/gpio.py b/share/qtcreator/examples/02-Machine/gpio/gpio.py deleted file mode 100644 index c6d8a1dc5bda..000000000000 --- a/share/qtcreator/examples/02-Machine/gpio/gpio.py +++ /dev/null @@ -1,18 +0,0 @@ -# 基础示例 -# -# 欢迎使用CanMV IDE, 点击IDE左下角的绿色按钮开始执行脚本 - -from machine import GPIO -from machine import FPIOA - -a = FPIOA() -a.help(8) -a.set_function(8,a.GPIO8) -a.help(8) - -gpio = GPIO(8, GPIO.OUT, GPIO.PULL_UP, value=0) #构造GPIO对象,gpio编号为8,设置为上拉输出低电平 -value = gpio.value() #获取gpio的值 -print("value = %d" % value) -gpio.value(1) #设置gpio输出为高电平 -value = gpio.value() -print("value = %d" % value) diff --git a/share/qtcreator/examples/02-Machine/i2c/i2c.py b/share/qtcreator/examples/02-Machine/i2c/i2c.py deleted file mode 100755 index ac1e19afda92..000000000000 --- a/share/qtcreator/examples/02-Machine/i2c/i2c.py +++ /dev/null @@ -1,27 +0,0 @@ -from machine import I2C - -i2c4=I2C(4) # init i2c4 - -a=i2c4.scan() #scan i2c slave -print(a) - -i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8) # write hdmi page address(0x80) -i2c4.readfrom_mem(0x3b,0x00,1,mem_size=8) # read hdmi id0 ,value =0x17 -i2c4.readfrom_mem(0x3b,0x01,1,mem_size=8) # read hdmi id1 ,value =0x2 - -i2c4.writeto(0x3b,bytes([0xff,0x80]),True) # write hdmi page address(0x80) -i2c4.writeto(0x3b,bytes([0x00]),True) #send the address0 of being readed -i2c4.readfrom(0x3b,1) #read hdmi id0 ,value =0x17 -i2c4.writeto(0x3b,bytes([0x01]),True) #send the address1 of being readed -i2c4.readfrom(0x3b,1) #read hdmi id0 ,value =0x17 - -i2c4.writeto_mem(0x3b,0xff,bytes([0x80]),mem_size=8) # write hdmi page address(0x80) -a=bytearray(1) -i2c4.readfrom_mem_into(0x3b,0x0,a,mem_size=8) # read hdmi id0 into a ,value =0x17 -print(a) #printf a,value =0x17 - -i2c4.writeto(0x3b,bytes([0xff,0x80]),True) # write hdmi page address(0x80) -i2c4.writeto(0x3b,bytes([0x00]),True) #send the address0 of being readed -b=bytearray(1) -i2c4.readfrom_into(0x3b,b) #read hdmi id0 into b ,value =0x17 -print(b) #printf a,value =0x17 \ No newline at end of file diff --git a/share/qtcreator/examples/02-Machine/pin/pin.py b/share/qtcreator/examples/02-Machine/pin/pin.py deleted file mode 100644 index 46d8261d2bcb..000000000000 --- a/share/qtcreator/examples/02-Machine/pin/pin.py +++ /dev/null @@ -1,34 +0,0 @@ -from machine import Pin -from machine import FPIOA - -# 实例化FPIOA -fpioa = FPIOA() -# 设置Pin2为GPIO2 -fpioa.set_function(2, FPIOA.GPIO2) - -# 实例化Pin2为输出 -pin = Pin(2, Pin.OUT, pull=Pin.PULL_NONE, drive=7) -# 设置输出为高 -pin.value(1) -# pin.on() -# pin.high() -# 设置输出为低 -pin.value(0) -# pin.off() -# pin.low() -# 初始化Pin2为输入 -pin.init(Pin.IN, pull=Pin.PULL_UP, drive=7) -# 获取输入 -print(pin.value()) -# 设置模式 -pin.mode(Pin.IN) -# 获取模式 
-print(pin.mode()) -# 设置上下拉 -pin.pull(Pin.PULL_NONE) -# 获取上下拉 -print(pin.pull()) -# 设置驱动能力 -pin.drive(7) -# 获取驱动能力 -print(pin.drive()) diff --git a/share/qtcreator/examples/02-Machine/pwm/pwm.py b/share/qtcreator/examples/02-Machine/pwm/pwm.py deleted file mode 100644 index cdf95e282a5c..000000000000 --- a/share/qtcreator/examples/02-Machine/pwm/pwm.py +++ /dev/null @@ -1,17 +0,0 @@ -from machine import PWM -from machine import FPIOA - -# 实例化FPIOA -fpioa = FPIOA() -# 设置PIN60为PWM通道0 -fpioa.set_function(60, fpioa.PWM0) -# 实例化PWM通道0,频率为1000Hz,占空比为50%,默认使能输出 -pwm0 = PWM(0, 1000, 50, enable = True) -# 关闭通道0输出 -pwm0.enable(0) -# 调整通道0频率为2000Hz -pwm0.freq(2000) -# 调整通道0占空比为40% -pwm0.duty(40) -# 打开通道0输出 -pwm0.enable(1) diff --git a/share/qtcreator/examples/02-Machine/rtc/rtc.py b/share/qtcreator/examples/02-Machine/rtc/rtc.py deleted file mode 100644 index 948a22154870..000000000000 --- a/share/qtcreator/examples/02-Machine/rtc/rtc.py +++ /dev/null @@ -1,8 +0,0 @@ -from machine import RTC - -# 实例化RTC -rtc = RTC() -# 获取当前时间 -print(rtc.datetime()) -# 设置当前时间 -rtc.init((2024,2,28,2,23,59,0,0)) diff --git a/share/qtcreator/examples/02-Machine/spi/spi.py b/share/qtcreator/examples/02-Machine/spi/spi.py deleted file mode 100755 index 929056623fde..000000000000 --- a/share/qtcreator/examples/02-Machine/spi/spi.py +++ /dev/null @@ -1,35 +0,0 @@ -from machine import SPI -from machine import FPIOA -a = FPIOA() - -a.help(14) -a.set_function(14,a.QSPI0_CS0) -a.help(14) - -a.help(15) -a.set_function(15,a.QSPI0_CLK) -a.help(15) - -a.help(16) -a.set_function(16,a.QSPI0_D0) -a.help(16) - -a.help(17) -a.set_function(17,a.QSPI0_D1) -a.help(17) - -spi=SPI(1,baudrate=5000000, polarity=0, phase=0, bits=8) # spi init clock 5MHz, polarity 0, phase 0, data bitwide 8bits - -spi.write(bytes([0x66])) # enable gd25lq128 reset - -spi.write(bytes([0x99])) # gd25lq128 reset - -a=bytes([0x9f]) # send buff -b=bytearray(3) # receive buf -spi.write_readinto(a,b) # read gd25lq128 id -print(b) # bytearray(b'\xc8`\x18') - -a=bytes([0x90,0,0,0]) # send buff -b=bytearray(2) # receive buf -spi.write_readinto(a,b) # read gd25lq128 id -print(b) # bytearray(b'\xc8\x17') diff --git a/share/qtcreator/examples/02-Machine/timer/timer.py b/share/qtcreator/examples/02-Machine/timer/timer.py deleted file mode 100644 index 08f4dd03e81d..000000000000 --- a/share/qtcreator/examples/02-Machine/timer/timer.py +++ /dev/null @@ -1,13 +0,0 @@ -from machine import Timer -import time - -# 实例化一个软定时器 -tim = Timer(-1) -# 初始化定时器为单次模式,周期100ms -tim.init(period=100, mode=Timer.ONE_SHOT, callback=lambda t:print(1)) -time.sleep(0.2) -# 初始化定时器为周期模式,频率为1Hz -tim.init(freq=1, mode=Timer.PERIODIC, callback=lambda t:print(2)) -time.sleep(2) -# 释放定时器资源 -tim.deinit() diff --git a/share/qtcreator/examples/02-Machine/uart/uart.py b/share/qtcreator/examples/02-Machine/uart/uart.py deleted file mode 100644 index 18ed964098f6..000000000000 --- a/share/qtcreator/examples/02-Machine/uart/uart.py +++ /dev/null @@ -1,23 +0,0 @@ -from machine import UART -from machine import FPIOA - -# 实例化FPIOA -fpioa = FPIOA() -# 设置PIN60为PWM通道0 -fpioa.set_function(5, fpioa.UART2_TXD) -fpioa.set_function(6, fpioa.UART2_RXD) -# UART2: baudrate 115200, 8bits, parity none, one stopbits -uart = UART(UART.UART2, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE) -# UART write -r = uart.write("UART test") -print(r) -# UART read -r = uart.read() -print(r) -# UART readline -r = uart.readline() -print(r) -# UART readinto -b = bytearray(8) -r = uart.readinto(b) -print(r) diff 
--git a/share/qtcreator/examples/02-Machine/uart/uart1.py b/share/qtcreator/examples/02-Machine/uart/uart1.py deleted file mode 100644 index b3d148fcc440..000000000000 --- a/share/qtcreator/examples/02-Machine/uart/uart1.py +++ /dev/null @@ -1,49 +0,0 @@ -from machine import UART -from machine import FPIOA -import time - -# 实例化FPIOA -from machine import FPIOA -fpioa = FPIOA() -#pin3 设置为串口1发送管脚 -fpioa.set_function(3, fpioa.UART1_TXD) -#设置pin4为串口1接收管脚 -fpioa.set_function(4, fpioa.UART1_RXD) -#使能pin3的输入输出功能 -fpioa.set_function(3,set_ie=1,set_oe=1) -#使能pin3的输入输出功能 -fpioa.set_function(4,set_ie=1,set_oe=1) - - -#fpioa.set_function(5, fpioa.UART2_TXD) -#fpioa.set_function(6, fpioa.UART2_RXD) - - -#UART: baudrate 115200, 8bits, parity none, one stopbits -uart = UART(UART.UART1, baudrate=115200, bits=UART.EIGHTBITS, parity=UART.PARITY_NONE, stop=UART.STOPBITS_ONE) -#打印串口配置 -print(uart) -# UART write -r = uart.write("UART test wwwwwwwwwwwww") -print(r) -# UART read -r = uart.read() -print(r) -# UART readline -r = uart.readline() -print(r) -# UART readinto -b = bytearray(8) -r = uart.readinto(b) -print(r) -i=0 -while True: - #print( "xxx %d" % (0) ) - uart.write("i={0}".format(0)) - i=i+1 - #print(uart.read()) - a=uart.read() - if len(a) > 1 : - print(a,len(a)) - time.sleep(0.1) - diff --git a/share/qtcreator/examples/02-Machine/wdt/wdt.py b/share/qtcreator/examples/02-Machine/wdt/wdt.py deleted file mode 100644 index 131f340074aa..000000000000 --- a/share/qtcreator/examples/02-Machine/wdt/wdt.py +++ /dev/null @@ -1,9 +0,0 @@ -import time -from machine import WDT - -# 实例化wdt1,timeout为3s -wdt1 = WDT(1,3) -time.sleep(2) -# 喂狗操作 -wdt1.feed() -time.sleep(2) diff --git a/share/qtcreator/examples/04-AI-Demo/dynamic_gesture.py b/share/qtcreator/examples/04-AI-Demo/dynamic_gesture.py deleted file mode 100644 index eaabf57b6b28..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/dynamic_gesture.py +++ /dev/null @@ -1,881 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -root_dir = '/sdcard/app/tests/' - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -#--------for hand gesture---------- -#kmodel输入shape -gesture_kmodel_input_shape = [[1, 3, 224, 
224], # 动态手势识别kmodel输入分辨率 - [1,3,56,56], - [1,4,28,28], - [1,4,28,28], - [1,8,14,14], - [1,8,14,14], - [1,8,14,14], - [1,12,14,14], - [1,12,14,14], - [1,20,7,7], - [1,20,7,7]] - -#kmodel相关参数设置 -resize_shape = 256 -mean_values = np.array([0.485, 0.456, 0.406]).reshape((3,1,1)) # 动态手势识别预处理均值 -std_values = np.array([0.229, 0.224, 0.225]).reshape((3,1,1)) # 动态手势识别预处理方差 -gesture_kmodel_frame_size = [224,224] # 动态手势识别输入图片尺寸 - -gesture_kmodel_file = root_dir + 'kmodel/gesture.kmodel' # 动态手势识别kmodel文件的路径 - -shang_bin = root_dir + "utils/shang.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -xia_bin = root_dir + "utils/xia.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -zuo_bin = root_dir + "utils/zuo.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 -you_bin = root_dir + "utils/you.bin" # 动态手势识别屏幕坐上角标志状态文件的路径 - -bin_width = 150 # 动态手势识别屏幕坐上角标志状态文件的短边尺寸 -bin_height = 216 # 动态手势识别屏幕坐上角标志状态文件的长边尺寸 -shang_argb = np.fromfile(shang_bin, dtype=np.uint8) -shang_argb = shang_argb.reshape((bin_height, bin_width, 4)) -xia_argb = np.fromfile(xia_bin, dtype=np.uint8) -xia_argb = xia_argb.reshape((bin_height, bin_width, 4)) -zuo_argb = np.fromfile(zuo_bin, dtype=np.uint8) -zuo_argb = zuo_argb.reshape((bin_width, bin_height, 4)) -you_argb = np.fromfile(you_bin, dtype=np.uint8) -you_argb = you_argb.reshape((bin_width, bin_height, 4)) - -TRIGGER = 0 # 动态手势识别应用的结果状态 -MIDDLE = 1 -UP = 2 -DOWN = 3 -LEFT = 4 -RIGHT = 5 - -max_hist_len = 20 # 最多存储多少帧的结果 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global gesture_ai2d_resize, gesture_ai2d_resize_builder, gesture_ai2d_crop, gesture_ai2d_crop_builder # 定义动态手势识别全局 ai2d 对象,以及 builder -global gesture_ai2d_input_tensor, gesture_kpu_input_tensors, gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor # 定义动态手势识别全局 ai2d 的输入、输出 - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = 
hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) # kpu结果后处理 - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = 
hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 输出后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)手掌关键点检测 kpu 结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6)返回手掌关键点检测结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 求两个vector之间的夹角 -def hk_vector_2d_angle(v1,v2): - with ScopedTiming("hk_vector_2d_angle",debug_mode > 0): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - return angle - -# 根据手掌关键点检测结果判断手势类别 -def hk_gesture(results): - with ScopedTiming("hk_gesture",debug_mode > 0): - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. 
not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]<thr_angle_s): - gesture_str = "five" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "love" - elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "six" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle): - gesture_str = "three" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - -#-------dynamic gesture--------: -# 动态手势识别 ai2d 初始化 -def gesture_ai2d_init(kpu_obj, resize_shape): - with ScopedTiming("gesture_ai2d_init",debug_mode > 0): - global gesture_ai2d_resize, gesture_ai2d_resize_builder - global gesture_ai2d_crop, gesture_ai2d_crop_builder - global gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor - - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = gesture_kmodel_frame_size[0] - height = gesture_kmodel_frame_size[1] - ratiow = float(resize_shape) / ori_w - ratioh = float(resize_shape) / ori_h - if ratiow < ratioh: - ratio = ratioh - else: - ratio = ratiow - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - - top = int((new_h-height)/2) - left = int((new_w-width)/2) - - gesture_ai2d_resize = nn.ai2d() - gesture_ai2d_resize.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) - gesture_ai2d_resize.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - gesture_ai2d_resize_builder = gesture_ai2d_resize.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,new_h,new_w]) - - gesture_ai2d_crop = nn.ai2d() - gesture_ai2d_crop.set_dtype(nn.ai2d_format.NCHW_FMT, nn.ai2d_format.NCHW_FMT, np.uint8, np.uint8) - gesture_ai2d_crop.set_crop_param(True, left, top, width, height) - gesture_ai2d_crop_builder = gesture_ai2d_crop.build([1,3,new_h,new_w], [1,3,height,width]) - - global gesture_kpu_input_tensor, gesture_kpu_input_tensors, current_kmodel_obj - current_kmodel_obj = kpu_obj - gesture_kpu_input_tensors = [] - for i in range(current_kmodel_obj.inputs_size()): - data = np.zeros(gesture_kmodel_input_shape[i], dtype=np.float) - gesture_kpu_input_tensor = nn.from_numpy(data) - gesture_kpu_input_tensors.append(gesture_kpu_input_tensor) - - data = np.ones(gesture_kmodel_input_shape[0], dtype=np.uint8) - gesture_ai2d_output_tensor = nn.from_numpy(data) - - global data_float - data_float = np.ones(gesture_kmodel_input_shape[0], dtype=np.float) - - data_middle = np.ones((1,3,new_h,new_w), dtype=np.uint8) - gesture_ai2d_middle_output_tensor = nn.from_numpy(data_middle) - -def gesture_ai2d_run(rgb888p_img): - with ScopedTiming("gesture_ai2d_run",debug_mode > 0): - global gesture_ai2d_input_tensor, gesture_kpu_input_tensors, gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor - global gesture_ai2d_resize_builder, gesture_ai2d_crop_builder - - gesture_ai2d_input = rgb888p_img.to_numpy_ref() - gesture_ai2d_input_tensor = nn.from_numpy(gesture_ai2d_input) - - gesture_ai2d_resize_builder.run(gesture_ai2d_input_tensor,
gesture_ai2d_middle_output_tensor) - gesture_ai2d_crop_builder.run(gesture_ai2d_middle_output_tensor, gesture_ai2d_output_tensor) - - result = gesture_ai2d_output_tensor.to_numpy() - global data_float - data_float[0] = result[0].copy() - data_float[0] = (data_float[0]*1.0/255 -mean_values)/std_values - tmp = nn.from_numpy(data_float) - gesture_kpu_input_tensors[0] = tmp - -# 动态手势识别 ai2d 释放内存 -def gesture_ai2d_release(): - with ScopedTiming("gesture_ai2d_release",debug_mode > 0): - global gesture_ai2d_input_tensor - del gesture_ai2d_input_tensor - -# 动态手势识别 kpu 初始化 -def gesture_kpu_init(gesture_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("gesture_kpu_init",debug_mode > 0): - gesture_kpu_obj = nn.kpu() - gesture_kpu_obj.load_kmodel(gesture_kmodel_file) - gesture_ai2d_init(gesture_kpu_obj, resize_shape) - return gesture_kpu_obj - -# 动态手势识别 kpu 输入预处理 -def gesture_kpu_pre_process(rgb888p_img): - gesture_ai2d_run(rgb888p_img) - with ScopedTiming("gesture_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,gesture_kpu_input_tensors - # set kpu input - for i in range(current_kmodel_obj.inputs_size()): - current_kmodel_obj.set_input_tensor(i, gesture_kpu_input_tensors[i]) - -# 动态手势识别 kpu 获得 kmodel 输出 -def gesture_kpu_get_output(): - with ScopedTiming("gesture_kpu_get_output",debug_mode > 0): - global current_kmodel_obj, gesture_kpu_input_tensors - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - if (i==0): - result = data.to_numpy() - tmp2 = result.copy() - else: - gesture_kpu_input_tensors[i] = data - return tmp2 - -# 动态手势识别结果处理 -def gesture_process_output(pred,history): - if (pred == 7 or pred == 8 or pred == 21 or pred == 22 or pred == 3 ): - pred = history[-1] - if (pred == 0 or pred == 4 or pred == 6 or pred == 9 or pred == 14 or pred == 1 or pred == 19 or pred == 20 or pred == 23 or pred == 24) : - pred = history[-1] - if (pred == 0) : - pred = 2 - if (pred != history[-1]) : - if (len(history)>= 2) : - if (history[-1] != history[len(history)-2]) : - pred = history[-1] - history.append(pred) - if (len(history) > max_hist_len) : - history = history[-max_hist_len:] - return history[-1] - -# 动态手势识别结果后处理 -def gesture_kpu_post_process(results, his_logit, history): - with ScopedTiming("gesture_kpu_post_process",debug_mode > 0): - his_logit.append(results[0]) - avg_logit = sum(np.array(his_logit)) - idx_ = np.argmax(avg_logit) - - idx = gesture_process_output(idx_, history) - if (idx_ != idx): - his_logit_last = his_logit[-1] - his_logit = [] - his_logit.append(his_logit_last) - return idx, avg_logit - -# 动态手势识别 kpu 运行 -def gesture_kpu_run(kpu_obj,rgb888p_img, his_logit, history): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - gesture_kpu_pre_process(rgb888p_img) - # (2)动态手势识别 kpu 运行 - with ScopedTiming("gesture_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放动态手势识别 ai2d 资源 - gesture_ai2d_release() - # (4)获取动态手势识别 kpu 输出 - results = gesture_kpu_get_output() - # (5)动态手势识别 kpu 结果后处理 - result, avg_logit= gesture_kpu_post_process(results,his_logit, history) - # (6)返回动态手势识别结果 - return result, avg_logit - -def gesture_kpu_deinit(): - with ScopedTiming("gesture_kpu_deinit",debug_mode > 0): - if 'gesture_ai2d_resize' in globals(): #删除gesture_ai2d_resize变量,释放对它所引用对象的内存引用 - global gesture_ai2d_resize - del gesture_ai2d_resize - if 'gesture_ai2d_middle_output_tensor' in globals(): #删除gesture_ai2d_middle_output_tensor变量,释放对它所引用对象的内存引用 - global gesture_ai2d_middle_output_tensor - del 
gesture_ai2d_middle_output_tensor - if 'gesture_ai2d_crop' in globals(): #删除gesture_ai2d_crop变量,释放对它所引用对象的内存引用 - global gesture_ai2d_crop - del gesture_ai2d_crop - if 'gesture_ai2d_output_tensor' in globals(): #删除gesture_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global gesture_ai2d_output_tensor - del gesture_ai2d_output_tensor - if 'gesture_kpu_input_tensors' in globals(): #删除gesture_kpu_input_tensors变量,释放对它所引用对象的内存引用 - global gesture_kpu_input_tensors - del gesture_kpu_input_tensors - if 'gesture_ai2d_resize_builder' in globals(): #删除gesture_ai2d_resize_builder变量,释放对它所引用对象的内存引用 - global gesture_ai2d_resize_builder - del gesture_ai2d_resize_builder - if 'gesture_ai2d_crop_builder' in globals(): #删除gesture_ai2d_crop_builder变量,释放对它所引用对象的内存引用 - global gesture_ai2d_crop_builder - del gesture_ai2d_crop_builder - - -#media_utils.py -global draw_img,osd_img,draw_numpy #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, draw_numpy - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_numpy = np.zeros((DISPLAY_HEIGHT, DISPLAY_WIDTH,4), dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF, data=draw_numpy) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, 
media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for dynamic_gesture.py********** -def dynamic_gesture_inference(): - print("dynamic_gesture_test start") - cur_state = TRIGGER - pre_state = TRIGGER - draw_state = TRIGGER - - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - kpu_dynamic_gesture = gesture_kpu_init(gesture_kmodel_file) # 创建动态手势识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - vec_flag = [] - his_logit = [] - history = [2] - s_start = time.time_ns() - - count = 0 - global draw_img,draw_numpy,osd_img - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - draw_img.clear() - if (cur_state == TRIGGER): - with ScopedTiming("trigger time", debug_mode > 0): - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - gesture = hk_gesture(hk_results) # 根据关键点检测结果判断手势类别 - - if ((gesture == "five") or (gesture == "yeah")): - v_x = hk_results[24]-hk_results[0] - v_y = hk_results[25]-hk_results[1] - angle = hk_vector_2d_angle([v_x,v_y],[1.0,0.0]) # 计算手指(中指)的朝向 - - if (v_y>0): - angle = 360-angle - - if ((70.0<=angle) and (angle<110.0)): # 手指向上 - if ((pre_state != UP) or (pre_state != MIDDLE)): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == UP) or (pre_state == MIDDLE) or(pre_state == TRIGGER)): - draw_numpy[:bin_height,:bin_width,:] = shang_argb - cur_state = UP - - elif ((110.0<=angle) and (angle<225.0)): # 手指向右(实际方向) - if (pre_state != RIGHT): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == RIGHT)or(pre_state == TRIGGER)): - draw_numpy[:bin_width,:bin_height,:] = you_argb - cur_state = RIGHT - - elif((225.0<=angle) and (angle<315.0)): # 手指向下 - if (pre_state != DOWN): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == DOWN)or(pre_state == TRIGGER)): - draw_numpy[:bin_height,:bin_width,:] = xia_argb - cur_state = DOWN - - else: # 手指向左(实际方向) - if (pre_state != LEFT): - vec_flag.append(pre_state) - if ((len(vec_flag)>10)or(pre_state == LEFT)or(pre_state == TRIGGER)): - draw_numpy[:bin_width,:bin_height,:] = zuo_argb - cur_state = LEFT - - m_start = time.time_ns() - his_logit = [] - else: - with ScopedTiming("swip time",debug_mode > 0): - idx, avg_logit = gesture_kpu_run(kpu_dynamic_gesture,rgb888p_img, his_logit, history) # 执行动态手势识别 kpu 运行 以及 后处理过程 - if (cur_state == UP): 
- draw_numpy[:bin_height,:bin_width,:] = shang_argb - if ((idx==15) or (idx==10)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.7) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 4))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = DOWN - history = [2] - pre_state = UP - elif ((idx==25)or(idx==26)) : - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = MIDDLE - history = [2] - pre_state = MIDDLE - else: - his_logit.clear() - elif (cur_state == RIGHT): - draw_numpy[:bin_width,:bin_height,:] = you_argb - if ((idx==16)or(idx==11)) : - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = RIGHT - history = [2] - pre_state = RIGHT - else: - his_logit.clear() - elif (cur_state == DOWN): - draw_numpy[:bin_height,:bin_width,:] = xia_argb - if ((idx==18)or(idx==13)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = UP - history = [2] - pre_state = DOWN - else: - his_logit.clear() - elif (cur_state == LEFT): - draw_numpy[:bin_width,:bin_height,:] = zuo_argb - if ((idx==17)or(idx==12)): - vec_flag.clear() - if (((avg_logit[idx] >= 0.4) and (len(his_logit) >= 2)) or ((avg_logit[idx] >= 0.3) and (len(his_logit) >= 3))): - s_start = time.time_ns() - cur_state = TRIGGER - draw_state = LEFT - history = [2] - pre_state = LEFT - else: - his_logit.clear() - - elapsed_time = round((time.time_ns() - m_start)/1000000) - - if ((cur_state != TRIGGER) and (elapsed_time>2000)): - cur_state = TRIGGER - pre_state = TRIGGER - - elapsed_ms_show = round((time.time_ns()-s_start)/1000000) - if (elapsed_ms_show<1000): - if (draw_state == UP): - draw_img.draw_arrow(1068,330,1068,130, (255,170,190,230), thickness=13) # 判断为向上挥动时,画一个向上的箭头 - elif (draw_state == RIGHT): - draw_img.draw_arrow(1290,540,1536,540, (255,170,190,230), thickness=13) # 判断为向右挥动时,画一个向右的箭头 - elif (draw_state == DOWN): - draw_img.draw_arrow(1068,750,1068,950, (255,170,190,230), thickness=13) # 判断为向下挥动时,画一个向下的箭头 - elif (draw_state == LEFT): - draw_img.draw_arrow(846,540,600,540, (255,170,190,230), thickness=13) # 判断为向左挥动时,画一个向左的箭头 - elif (draw_state == MIDDLE): - draw_img.draw_circle(1068,540,100, (255,170,190,230), thickness=2, fill=True) # 判断为五指捏合手势时,画一个实心圆 - else: - draw_state = TRIGGER - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - gesture_kpu_deinit() # 释放动态手势识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - del kpu_dynamic_gesture - - if 'draw_numpy' in globals(): - global draw_numpy - del draw_numpy - - if 'draw_img' in globals(): - global draw_img - del draw_img - - gc.collect() -# nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - 
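Note on the swipe branches above: each direction (UP/RIGHT/DOWN/LEFT) accepts a dynamic-gesture class with the same two-tier rule, a higher summed score over a couple of frames or a lower score once more frames have accumulated (0.7/2 and 0.3/4 in the UP branch, 0.4/2 and 0.3/3 in the others). A minimal standalone sketch of that rule; the helper name and default thresholds are illustrative, not part of the example:

def swipe_confirmed(avg_logit, idx, frame_count,
                    fast_score=0.4, fast_frames=2,
                    slow_score=0.3, slow_frames=3):
    # avg_logit: per-class scores summed over the frames collected in his_logit
    # idx: class index chosen by gesture_process_output()
    # frame_count: len(his_logit), i.e. how many frames contributed to avg_logit
    if avg_logit[idx] >= fast_score and frame_count >= fast_frames:
        return True
    if avg_logit[idx] >= slow_score and frame_count >= slow_frames:
        return True
    return False

# e.g. the RIGHT branch above is equivalent to:
#   if (idx == 16 or idx == 11) and swipe_confirmed(avg_logit, idx, len(his_logit)): ...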
print("dynamic_gesture_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - dynamic_gesture_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/eye_gaze.py b/share/qtcreator/examples/04-AI-Demo/eye_gaze.py deleted file mode 100644 index 5661d799d4ed..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/eye_gaze.py +++ /dev/null @@ -1,522 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 注视估计kmodel输入shape -feg_kmodel_input_shape = (1,3,448,448) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 注视估计kmodel -fr_kmodel_file = root_dir + 'kmodel/eye_gaze.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# feg_ai2d: 注视估计ai2d实例 -# feg_ai2d_input_tensor: 注视估计ai2d输入 -# feg_ai2d_output_tensor:注视估计ai2d输入 -# feg_ai2d_builder: 根据注视估计ai2d参数,构建的注视估计ai2d_builder对象 -global feg_ai2d,feg_ai2d_input_tensor,feg_ai2d_output_tensor,feg_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = 
(int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals(): 
#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def feg_ai2d_init(): - # 注视估计ai2d初始化 - with ScopedTiming("feg_ai2d_init",debug_mode > 0): - # (1)创建注视估计ai2d对象 - global feg_ai2d - feg_ai2d = nn.ai2d() - - # (2)创建注视估计ai2d_output_tensor对象,用于存放ai2d输出 - global feg_ai2d_output_tensor - data = np.ones(feg_kmodel_input_shape, dtype=np.uint8) - feg_ai2d_output_tensor = nn.from_numpy(data) - -def feg_ai2d_run(rgb888p_img,det): - # 注视估计ai2d推理 - with ScopedTiming("feg_ai2d_run",debug_mode > 0): - global feg_ai2d,feg_ai2d_input_tensor,feg_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - feg_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的注视估计ai2d参数 - feg_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - feg_ai2d.set_crop_param(True,x,y,w,h) - feg_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)根据新的注视估计ai2d参数,构建注视估计ai2d_builder - global feg_ai2d_builder - feg_ai2d_builder = feg_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], feg_kmodel_input_shape) - # (4)推理注视估计ai2d,将预处理的结果保存到feg_ai2d_output_tensor - feg_ai2d_builder.run(feg_ai2d_input_tensor, feg_ai2d_output_tensor) - -def feg_ai2d_release(): - # 释放注视估计ai2d_input_tensor、ai2d_builder - with ScopedTiming("feg_ai2d_release",debug_mode > 0): - global feg_ai2d_input_tensor,feg_ai2d_builder - del feg_ai2d_input_tensor - del feg_ai2d_builder - -def feg_kpu_init(kmodel_file): - # 注视估计kpu初始化 - with ScopedTiming("feg_kpu_init",debug_mode > 0): - # 初始化注视估计kpu对象 - kpu_obj = nn.kpu() - # 加载注视估计kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化注视估计ai2d - feg_ai2d_init() - return kpu_obj - -def feg_kpu_pre_process(rgb888p_img,det): - # 注视估计kpu预处理 - # 注视估计ai2d推理,根据det对原图进行预处理 - feg_ai2d_run(rgb888p_img,det) - with ScopedTiming("feg_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,feg_ai2d_output_tensor - # 将注视估计ai2d输出设置为注视估计kpu输入 - current_kmodel_obj.set_input_tensor(0, feg_ai2d_output_tensor) - -def feg_kpu_get_output(): - with ScopedTiming("feg_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取注视估计kpu输出 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def feg_kpu_post_process(results): - # 注视估计kpu推理结果后处理 - with ScopedTiming("feg_kpu_post_process",debug_mode > 0): - post_ret = aidemo.eye_gaze_post_process(results) - return post_ret[0],post_ret[1] - -def feg_kpu_run(kpu_obj,rgb888p_img,det): - # 注视估计kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)注视估计kpu预处理,设置kpu输入 - feg_kpu_pre_process(rgb888p_img,det) - # (2)注视估计kpu推理 - with ScopedTiming("feg_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放注视估计ai2d - feg_ai2d_release() - # (4)获取注视估计kpu输出 - results = feg_kpu_get_output() - # (5)注视估计后处理 - pitch,yaw = feg_kpu_post_process(results) - return pitch,yaw - -def feg_kpu_deinit(): - # 注视估计kpu释放 - with ScopedTiming("feg_kpu_deinit",debug_mode > 0): - if 'feg_ai2d' in globals(): # 删除feg_ai2d变量,释放对它所引用对象的内存引用 - global feg_ai2d - del feg_ai2d - if 'feg_ai2d_output_tensor' in globals(): # 删除feg_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global feg_ai2d_output_tensor - del feg_ai2d_output_tensor - -#********************for media_utils.py******************** -global 
draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,gaze_results): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det,gaze_ret in zip(dets,gaze_results): - pitch , yaw = gaze_ret - length = DISPLAY_WIDTH / 2 - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - center_x = (x + w / 2.0) - center_y = (y + h / 2.0) - dx = -length * math.sin(pitch) * math.cos(yaw) - target_x = int(center_x + dx) - dy = -length * math.sin(yaw) - target_y = int(center_y + dy) - - draw_img.draw_arrow(int(center_x), int(center_y), target_x, target_y, color = (255,255,0,0), size = 30, thickness = 2) - - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) 
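display_draw() above converts the estimated gaze (pitch, yaw) into an arrow on screen: the end point is offset from the face-box centre by -length*sin(pitch)*cos(yaw) horizontally and -length*sin(yaw) vertically. A self-contained sketch of that projection, assuming the angles are in radians as produced by the post-process (the function name is illustrative):

import math

def gaze_arrow_end(center_x, center_y, pitch, yaw, length):
    # Same offsets as display_draw(): project the gaze direction onto the screen plane
    dx = -length * math.sin(pitch) * math.cos(yaw)
    dy = -length * math.sin(yaw)
    return int(center_x + dx), int(center_y + dy)

# Example: a face centred at (960, 540) on a 1920x1080 display, arrow length = half the width
print(gaze_arrow_end(960, 540, 0.2, 0.1, 1920 / 2))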
- if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def eye_gaze_inference(): - print("eye_gaze_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 注视估计kpu初始化 - kpu_eye_gaze = feg_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应注视估计 - gaze_results = [] - for det in dets: - pitch ,yaw = feg_kpu_run(kpu_eye_gaze,rgb888p_img,det) - gaze_results.append([pitch ,yaw]) - # (2.3)将注视估计画到屏幕上 - display_draw(dets,gaze_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - with ScopedTiming("gc collect", debug_mode > 0): - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - feg_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_eye_gaze - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("eye_gaze_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - eye_gaze_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_detection.py b/share/qtcreator/examples/04-AI-Demo/face_detection.py deleted file mode 100644 index 53af586520cd..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_detection.py +++ /dev/null @@ -1,386 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# kmodel输入shape -kmodel_input_shape = (1,3,320,320) -# ai原图padding -rgb_mean = [104,117,123] -# kmodel其它参数设置 -confidence_threshold = 0.5 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# kmodel文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# anchor文件配置 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 
时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -# 当前kmodel -global current_kmodel_obj -# ai2d: ai2d实例 -# ai2d_input_tensor: ai2d输入 -# ai2d_output_tensor:ai2d输出 -# ai2d_builder: 根据ai2d参数,构建的ai2d_builder对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder #for ai2d -print('anchors_path:',anchors_path) -# 读取anchor文件,为后处理做准备 -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("ai2d_init",debug_mode > 0): - # (1)创建ai2d对象 - global ai2d - ai2d = nn.ai2d() - # (2)设置ai2d参数 - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - # (3)创建ai2d_output_tensor,用于保存ai2d输出 - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - - # (4)ai2d_builder,根据ai2d参数、输入输出大小创建ai2d_builder对象 - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], kmodel_input_shape) - - -def ai2d_run(rgb888p_img): - # 对原图rgb888p_img进行预处理 - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行ai2d_builder,将结果保存到ai2d_output_tensor中 - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -def ai2d_release(): - # 释放ai2d_input_tensor - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -def kpu_init(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - ai2d_init() - return kpu_obj - -def kpu_pre_process(rgb888p_img): - # 使用ai2d对原图进行预处理(padding,resize) - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # 将ai2d输出设置为kpu输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 
获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def kpu_run(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放ai2d资源 - ai2d_release() - # (4)获取kpu输出 - results = kpu_get_output() - # (5)kpu结果后处理 - with ScopedTiming("kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,kmodel_input_shape[2],prior_data,[OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] - - -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d - - if 'ai2d_output_tensor' in globals(): #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets): - # hdmi画检测框 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det in dets: - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x,y, w, h, color=(255, 255, 0, 255), thickness = 2) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - 
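media_init() above sizes each VB block as 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT because the OSD layer it backs is ARGB8888, i.e. four bytes per pixel. A small sketch of that sizing (the helper name is illustrative; 1920x1080 matches the display settings used throughout these examples):

BYTES_PER_ARGB8888_PIXEL = 4  # one byte each for A, R, G, B

def osd_block_size(width, height):
    # One full-screen ARGB8888 frame
    return BYTES_PER_ARGB8888_PIXEL * width * height

print(osd_block_size(1920, 1080))  # 8294400 bytes, roughly 7.9 MiB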
media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_detect_inference(): - print("face_detect_test start") - # kpu初始化 - kpu_face_detect = kpu_init(kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) -# time.sleep(5) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取检测结果 - dets = kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)将结果画到显示器 - display_draw(dets) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - nn.shrink_memory_pool() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - - print("face_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_landmark.py b/share/qtcreator/examples/04-AI-Demo/face_landmark.py deleted file mode 100644 index 3f651aca2ea6..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_landmark.py +++ /dev/null @@ -1,624 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os, sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - 
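The resolution constants above round the width with ALIGN_UP(1920, 16) because the display and RGB888P outputs require 16-pixel-aligned line widths. A minimal sketch of what such a rounding helper does; this is an assumed implementation based on the name and the comments, not the firmware's actual ALIGN_UP:

def align_up(value, align):
    # Round value up to the next multiple of align (align must be a power of two)
    return (value + align - 1) & ~(align - 1)

print(align_up(1920, 16))  # 1920, already aligned
print(align_up(1930, 16))  # 1936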
-# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸关键点kmodel输入shape -fld_kmodel_input_shape = (1,3,192,192) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸关键点kmodel -fr_kmodel_file = root_dir + 'kmodel/face_landmark.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -# 人脸关键点不同部位关键点列表 -dict_kp_seq = [ - [43, 44, 45, 47, 46, 50, 51, 49, 48], # left_eyebrow - [97, 98, 99, 100, 101, 105, 104, 103, 102], # right_eyebrow - [35, 36, 33, 37, 39, 42, 40, 41], # left_eye - [89, 90, 87, 91, 93, 96, 94, 95], # right_eye - [34, 88], # pupil - [72, 73, 74, 86], # bridge_nose - [77, 78, 79, 80, 85, 84, 83], # wing_nose - [52, 55, 56, 53, 59, 58, 61, 68, 67, 71, 63, 64], # out_lip - [65, 54, 60, 57, 69, 70, 62, 66], # in_lip - [1, 9, 10, 11, 12, 13, 14, 15, 16, 2, 3, 4, 5, 6, 7, 8, 0, 24, 23, 22, 21, 20, 19, 18, 32, 31, 30, 29, 28, 27, 26, 25, 17] # basin -] - -# 人脸关键点不同部位(顺序同dict_kp_seq)颜色配置,argb -color_list_for_osd_kp = [ - (255, 0, 255, 0), - (255, 0, 255, 0), - (255, 255, 0, 255), - (255, 255, 0, 255), - (255, 255, 0, 0), - (255, 255, 170, 0), - (255, 255, 255, 0), - (255, 0, 255, 255), - (255, 255, 220, 50), - (255, 30, 30, 255) -] - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸关键点ai2d实例 -# fld_ai2d_input_tensor: 人脸关键点ai2d输入 -# fld_ai2d_output_tensor:人脸关键点ai2d输入 -# fld_ai2d_builder: 根据人脸关键点ai2d参数,构建的人脸关键点ai2d_builder对象 -global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor,fld_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = 
(int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): - global fd_ai2d - del fd_ai2d #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - if 'fd_ai2d_output_tensor' in globals(): - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor #删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - -###############for face recognition############### -def get_affine_matrix(bbox): - # 获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 
1): - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 计算缩放比例,使得边界框映射到模型输入空间的一部分 - scale_ratio = (fld_kmodel_input_shape[2]) / (max(w, h) * 1.5) - # 计算边界框中心点在模型输入空间的坐标 - cx = (x1 + w / 2) * scale_ratio - cy = (y1 + h / 2) * scale_ratio - # 计算模型输入空间的一半长度 - half_input_len = fld_kmodel_input_shape[2] / 2 - - # 创建仿射矩阵并进行设置 - matrix_dst = np.zeros((2, 3), dtype=np.float) - matrix_dst[0, 0] = scale_ratio - matrix_dst[0, 1] = 0 - matrix_dst[0, 2] = half_input_len - cx - matrix_dst[1, 0] = 0 - matrix_dst[1, 1] = scale_ratio - matrix_dst[1, 2] = half_input_len - cy - return matrix_dst - -def fld_ai2d_init(): - # 人脸关键点ai2d初始化 - with ScopedTiming("fld_ai2d_init",debug_mode > 0): - # (1)创建人脸关键点ai2d对象 - global fld_ai2d - fld_ai2d = nn.ai2d() - - # (2)创建人脸关键点ai2d_output_tensor对象,用于存放ai2d输出 - global fld_ai2d_output_tensor - data = np.ones(fld_kmodel_input_shape, dtype=np.uint8) - fld_ai2d_output_tensor = nn.from_numpy(data) - -def fld_ai2d_run(rgb888p_img,det): - # 人脸关键点ai2d推理 - with ScopedTiming("fld_ai2d_run",debug_mode > 0): - global fld_ai2d,fld_ai2d_input_tensor,fld_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fld_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的人脸关键点ai2d参数 - fld_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - affine_matrix = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - fld_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - - # (3)根据新的人脸关键点ai2d参数,构建人脸关键点ai2d_builder - global fld_ai2d_builder - fld_ai2d_builder = fld_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fld_kmodel_input_shape) - # (4)推理人脸关键点ai2d,将预处理的结果保存到fld_ai2d_output_tensor - fld_ai2d_builder.run(fld_ai2d_input_tensor, fld_ai2d_output_tensor) - -def fld_ai2d_release(): - # 释放人脸关键点ai2d_input_tensor、ai2d_builder - with ScopedTiming("fld_ai2d_release",debug_mode > 0): - global fld_ai2d_input_tensor,fld_ai2d_builder - del fld_ai2d_input_tensor - del fld_ai2d_builder - -def fld_kpu_init(kmodel_file): - # 人脸关键点kpu初始化 - with ScopedTiming("fld_kpu_init",debug_mode > 0): - # 初始化人脸关键点kpu对象 - kpu_obj = nn.kpu() - # 加载人脸关键点kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸关键点ai2d - fld_ai2d_init() - return kpu_obj - -def fld_kpu_pre_process(rgb888p_img,det): - # 人脸关键点kpu预处理 - # 人脸关键点ai2d推理,根据det对原图进行预处理 - fld_ai2d_run(rgb888p_img,det) - with ScopedTiming("fld_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fld_ai2d_output_tensor - # 将人脸关键点ai2d输出设置为人脸关键点kpu输入 - current_kmodel_obj.set_input_tensor(0, fld_ai2d_output_tensor) - #ai2d_out_data = fld_ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fld_kpu_get_output(): - with ScopedTiming("fld_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸关键点kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fld_kpu_post_process(pred): - # 人脸关键点kpu推理结果后处理 - with ScopedTiming("fld_kpu_post_process",debug_mode > 0): - # (1)将人脸关键点输出变换模型输入 - half_input_len = fld_kmodel_input_shape[2] // 2 - pred = pred.flatten() - for i in range(len(pred)): - pred[i] += (pred[i] + 1) * half_input_len - - # (2)获取仿射矩阵的逆矩阵 - global matrix_dst - matrix_dst_inv = aidemo.invert_affine_transform(matrix_dst) - matrix_dst_inv = matrix_dst_inv.flatten() - 
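get_affine_matrix() above reduces to a uniform scale plus a translation that centres the padded face box in the 192x192 landmark input, and fld_kpu_post_process() maps each predicted keypoint back to image coordinates through the inverse of that matrix. A self-contained sketch of the forward and inverse mapping for one point; the function names are illustrative, and the real code obtains the inverse with aidemo.invert_affine_transform() on a ulab array:

def affine_apply(m, x, y):
    # m is a flattened 2x3 affine [a, b, tx, c, d, ty]
    return m[0] * x + m[1] * y + m[2], m[3] * x + m[4] * y + m[5]

def affine_invert(m):
    a, b, tx, c, d, ty = m
    det = a * d - b * c
    ia, ib, ic, id_ = d / det, -b / det, -c / det, a / det
    return [ia, ib, -(ia * tx + ib * ty), ic, id_, -(ic * tx + id_ * ty)]

# A pure scale + translation, as built by get_affine_matrix():
m = [0.5, 0.0, 20.0, 0.0, 0.5, -10.0]
u, v = affine_apply(m, 100.0, 200.0)           # image -> landmark-model input space
print(affine_apply(affine_invert(m), u, v))    # back to (100.0, 200.0)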
- # (3)对每个关键点进行逆变换 - half_out_len = len(pred) // 2 - for kp_id in range(half_out_len): - old_x = pred[kp_id * 2] - old_y = pred[kp_id * 2 + 1] - - # 逆变换公式 - new_x = old_x * matrix_dst_inv[0] + old_y * matrix_dst_inv[1] + matrix_dst_inv[2] - new_y = old_x * matrix_dst_inv[3] + old_y * matrix_dst_inv[4] + matrix_dst_inv[5] - - pred[kp_id * 2] = new_x - pred[kp_id * 2 + 1] = new_y - - return pred - -def fld_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸关键点kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸关键点kpu预处理,设置kpu输入 - fld_kpu_pre_process(rgb888p_img,det) - # (2)人脸关键点kpu推理 - with ScopedTiming("fld_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸关键点ai2d - fld_ai2d_release() - # (4)获取人脸关键点kpu输出 - result = fld_kpu_get_output() - # (5)人脸关键点后处理 - result = fld_kpu_post_process(result) - return result - -def fld_kpu_deinit(): - # 人脸关键点kpu释放 - with ScopedTiming("fld_kpu_deinit",debug_mode > 0): - if 'fld_ai2d' in globals(): - global fld_ai2d - del fld_ai2d - if 'fld_ai2d_output_tensor' in globals(): - global fld_ai2d_output_tensor - del fld_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,landmark_preds): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for pred in landmark_preds: - # (1)获取单个人脸框对应的人脸关键点 - for sub_part_index in range(len(dict_kp_seq)): - # (2)构建人脸某个区域关键点集 - sub_part = dict_kp_seq[sub_part_index] - face_sub_part_point_set = [] - for kp_index in range(len(sub_part)): - real_kp_index = sub_part[kp_index] - x, y = pred[real_kp_index * 2], pred[real_kp_index * 2 + 1] - - x = int(x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y = int(y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - face_sub_part_point_set.append((x, y)) - - # (3)画人脸不同区域的轮廓 - if sub_part_index in (9, 6): - color = np.array(color_list_for_osd_kp[sub_part_index],dtype = np.uint8) - face_sub_part_point_set = np.array(face_sub_part_point_set) - - aidemo.polylines(draw_img_ulab, face_sub_part_point_set,False,color,5,8,0) - - elif sub_part_index == 4: - color = color_list_for_osd_kp[sub_part_index] - for kp in face_sub_part_point_set: - x,y = kp[0],kp[1] - draw_img.draw_circle(x,y ,2, color, 1) - else: - color = np.array(color_list_for_osd_kp[sub_part_index],dtype = np.uint8) - face_sub_part_point_set = np.array(face_sub_part_point_set) - aidemo.contours(draw_img_ulab, face_sub_part_point_set,-1,color,2,8) - - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, 
OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_landmark_inference(): - print("face_landmark_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸关键点kpu初始化 - kpu_face_landmark = fld_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应人脸关键点 - landmark_result = [] - for det in dets: - ret = fld_kpu_run(kpu_face_landmark,rgb888p_img,det) - landmark_result.append(ret) - # (2.3)将人脸关键点画到屏幕上 - display_draw(dets,landmark_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - # 捕捉运行运行中异常,并打印错误 - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fld_kpu_deinit() - global current_kmodel_obj - if 'current_kmodel_obj' in 
globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_landmark - nn.shrink_memory_pool() - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - # 释放媒体资源 - media_deinit() - - print("face_landmark_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_landmark_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_mesh.py b/share/qtcreator/examples/04-AI-Demo/face_mesh.py deleted file mode 100644 index 569ed036a2fa..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_mesh.py +++ /dev/null @@ -1,621 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸mesh kmodel输入shape -fm_kmodel_input_shape = (1,3,120,120) -fmpost_kmodel_input_shapes = [(3,3),(3,1),(40,1),(10,1)] -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸mesh kmodel -fm_kmodel_file = root_dir + 'kmodel/face_alignment.kmodel' -# 人脸mesh后处理kmodel -fmpost_kmodel_file = root_dir + 'kmodel/face_alignment_post.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸mesh参数均值 -param_mean = np.array([0.0003492636315058917,2.52790130161884e-07,-6.875197868794203e-07,60.1679573059082,-6.295513230725192e-07,0.0005757200415246189,-5.085391239845194e-05,74.2781982421875,5.400917189035681e-07,6.574138387804851e-05,0.0003442012530285865,-66.67157745361328,-346603.6875,-67468.234375,46822.265625,-15262.046875,4350.5888671875,-54261.453125,-18328.033203125,-1584.328857421875,-84566.34375,3835.960693359375,-20811.361328125,38094.9296875,-19967.85546875,-9241.3701171875,-19600.71484375,13168.08984375,-5259.14404296875,1848.6478271484375,-13030.662109375,-2435.55615234375,-2254.20654296875,-14396.5615234375,-6176.3291015625,-25621.919921875,226.39447021484375,-6326.12353515625,-10867.2509765625,868.465087890625,-5831.14794921875,2705.123779296875,-3629.417724609375,2043.9901123046875,-2446.6162109375,3658.697021484375,-7645.98974609375,-6674.45263671875,116.38838958740234,7185.59716796875,-1429.48681640625,2617.366455078125,-1.2070955038070679,0.6690792441368103,-0.17760828137397766,0.056725528091192245,0.03967815637588501,-0.13586315512657166,-0.09223993122577667,-0.1726071834564209,-0.015804484486579895,-0.1416848599910736],dtype=np.float) -# 人脸mesh参数方差 -param_std = 
np.array([0.00017632152594160289,6.737943476764485e-05,0.00044708489440381527,26.55023193359375,0.0001231376954820007,4.493021697271615e-05,7.923670636955649e-05,6.982563018798828,0.0004350444069132209,0.00012314890045672655,0.00017400001524947584,20.80303955078125,575421.125,277649.0625,258336.84375,255163.125,150994.375,160086.109375,111277.3046875,97311.78125,117198.453125,89317.3671875,88493.5546875,72229.9296875,71080.2109375,50013.953125,55968.58203125,47525.50390625,49515.06640625,38161.48046875,44872.05859375,46273.23828125,38116.76953125,28191.162109375,32191.4375,36006.171875,32559.892578125,25551.1171875,24267.509765625,27521.3984375,23166.53125,21101.576171875,19412.32421875,19452.203125,17454.984375,22537.623046875,16174.28125,14671.640625,15115.6884765625,13870.0732421875,13746.3125,12663.1337890625,1.5870834589004517,1.5077009201049805,0.5881357789039612,0.5889744758605957,0.21327851712703705,0.2630201280117035,0.2796429395675659,0.38030216097831726,0.16162841022014618,0.2559692859649658],dtype=np.float) -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fm_ai2d: 人脸mesh ai2d实例 -# fm_ai2d_input_tensor: 人脸mesh ai2d输入 -# fm_ai2d_output_tensor:人脸mesh ai2d输入 -# fm_ai2d_builder: 根据人脸mesh ai2d参数,构建的人脸mesh ai2d_builder对象 -global fm_ai2d,fm_ai2d_input_tensor,fm_ai2d_output_tensor,fm_ai2d_builder -global roi #人脸区域 -global vertices #3D关键点 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, 
nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals(): #删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def parse_roi_box_from_bbox(bbox): - # 获取人脸roi - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - old_size = (w + h) / 2 - center_x = x1 + w / 2 - center_y = y1 + h / 2 + old_size * 0.14 - size = int(old_size * 1.58) - - x0 = center_x - float(size) / 2 - y0 = center_y - float(size) / 2 - x1 = x0 + size - y1 = y0 + size - - x0 = max(0, min(x0, OUT_RGB888P_WIDTH)) - y0 = max(0, min(y0, OUT_RGB888P_HEIGH)) - x1 = max(0, min(x1, OUT_RGB888P_WIDTH)) - y1 = max(0, min(y1, OUT_RGB888P_HEIGH)) - - roi = (x0, y0, x1 - x0, y1 - y0) - return roi - -def fm_ai2d_init(): - # 人脸mesh 
ai2d初始化 - with ScopedTiming("fm_ai2d_init",debug_mode > 0): - # (1)创建人脸mesh ai2d对象 - global fm_ai2d - fm_ai2d = nn.ai2d() - - # (2)创建人脸mesh ai2d_output_tensor对象,用于存放ai2d输出 - global fm_ai2d_output_tensor - data = np.ones(fm_kmodel_input_shape, dtype=np.uint8) - fm_ai2d_output_tensor = nn.from_numpy(data) - -def fm_ai2d_run(rgb888p_img,det): - # 人脸mesh ai2d推理 - with ScopedTiming("fm_ai2d_run",debug_mode > 0): - global fm_ai2d,fm_ai2d_input_tensor,fm_ai2d_output_tensor - #(1)根据原图ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fm_ai2d_input_tensor = nn.from_numpy(ai2d_input) - - # (2)根据新的det设置新的人脸mesh ai2d参数 - fm_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global roi - roi = parse_roi_box_from_bbox(det) - fm_ai2d.set_crop_param(True,int(roi[0]),int(roi[1]),int(roi[2]),int(roi[3])) - fm_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # (3)根据新的人脸mesh ai2d参数,构建人脸mesh ai2d_builder - global fm_ai2d_builder - fm_ai2d_builder = fm_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fm_kmodel_input_shape) - # (4)推理人脸mesh ai2d,将预处理的结果保存到fm_ai2d_output_tensor - fm_ai2d_builder.run(fm_ai2d_input_tensor, fm_ai2d_output_tensor) - -def fm_ai2d_release(): - # 释放人脸mesh ai2d_input_tensor、ai2d_builder - with ScopedTiming("fm_ai2d_release",debug_mode > 0): - global fm_ai2d_input_tensor,fm_ai2d_builder - del fm_ai2d_input_tensor - del fm_ai2d_builder - -def fm_kpu_init(kmodel_file): - # 人脸mesh kpu初始化 - with ScopedTiming("fm_kpu_init",debug_mode > 0): - # 初始化人脸mesh kpu对象 - kpu_obj = nn.kpu() - # 加载人脸mesh kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸mesh ai2d - fm_ai2d_init() - return kpu_obj - -def fm_kpu_pre_process(rgb888p_img,det): - # 人脸mesh kpu预处理 - # 人脸mesh ai2d推理,根据det对原图进行预处理 - fm_ai2d_run(rgb888p_img,det) - with ScopedTiming("fm_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fm_ai2d_output_tensor - # 将人脸mesh ai2d输出设置为人脸mesh kpu输入 - current_kmodel_obj.set_input_tensor(0, fm_ai2d_output_tensor) - -def fm_kpu_get_output(): - with ScopedTiming("fm_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸mesh kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fm_kpu_post_process(param): - # 人脸mesh kpu结果后处理,反标准化 - with ScopedTiming("fm_kpu_post_process",debug_mode > 0): - param = param * param_std + param_mean - return param - -def fm_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸mesh kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸mesh kpu预处理,设置kpu输入 - fm_kpu_pre_process(rgb888p_img,det) - # (2)人脸mesh kpu推理 - with ScopedTiming("fm_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸mesh ai2d - fm_ai2d_release() - # (4)获取人脸mesh kpu输出 - param = fm_kpu_get_output() - # (5)人脸mesh 后处理 - param = fm_kpu_post_process(param) - return param - -def fm_kpu_deinit(): - # 人脸mesh kpu释放 - with ScopedTiming("fm_kpu_deinit",debug_mode > 0): - if 'fm_ai2d' in globals(): # 删除fm_ai2d变量,释放对它所引用对象的内存引用 - global fm_ai2d - del fm_ai2d - if 'fm_ai2d_output_tensor' in globals(): # 删除fm_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fm_ai2d_output_tensor - del fm_ai2d_output_tensor - -def fmpost_kpu_init(kmodel_file): - # face mesh post模型初始化 - with ScopedTiming("fmpost_kpu_init",debug_mode > 0): - # 初始化人脸mesh kpu post对象 - kpu_obj = nn.kpu() - # 加载人脸mesh后处理kmodel - kpu_obj.load_kmodel(kmodel_file) - return kpu_obj - -def fmpost_kpu_pre_process(param): - # face mesh post模型预处理,param解析 - with 
ScopedTiming("fmpost_kpu_pre_process",debug_mode > 0): - param = param[0] - trans_dim, shape_dim, exp_dim = 12, 40, 10 - - # reshape前务必进行copy,否则会导致模型输入错误 - R_ = param[:trans_dim].copy().reshape((3, -1)) - R = R_[:, :3].copy() - offset = R_[:, 3].copy() - offset = offset.reshape((3, 1)) - alpha_shp = param[trans_dim:trans_dim + shape_dim].copy().reshape((-1, 1)) - alpha_exp = param[trans_dim + shape_dim:].copy().reshape((-1, 1)) - - R_tensor = nn.from_numpy(R) - current_kmodel_obj.set_input_tensor(0, R_tensor) - del R_tensor - - offset_tensor = nn.from_numpy(offset) - current_kmodel_obj.set_input_tensor(1, offset_tensor) - del offset_tensor - - alpha_shp_tensor = nn.from_numpy(alpha_shp) - current_kmodel_obj.set_input_tensor(2, alpha_shp_tensor) - del alpha_shp_tensor - - alpha_exp_tensor = nn.from_numpy(alpha_exp) - current_kmodel_obj.set_input_tensor(3, alpha_exp_tensor) - del alpha_exp_tensor - - return - -def fmpost_kpu_get_output(): - # 获取face mesh post模型输出 - with ScopedTiming("fmpost_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取人脸mesh kpu输出 - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fmpost_kpu_post_process(roi): - # face mesh post模型推理结果后处理 - with ScopedTiming("fmpost_kpu_post_process",debug_mode > 0): - x, y, w, h = map(lambda x: int(round(x, 0)), roi[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - roi_array = np.array([x,y,w,h],dtype=np.float) - global vertices - aidemo.face_mesh_post_process(roi_array,vertices) - return - -def fmpost_kpu_run(kpu_obj,param): - # face mesh post模型推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - fmpost_kpu_pre_process(param) - with ScopedTiming("fmpost_kpu_run",debug_mode > 0): - kpu_obj.run() - global vertices - vertices = fmpost_kpu_get_output() - global roi - fmpost_kpu_post_process(roi) - return -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,vertices_list): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for vertices in vertices_list: - aidemo.face_draw_mesh(draw_img_ulab, vertices) - # (4)将轮廓结果拷贝到osd - draw_img.copy_to(osd_img) - # (5)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # 
camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_mesh_inference(): - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸mesh kpu初始化 - kpu_face_mesh = fm_kpu_init(fm_kmodel_file) - # face_mesh_post kpu初始化 - kpu_face_mesh_post = fmpost_kpu_init(fmpost_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - ## (2.2)针对每个人脸框,推理得到对应人脸mesh - mesh_result = [] - for det in dets: - param = fm_kpu_run(kpu_face_mesh,rgb888p_img,det) - fmpost_kpu_run(kpu_face_mesh_post,param) - global vertices - mesh_result.append(vertices) - ## (2.3)将人脸mesh 画到屏幕上 - display_draw(dets,mesh_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fm_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del 
kpu_face_mesh - del kpu_face_mesh_post - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_mesh_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_mesh_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_parse.py b/share/qtcreator/examples/04-AI-Demo/face_parse.py deleted file mode 100644 index e4bb0495a278..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_parse.py +++ /dev/null @@ -1,529 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸解析kmodel输入shape -fp_kmodel_input_shape = (1,3,320,320) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸解析kmodel -fp_kmodel_file = root_dir + 'kmodel/face_parse.kmodel' -# anchor文件 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸解析ai2d实例 -# fld_ai2d_input_tensor: 人脸解析ai2d输入 -# fld_ai2d_output_tensor:人脸解析ai2d输入 -# fld_ai2d_builder: 根据人脸解析ai2d参数,构建的人脸解析ai2d_builder对象 -global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor,fp_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # 
OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] 
#0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -def get_affine_matrix(bbox): - # 获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 设置缩放因子 - factor = 2.7 - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 模型输入大小 - edge_size = fp_kmodel_input_shape[2] - # 平移距离,使得模型输入空间的中心对准原点 - trans_distance = edge_size / 2.0 - # 计算边界框中心点的坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - # 计算最大边长 - maximum_edge = factor * (h if h > w else w) - # 计算缩放比例 - scale = edge_size * 2.0 / maximum_edge - # 计算平移参数 - cx = trans_distance - scale * center_x - cy = trans_distance - scale * center_y - # 创建仿射矩阵 - affine_matrix = [scale, 0, cx, 0, scale, cy] - return affine_matrix - -def fp_ai2d_init(): - # 人脸解析ai2d初始化 - with ScopedTiming("fp_ai2d_init",debug_mode > 0): - # (1)创建人脸解析ai2d对象 - global fp_ai2d - fp_ai2d = nn.ai2d() - - # (2)创建人脸解析ai2d_output_tensor对象 - global fp_ai2d_output_tensor - data = np.ones(fp_kmodel_input_shape, dtype=np.uint8) - fp_ai2d_output_tensor = nn.from_numpy(data) - -def fp_ai2d_run(rgb888p_img,det): - # 人脸解析ai2d推理 - with ScopedTiming("fp_ai2d_run",debug_mode > 0): - global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor - #(1)根据原图构建人脸解析ai2d_input_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - fp_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)设置人脸解析ai2d参数 - fp_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - fp_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) - - # (3)构建人脸解析ai2d_builder - global fp_ai2d_builder - fp_ai2d_builder = fp_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fp_kmodel_input_shape) - # (4)推理人脸解析ai2d,将结果保存到ai2d_output_tensor - fp_ai2d_builder.run(fp_ai2d_input_tensor, fp_ai2d_output_tensor) - -def fp_ai2d_release(): - # 释放部分人脸解析ai2d资源 - with ScopedTiming("fp_ai2d_release",debug_mode > 0): - global fp_ai2d_input_tensor,fp_ai2d_builder - del fp_ai2d_input_tensor - del fp_ai2d_builder - -def fp_kpu_init(kmodel_file): - # 初始化人脸解析kpu及ai2d - with ScopedTiming("fp_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - fp_ai2d_init() - return kpu_obj - -def fp_kpu_pre_process(rgb888p_img,det): - # 人脸解析kpu预处理 - fp_ai2d_run(rgb888p_img,det) - with ScopedTiming("fp_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fp_ai2d_output_tensor - current_kmodel_obj.set_input_tensor(0, fp_ai2d_output_tensor) - #ai2d_out_data = fp_ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fp_kpu_get_output(): - # 获取人脸解析kpu输出 - with ScopedTiming("fp_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -def fp_kpu_run(kpu_obj,rgb888p_img,det): - # 人脸解析kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)根据人脸检测框进行人脸解析kpu预处理 - fp_kpu_pre_process(rgb888p_img,det) - # (2)人脸解析kpu推理 - with ScopedTiming("fp_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸解析ai2d资源 - fp_ai2d_release() - # 
(4)释放人脸解析kpu输出 - result = fp_kpu_get_output() - return result - -def fp_kpu_deinit(): - # 释放人脸解析kpu和ai2d资源 - with ScopedTiming("fp_kpu_deinit",debug_mode > 0): - if 'fp_ai2d' in globals(): - global fp_ai2d - del fp_ai2d - if 'fp_ai2d_output_tensor' in globals(): - global fp_ai2d_output_tensor - del fp_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -#for display -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,parse_results): - # 在显示器画出人脸解析结果 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - for i,det in enumerate(dets): - # (1)将人脸检测框画到draw_img - x, y, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x = x * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y = y * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x,y, w, h, color=(255, 255, 0, 255)) - # (2)将人脸解析结果画到draw_img(draw_img_ulab和draw_img指同一内存) - aidemo.face_parse_post_process(draw_img_ulab,[OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH], - [DISPLAY_WIDTH,DISPLAY_HEIGHT],fp_kmodel_input_shape[2],det.tolist(),parse_results[i]) - # (3)将绘制好的图像拷贝到显示缓冲区,并在显示器上展示 - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - ret = media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框,draw_img->draw_img_ulab(两者指向同一块内存) - 
draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_parse_inference(): - print("face_parse_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸解析kpu初始化 - kpu_face_parse = fp_kpu_init(fp_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - while True: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应人脸解析结果 - parse_results = [] - for det in dets: - parse_ret = fp_kpu_run(kpu_face_parse,rgb888p_img,det) - parse_results.append(parse_ret) - # (2.3)将人脸解析结果画到显示器上 - display_draw(dets,parse_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fp_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_parse - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_parse_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_parse_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_pose.py b/share/qtcreator/examples/04-AI-Demo/face_pose.py deleted file mode 100644 index d228c9930eb4..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_pose.py +++ /dev/null @@ -1,622 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel参数设置 -# 人脸检测kmodel输入shape 
-fd_kmodel_input_shape = (1,3,320,320) -# 人脸姿态估计kmodel输入shape -fp_kmodel_input_shape = (1,3,120,120) -# ai原图padding -rgb_mean = [104,117,123] - -#人脸检测kmodel其它参数设置 -confidence_threshold = 0.5 # 人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 - -# 文件配置 -# 人脸检测kmodel文件配置 -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸姿态估计kmodel文件配置 -fp_kmodel_file = root_dir + 'kmodel/face_pose.kmodel' -# anchor文件配置 -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu对象 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fld_ai2d: 人脸姿态估计ai2d实例 -# fld_ai2d_input_tensor: 人脸姿态估计ai2d输入 -# fld_ai2d_output_tensor:人脸姿态估计ai2d输入 -# fld_ai2d_builder: 根据人脸姿态估计ai2d参数,构建的人脸姿态估计ai2d_builder对象 -global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor,fp_ai2d_builder -global matrix_dst #人脸仿射变换矩阵 -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 
根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - - -###############for face recognition############### -def get_affine_matrix(bbox): - # 获取仿射矩阵,用于将边界框映射到模型输入空间 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 设置缩放因子 - factor = 2.7 - # 从边界框提取坐标和尺寸 - x1, y1, w, h = map(lambda x: int(round(x, 0)), bbox[:4]) - # 模型输入大小 - edge_size = fp_kmodel_input_shape[2] - # 平移距离,使得模型输入空间的中心对准原点 - trans_distance = edge_size / 2.0 - # 计算边界框中心点的坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - # 计算最大边长 - maximum_edge = factor * (h if h > w else w) - # 计算缩放比例 - scale = edge_size * 2.0 / maximum_edge - # 计算平移参数 - cx = trans_distance - scale * center_x - cy = trans_distance - scale * center_y - # 创建仿射矩阵 - affine_matrix = [scale, 0, cx, 0, scale, cy] - return affine_matrix - -def build_projection_matrix(det): - x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) - - # 计算边界框中心坐标 - center_x = x1 + w / 2.0 - center_y = y1 + h / 2.0 - - # 定义后部(rear)和前部(front)的尺寸和深度 - rear_width = 0.5 * w - rear_height = 0.5 * h - rear_depth = 0 - factor = np.sqrt(2.0) - front_width = factor * rear_width - front_height = 
factor * rear_height - front_depth = factor * rear_width # 使用宽度来计算深度,也可以使用高度,取决于需求 - - # 定义立方体的顶点坐标 - temp = [ - [-rear_width, -rear_height, rear_depth], - [-rear_width, rear_height, rear_depth], - [rear_width, rear_height, rear_depth], - [rear_width, -rear_height, rear_depth], - [-front_width, -front_height, front_depth], - [-front_width, front_height, front_depth], - [front_width, front_height, front_depth], - [front_width, -front_height, front_depth] - ] - - projections = np.array(temp) - # 返回投影矩阵和中心坐标 - return projections, (center_x, center_y) - -def rotation_matrix_to_euler_angles(R): - # 将旋转矩阵(3x3 矩阵)转换为欧拉角(pitch、yaw、roll) - # 计算 sin(yaw) - sy = np.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2) - - if sy < 1e-6: - # 若 sin(yaw) 过小,说明 pitch 接近 ±90 度 - pitch = np.arctan2(-R[1, 2], R[1, 1]) * 180 / np.pi - yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi - roll = 0 - else: - # 计算 pitch、yaw、roll 的角度 - pitch = np.arctan2(R[2, 1], R[2, 2]) * 180 / np.pi - yaw = np.arctan2(-R[2, 0], sy) * 180 / np.pi - roll = np.arctan2(R[1, 0], R[0, 0]) * 180 / np.pi - return [pitch,yaw,roll] - -def get_euler(data): - # 获取旋转矩阵和欧拉角 - R = data[:3, :3].copy() - eular = rotation_matrix_to_euler_angles(R) - return R,eular - -def fp_ai2d_init(): - # 人脸姿态估计ai2d初始化 - with ScopedTiming("fp_ai2d_init",debug_mode > 0): - # (1)创建人脸姿态估计ai2d对象 - global fp_ai2d - fp_ai2d = nn.ai2d() - - # (2)创建人脸姿态估计ai2d_output_tensor对象 - global fp_ai2d_output_tensor - data = np.ones(fp_kmodel_input_shape, dtype=np.uint8) - fp_ai2d_output_tensor = nn.from_numpy(data) - -def fp_ai2d_run(rgb888p_img,det): - # 人脸姿态估计ai2d推理 - with ScopedTiming("fp_ai2d_run",debug_mode > 0): - global fp_ai2d,fp_ai2d_input_tensor,fp_ai2d_output_tensor - #(1)根据原图构建人脸姿态估计ai2d_input_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - fp_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)设置人脸姿态估计ai2d参数 - fp_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - global matrix_dst - matrix_dst = get_affine_matrix(det) - fp_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,matrix_dst) - # (3)构建人脸姿态估计ai2d_builder - global fp_ai2d_builder - fp_ai2d_builder = fp_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fp_kmodel_input_shape) - # (4)推理人脸姿态估计ai2d,将结果保存到ai2d_output_tensor - fp_ai2d_builder.run(fp_ai2d_input_tensor, fp_ai2d_output_tensor) - -def fp_ai2d_release(): - # 释放部分人脸姿态估计ai2d资源 - with ScopedTiming("fp_ai2d_release",debug_mode > 0): - global fp_ai2d_input_tensor,fp_ai2d_builder - del fp_ai2d_input_tensor - del fp_ai2d_builder - -def fp_kpu_init(kmodel_file): - # 初始化人脸姿态估计kpu及ai2d - with ScopedTiming("fp_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - fp_ai2d_init() - return kpu_obj - -def fp_kpu_pre_process(rgb888p_img,det): - # 人脸姿态估计kpu预处理 - fp_ai2d_run(rgb888p_img,det) - with ScopedTiming("fp_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fp_ai2d_output_tensor - current_kmodel_obj.set_input_tensor(0, fp_ai2d_output_tensor) - #ai2d_out_data = _ai2d_output_tensor.to_numpy() - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fp_kpu_get_output(): - # 获取人脸姿态估计kpu输出 - with ScopedTiming("fp_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - result = result[0] - del data - return result - -def fp_kpu_post_process(pred): - # 人脸姿态估计kpu推理结果后处理 - R,eular = get_euler(pred) - return R,eular - -def fp_kpu_run(kpu_obj,rgb888p_img,det): - # 
人脸姿态估计kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)根据人脸检测框进行人脸姿态估计kpu预处理 - fp_kpu_pre_process(rgb888p_img,det) - # (2)人脸姿态估计kpu推理 - with ScopedTiming("fp_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸姿态估计ai2d资源 - fp_ai2d_release() - # (4)释放人脸姿态估计kpu推理输出 - result = fp_kpu_get_output() - # (5)释放人脸姿态估计后处理 - R,eular = fp_kpu_post_process(result) - return R,eular - -def fp_kpu_deinit(): - # 释放人脸姿态估计kpu及ai2d资源 - with ScopedTiming("fp_kpu_deinit",debug_mode > 0): - if 'fp_ai2d' in globals(): - global fp_ai2d - del fp_ai2d - if 'fp_ai2d_output_tensor' in globals(): - global fp_ai2d_output_tensor - del fp_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img_ulab,draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # 设置使用hdmi进行显示 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,pose_results): - # 在显示器画人脸轮廓 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img_ulab,draw_img,osd_img - if dets: - draw_img.clear() - line_color = np.array([255, 0, 0 ,255],dtype = np.uint8) #bgra - for i,det in enumerate(dets): - # (1)获取人脸姿态矩阵和欧拉角 - projections,center_point = build_projection_matrix(det) - R,euler = pose_results[i] - - # (2)遍历人脸投影矩阵的关键点,进行投影,并将结果画在图像上 - first_points = [] - second_points = [] - for pp in range(8): - sum_x, sum_y = 0.0, 0.0 - for cc in range(3): - sum_x += projections[pp][cc] * R[cc][0] - sum_y += projections[pp][cc] * (-R[cc][1]) - - center_x,center_y = center_point[0],center_point[1] - x = (sum_x + center_x) / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y = (sum_y + center_y) / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x = max(0, min(x, DISPLAY_WIDTH)) - y = max(0, min(y, DISPLAY_HEIGHT)) - - if pp < 4: - first_points.append((x, y)) - else: - second_points.append((x, y)) - first_points = np.array(first_points,dtype=np.float) - aidemo.polylines(draw_img_ulab,first_points,True,line_color,2,8,0) - second_points = np.array(second_points,dtype=np.float) - aidemo.polylines(draw_img_ulab,second_points,True,line_color,2,8,0) - - for ll in range(4): - x0, y0 = int(first_points[ll][0]),int(first_points[ll][1]) - x1, y1 = int(second_points[ll][0]),int(second_points[ll][1]) - draw_img.draw_line(x0, y0, x1, y1, color = (255, 0, 0 ,255), thickness = 2) - - # (3)将绘制好的图像拷贝到显示缓冲区,并在显示器上展示 - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - 
return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img_ulab,draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框,draw_img->draw_img_ulab(两者指向同一块内存) - draw_img_ulab = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_REF,data = draw_img_ulab) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_pose_inference(): - print("face_pose_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸姿态估计kpu初始化 - kpu_face_pose = fp_kpu_init(fp_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - rgb888p_img = None - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets = fd_kpu_run(kpu_face_detect,rgb888p_img) - # (2.2)针对每个人脸框,推理得到对应人脸旋转矩阵、欧拉角 - pose_results = [] - for det in dets: - R,eular = fp_kpu_run(kpu_face_pose,rgb888p_img,det) - pose_results.append((R,eular)) - # (2.3)将人脸姿态估计结果画到显示器上 - display_draw(dets,pose_results) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fp_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_pose - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'draw_img_ulab' in globals(): - global draw_img_ulab - del draw_img_ulab - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_pose_test end") - return 0 - -if 
__name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_pose_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_recognition.py b/share/qtcreator/examples/04-AI-Demo/face_recognition.py deleted file mode 100644 index 0362f85801f9..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_recognition.py +++ /dev/null @@ -1,695 +0,0 @@ -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * # 摄像头模块 -from media.display import * # 显示模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo # aidemo模块,封装ai demo相关后处理、画图操作 -import image # 图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time # 时间统计 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 -import math # 数学模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGH = 1080 - -# kmodel输入shape -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸识别kmodel输入shape -fr_kmodel_input_shape = (1,3,112,112) -# ai原图padding -rgb_mean = [104,117,123] - -#kmodel相关参数设置 -#人脸检测 -confidence_threshold = 0.5 #人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 -#人脸识别 -max_register_face = 100 # 数据库最多人脸个数 -feature_num = 128 # 人脸识别特征维度 -face_recognition_threshold = 0.75 # 人脸识别阈值 - -#文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸识别kmodel -fr_kmodel_file = root_dir + 'kmodel/face_recognition.kmodel' -# 人脸检测anchor -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸数据库 -database_dir = root_dir + 'utils/db/' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu实例 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fr_ai2d: 人脸识别ai2d实例 -# fr_ai2d_input_tensor: 人脸识别ai2d输入 -# fr_ai2d_output_tensor: 人脸识别ai2d输入 -# fr_ai2d_builder: 根据人脸识别ai2d参数,构建的人脸识别ai2d_builder对象 -global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor,fr_ai2d_builder -# valid_register_face: 数据库中有效人脸个数 -# db_name: 数据库人名列表 -# db_data: 数据库特征列表 -global valid_register_face,db_name,db_data - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(): - # 右padding或下padding,获取padding参数 - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # OUT_RGB888P_WIDTH:原图宽(w) - # OUT_RGB888P_HEIGH:原图高(h) - # 
计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / OUT_RGB888P_WIDTH - ratio_h = dst_h / OUT_RGB888P_HEIGH - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGH) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - # (2)设置人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - #(3)人脸检测ai2d_builder,根据人脸检测ai2d参数、输入输出大小创建ai2d_builder对象 - global fd_ai2d_builder - fd_ai2d_builder = fd_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fd_kmodel_input_shape) - - #(4)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fd_ai2d_input_tensor = nn.from_numpy(ai2d_input) - # (2)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d_input_tensor - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor - del fd_ai2d_input_tensor - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH],results) - # (6)返回人脸检测框 - if len(post_ret)==0: - return post_ret,post_ret - else: - return post_ret[0],post_ret[1] 
#0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - -###############for face recognition############### -##for database -def database_init(): - # 数据初始化,构建数据库人名列表和数据库特征列表 - with ScopedTiming("database_init", debug_mode > 1): - global valid_register_face,db_name,db_data - valid_register_face = 0 - db_name = [] - db_data = [] - - db_file_list = os.listdir(database_dir) - for db_file in db_file_list: - if not db_file.endswith('.bin'): - continue - if valid_register_face >= max_register_face: - break - valid_index = valid_register_face - full_db_file = database_dir + db_file - with open(full_db_file, 'rb') as f: - data = f.read() - feature = np.frombuffer(data, dtype=np.float) - db_data.append(feature) - name = db_file.split('.')[0] - db_name.append(name) - valid_register_face += 1 - -def database_reset(): - # 数据库清空 - with ScopedTiming("database_reset", debug_mode > 1): - global valid_register_face,db_name,db_data - print("database clearing...") - db_name = [] - db_data = [] - valid_register_face = 0 - print("database clear Done!") - -def database_search(feature): - # 数据库查询 - with ScopedTiming("database_search", debug_mode > 1): - global valid_register_face,db_name,db_data - v_id = -1 - v_score_max = 0.0 - - # 将当前人脸特征归一化 - feature /= np.linalg.norm(feature) - # 遍历当前人脸数据库,统计最高得分 - for i in range(valid_register_face): - db_feature = db_data[i] - db_feature /= np.linalg.norm(db_feature) - # 计算数据库特征与当前人脸特征相似度 - v_score = np.dot(feature, db_feature)/2 + 0.5 - if v_score > v_score_max: - v_score_max = v_score - v_id = i - - if v_id == -1: - # 数据库中无人脸 - return 'unknown' - elif v_score_max < face_recognition_threshold: - # 小于人脸识别阈值,未识别 -# print('v_score_max:',v_score_max) - return 'unknown' - else: - # 识别成功 - result = 'name: {}, score:{}'.format(db_name[v_id],v_score_max) - return result - -# 标准5官 -umeyama_args_112 = [ - 38.2946 , 51.6963 , - 73.5318 , 51.5014 , - 56.0252 , 71.7366 , - 41.5493 , 92.3655 , - 70.7299 , 92.2041 -] - -def svd22(a): - # svd - s = [0.0, 0.0] - u = [0.0, 0.0, 0.0, 0.0] - v = [0.0, 0.0, 0.0, 0.0] - - s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 - s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2)) - v[2] = math.sin((math.atan2(2 * (a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ - s[0] > s[1] else 0 - v[0] = math.sqrt(1 - v[2] ** 2) - v[1] = -v[2] - v[3] = v[0] - u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 - u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 - u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] - u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] - v[0] = -v[0] - v[2] = -v[2] - - return u, s, v - - -def image_umeyama_112(src): - # 使用Umeyama算法计算仿射变换矩阵 - SRC_NUM = 5 - SRC_DIM = 2 - src_mean = [0.0, 0.0] - dst_mean = [0.0, 0.0] - - for i in range(0,SRC_NUM * 2,2): - src_mean[0] += src[i] - src_mean[1] += src[i + 1] - dst_mean[0] += umeyama_args_112[i] - dst_mean[1] += umeyama_args_112[i + 1] - - src_mean[0] /= SRC_NUM - src_mean[1] /= SRC_NUM - dst_mean[0] /= SRC_NUM - dst_mean[1] /= SRC_NUM - - src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - dst_demean = [[0.0, 0.0] for _ in 
range(SRC_NUM)] - - for i in range(SRC_NUM): - src_demean[i][0] = src[2 * i] - src_mean[0] - src_demean[i][1] = src[2 * i + 1] - src_mean[1] - dst_demean[i][0] = umeyama_args_112[2 * i] - dst_mean[0] - dst_demean[i][1] = umeyama_args_112[2 * i + 1] - dst_mean[1] - - A = [[0.0, 0.0], [0.0, 0.0]] - for i in range(SRC_DIM): - for k in range(SRC_DIM): - for j in range(SRC_NUM): - A[i][k] += dst_demean[j][i] * src_demean[j][k] - A[i][k] /= SRC_NUM - - T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] - U, S, V = svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) - - T[0][0] = U[0] * V[0] + U[1] * V[2] - T[0][1] = U[0] * V[1] + U[1] * V[3] - T[1][0] = U[2] * V[0] + U[3] * V[2] - T[1][1] = U[2] * V[1] + U[3] * V[3] - - scale = 1.0 - src_demean_mean = [0.0, 0.0] - src_demean_var = [0.0, 0.0] - for i in range(SRC_NUM): - src_demean_mean[0] += src_demean[i][0] - src_demean_mean[1] += src_demean[i][1] - - src_demean_mean[0] /= SRC_NUM - src_demean_mean[1] /= SRC_NUM - - for i in range(SRC_NUM): - src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) - src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) - - src_demean_var[0] /= SRC_NUM - src_demean_var[1] /= SRC_NUM - - scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) - T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) - T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) - T[0][0] *= scale - T[0][1] *= scale - T[1][0] *= scale - T[1][1] *= scale - return T - -def get_affine_matrix(sparse_points): - # 获取放射变换矩阵 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 使用Umeyama算法计算仿射变换矩阵 - matrix_dst = image_umeyama_112(sparse_points) - matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - return matrix_dst - -def fr_ai2d_init(): - with ScopedTiming("fr_ai2d_init",debug_mode > 0): - # (1)人脸识别ai2d初始化 - global fr_ai2d - fr_ai2d = nn.ai2d() - - # (2)人脸识别ai2d_output_tensor初始化,用于存放ai2d输出 - global fr_ai2d_output_tensor - data = np.ones(fr_kmodel_input_shape, dtype=np.uint8) - fr_ai2d_output_tensor = nn.from_numpy(data) - -def fr_ai2d_run(rgb888p_img,sparse_points): - # 人脸识别ai2d推理 - with ScopedTiming("fr_ai2d_run",debug_mode > 0): - global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor - #(1)根据原图创建人脸识别ai2d_input_tensor对象 - ai2d_input = rgb888p_img.to_numpy_ref() - fr_ai2d_input_tensor = nn.from_numpy(ai2d_input) - #(2)根据新的人脸关键点设置新的人脸识别ai2d参数 - fr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - affine_matrix = get_affine_matrix(sparse_points) - fr_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - global fr_ai2d_builder - # (3)根据新的人脸识别ai2d参数,构建识别ai2d_builder - fr_ai2d_builder = fr_ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], fr_kmodel_input_shape) - # (4)推理人脸识别ai2d,将预处理的结果保存到fr_ai2d_output_tensor - fr_ai2d_builder.run(fr_ai2d_input_tensor, fr_ai2d_output_tensor) - -def fr_ai2d_release(): - # 释放人脸识别ai2d_input_tensor、ai2d_builder - with ScopedTiming("fr_ai2d_release",debug_mode > 0): - global fr_ai2d_input_tensor,fr_ai2d_builder - del fr_ai2d_input_tensor - del fr_ai2d_builder - -def fr_kpu_init(kmodel_file): - # 人脸识别kpu初始化 - with ScopedTiming("fr_kpu_init",debug_mode > 0): - # 初始化人脸识别kpu对象 - kpu_obj = nn.kpu() - # 加载人脸识别kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸识别ai2d - fr_ai2d_init() - # 数据库初始化 - database_init() - return kpu_obj - 
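# ---- editor's note: illustrative sketch, not part of the deleted example file ----
# database_search() above compares a query feature against every registered
# feature using a cosine similarity remapped from [-1, 1] into [0, 1] via
# dot/2 + 0.5, and accepts the best match only if it clears
# face_recognition_threshold. A minimal, self-contained version of that scoring
# step is sketched below, assuming ulab.numpy as np (as imported above); the
# names query_feat and db_feats are hypothetical and not taken from the original file.
def cosine_match(query_feat, db_feats, threshold=0.75):
    # Normalize the query once; each stored feature is normalized in the loop.
    query_feat = query_feat / np.linalg.norm(query_feat)
    best_id = -1
    best_score = 0.0
    for i in range(len(db_feats)):
        db_feat = db_feats[i] / np.linalg.norm(db_feats[i])
        score = np.dot(query_feat, db_feat) / 2 + 0.5  # map cosine [-1,1] -> [0,1]
        if score > best_score:
            best_id = i
            best_score = score
    # Return the best index, or -1 when the best score stays below the threshold.
    return (best_id, best_score) if best_score >= threshold else (-1, best_score)
# ----------------------------------------------------------------------------------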
-def fr_kpu_pre_process(rgb888p_img,sparse_points): - # 人脸识别kpu预处理 - # 人脸识别ai2d推理,根据关键点对原图进行预处理 - fr_ai2d_run(rgb888p_img,sparse_points) - with ScopedTiming("fr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fr_ai2d_output_tensor - # 将人脸识别ai2d输出设置为人脸识别kpu输入 - current_kmodel_obj.set_input_tensor(0, fr_ai2d_output_tensor) - -def fr_kpu_get_output(): - # 获取人脸识别kpu输出 - with ScopedTiming("fr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result[0] - -def fr_kpu_run(kpu_obj,rgb888p_img,sparse_points): - # 人脸识别kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸识别kpu预处理,设置kpu输入 - fr_kpu_pre_process(rgb888p_img,sparse_points) - # (2)人脸识别kpu推理 - with ScopedTiming("fr kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸识别ai2d - fr_ai2d_release() - # (4)获取人脸识别kpu输出 - results = fr_kpu_get_output() - # (5)在数据库中查找当前人脸特征 - recg_result = database_search(results) - return recg_result - -def fr_kpu_deinit(): - # 人脸识别kpu相关资源释放 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - if 'fr_ai2d' in globals(): - global fr_ai2d - del fr_ai2d - if 'fr_ai2d_output_tensor' in globals(): - global fr_ai2d_output_tensor - del fr_ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.2 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -def display_draw(dets,recg_results): - # 在显示器上写人脸识别结果 - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for i,det in enumerate(dets): - # (1)画人脸框 - x1, y1, w, h = map(lambda x: int(round(x, 0)), det[:4]) - x1 = x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - y1 = y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - w = w * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = h * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - draw_img.draw_rectangle(x1,y1, w, h, color=(255,0, 0, 255), thickness = 4) - - # (2)写人脸识别结果 - recg_text = recg_results[i] - draw_img.draw_string(x1,y1,recg_text,color=(255, 255, 0, 0),scale=4) - - # (3)将画图结果拷贝到osd - draw_img.copy_to(osd_img) - # (4)将osd显示到屏幕 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - # (1)清空用来画框的图像 - draw_img.clear() - # (2)清空osd - draw_img.copy_to(osd_img) - # (3)显示透明图层 - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.1 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - 
camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可,详细解析请查看1.6.3 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - # meida资源释放 - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for face_detect.py******************** -def face_recognition_inference(): - print("face_recognition_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸关键点kpu初始化 - kpu_face_recg = fr_kpu_init(fr_kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取人脸检测结果 - dets,landms = fd_kpu_run(kpu_face_detect,rgb888p_img) - recg_result = [] - for landm in landms: - # (2.2)针对每个人脸五官点,推理得到人脸特征,并计算特征在数据库中相似度 - ret = fr_kpu_run(kpu_face_recg,rgb888p_img,landm) - recg_result.append(ret) - # (2.3)将识别结果画到显示器上 - display_draw(dets,recg_result) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - fd_kpu_deinit() - fr_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_recg - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - # 释放媒体资源 - media_deinit() - - print("face_recognition_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - face_recognition_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/face_registration.py b/share/qtcreator/examples/04-AI-Demo/face_registration.py deleted file mode 100644 index 0ed94f63dc45..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/face_registration.py +++ /dev/null @@ -1,494 +0,0 @@ -import 
ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os,sys #操作系统接口模块 -import math #数学模块 - -#********************for config.py******************** -# kmodel输入shape -# 人脸检测kmodel输入shape -fd_kmodel_input_shape = (1,3,320,320) -# 人脸识别kmodel输入shape -fr_kmodel_input_shape = (1,3,112,112) -# ai原图padding -rgb_mean = [104,117,123] - -#kmodel相关参数设置 -#人脸检测 -confidence_threshold = 0.5 #人脸检测阈值 -top_k = 5000 -nms_threshold = 0.2 -keep_top_k = 750 -vis_thres = 0.5 -variance = [0.1, 0.2] -anchor_len = 4200 -score_dim = 2 -det_dim = 4 -keypoint_dim = 10 -#人脸识别 -max_register_face = 100 #数据库最多人脸个数 -feature_num = 128 #人脸识别特征维度 - -# 文件配置 -# 人脸检测kmodel -root_dir = '/sdcard/app/tests/' -fd_kmodel_file = root_dir + 'kmodel/face_detection_320.kmodel' -# 人脸识别kmodel -fr_kmodel_file = root_dir + 'kmodel/face_recognition.kmodel' -# 人脸检测anchor -anchors_path = root_dir + 'utils/prior_data_320.bin' -# 人脸注册数据库 -database_dir = root_dir + 'utils/db/' -# 人脸注册数据库原图 -database_img_dir = root_dir + 'utils/db_img/' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -global current_kmodel_obj #当前kpu实例 -# fd_ai2d: 人脸检测ai2d实例 -# fd_ai2d_input_tensor: 人脸检测ai2d输入 -# fd_ai2d_output_tensor: 人脸检测ai2d输入 -# fd_ai2d_builder: 根据人脸检测ai2d参数,构建的人脸检测ai2d_builder对象 -global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder -# fr_ai2d: 人脸识别ai2d实例 -# fr_ai2d_input_tensor: 人脸识别ai2d输入 -# fr_ai2d_output_tensor: 人脸识别ai2d输入 -# fr_ai2d_builder: 根据人脸识别ai2d参数,构建的人脸识别ai2d_builder对象 -global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor,fr_ai2d_builder -global valid_register_face #数据库中有效人脸个数 - -#读取anchor文件,为人脸检测后处理做准备 -print('anchors_path:',anchors_path) -prior_data = np.fromfile(anchors_path, dtype=np.float) -prior_data = prior_data.reshape((anchor_len,det_dim)) - -def get_pad_one_side_param(rgb888p_img): - # 右padding或下padding,获取padding参数 - with ScopedTiming("get_pad_one_side_param", debug_mode > 1): - dst_w = fd_kmodel_input_shape[3] # kmodel输入宽(w) - dst_h = fd_kmodel_input_shape[2] # kmodel输入高(h) - - # 计算最小的缩放比例,等比例缩放 - ratio_w = dst_w / rgb888p_img.shape[3] - ratio_h = dst_h / rgb888p_img.shape[2] - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - # 计算经过缩放后的新宽和新高 - new_w = (int)(ratio * rgb888p_img.shape[3]) - new_h = (int)(ratio * rgb888p_img.shape[2]) - - # 计算需要添加的padding,以使得kmodel输入的宽高和原图一致 - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - # 四舍五入,确保padding是整数 - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -def fd_ai2d_init(): - # 人脸检测模型ai2d初始化 - with ScopedTiming("fd_ai2d_init",debug_mode > 0): - # (1)创建人脸检测ai2d对象 - global fd_ai2d - fd_ai2d = nn.ai2d() - - #(2)创建人脸检测ai2d_output_tensor,用于保存人脸检测ai2d输出 - global fd_ai2d_output_tensor - data = 
np.ones(fd_kmodel_input_shape, dtype=np.uint8) - fd_ai2d_output_tensor = nn.from_numpy(data) - -def fd_ai2d_run(rgb888p_img): - # 根据人脸检测ai2d参数,对原图rgb888p_img进行预处理 - with ScopedTiming("fd_ai2d_run",debug_mode > 0): - global fd_ai2d,fd_ai2d_input_tensor,fd_ai2d_output_tensor,fd_ai2d_builder - # (1)根据原图构建ai2d_input_tensor对象 - fd_ai2d_input_tensor = nn.from_numpy(rgb888p_img) - # (2)根据新的图像设置新的人脸检测ai2d参数 - fd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - fd_ai2d.set_pad_param(True, get_pad_one_side_param(rgb888p_img), 0, rgb_mean) - fd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # (3)根据新的人脸检测ai2d参数,构建人脸检测ai2d_builder - fd_ai2d_builder = fd_ai2d.build(rgb888p_img.shape, fd_kmodel_input_shape) - # (4)运行人脸检测ai2d_builder,将结果保存到人脸检测ai2d_output_tensor中 - fd_ai2d_builder.run(fd_ai2d_input_tensor, fd_ai2d_output_tensor) - -def fd_ai2d_release(): - # 释放人脸检测ai2d部分资源 - with ScopedTiming("fd_ai2d_release",debug_mode > 0): - global fd_ai2d_input_tensor,fd_ai2d_builder - del fd_ai2d_input_tensor - del fd_ai2d_builder - - -def fd_kpu_init(kmodel_file): - # 初始化人脸检测kpu对象,并加载kmodel - with ScopedTiming("fd_kpu_init",debug_mode > 0): - # 初始化人脸检测kpu对象 - kpu_obj = nn.kpu() - # 加载人脸检测kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸检测ai2d - fd_ai2d_init() - return kpu_obj - -def fd_kpu_pre_process(rgb888p_img): - # 设置人脸检测kpu输入 - # 使用人脸检测ai2d对原图进行预处理(padding,resize) - fd_ai2d_run(rgb888p_img) - with ScopedTiming("fd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fd_ai2d_output_tensor - # 设置人脸检测kpu输入 - current_kmodel_obj.set_input_tensor(0, fd_ai2d_output_tensor) - -def fd_kpu_get_output(): - # 获取人脸检测kpu输出 - with ScopedTiming("fd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -def fd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - fd_kpu_pre_process(rgb888p_img) - # (2)人脸检测kpu推理 - with ScopedTiming("fd kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸检测ai2d资源 - fd_ai2d_release() - # (4)获取人俩检测kpu输出 - results = fd_kpu_get_output() - # (5)人脸检测kpu结果后处理 - with ScopedTiming("fd kpu_post",debug_mode > 0): - post_ret = aidemo.face_det_post_process(confidence_threshold,nms_threshold,fd_kmodel_input_shape[2],prior_data, - [rgb888p_img.shape[3],rgb888p_img.shape[2]],results) - # (6)返回人脸关键点 - if len(post_ret)==0: - return post_ret - else: - return post_ret[0],post_ret[1] #0:det,1:landm,2:score - -def fd_kpu_deinit(): - # kpu释放 - with ScopedTiming("fd_kpu_deinit",debug_mode > 0): - if 'fd_ai2d' in globals(): #删除人脸检测ai2d变量,释放对它所引用对象的内存引用 - global fd_ai2d - del fd_ai2d - if 'fd_ai2d_output_tensor' in globals():#删除人脸检测ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global fd_ai2d_output_tensor - del fd_ai2d_output_tensor - - -###############for face recognition############### -# 标准5官 -umeyama_args_112 = [ - 38.2946 , 51.6963 , - 73.5318 , 51.5014 , - 56.0252 , 71.7366 , - 41.5493 , 92.3655 , - 70.7299 , 92.2041 -] - -def svd22(a): - # svd - s = [0.0, 0.0] - u = [0.0, 0.0, 0.0, 0.0] - v = [0.0, 0.0, 0.0, 0.0] - - s[0] = (math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2) + math.sqrt((a[0] + a[3]) ** 2 + (a[1] - a[2]) ** 2)) / 2 - s[1] = abs(s[0] - math.sqrt((a[0] - a[3]) ** 2 + (a[1] + a[2]) ** 2)) - v[2] = math.sin((math.atan2(2 * 
(a[0] * a[1] + a[2] * a[3]), a[0] ** 2 - a[1] ** 2 + a[2] ** 2 - a[3] ** 2)) / 2) if \ - s[0] > s[1] else 0 - v[0] = math.sqrt(1 - v[2] ** 2) - v[1] = -v[2] - v[3] = v[0] - u[0] = -(a[0] * v[0] + a[1] * v[2]) / s[0] if s[0] != 0 else 1 - u[2] = -(a[2] * v[0] + a[3] * v[2]) / s[0] if s[0] != 0 else 0 - u[1] = (a[0] * v[1] + a[1] * v[3]) / s[1] if s[1] != 0 else -u[2] - u[3] = (a[2] * v[1] + a[3] * v[3]) / s[1] if s[1] != 0 else u[0] - v[0] = -v[0] - v[2] = -v[2] - - return u, s, v - - -def image_umeyama_112(src): - # 使用Umeyama算法计算仿射变换矩阵 - SRC_NUM = 5 - SRC_DIM = 2 - src_mean = [0.0, 0.0] - dst_mean = [0.0, 0.0] - - for i in range(0,SRC_NUM * 2,2): - src_mean[0] += src[i] - src_mean[1] += src[i + 1] - dst_mean[0] += umeyama_args_112[i] - dst_mean[1] += umeyama_args_112[i + 1] - - src_mean[0] /= SRC_NUM - src_mean[1] /= SRC_NUM - dst_mean[0] /= SRC_NUM - dst_mean[1] /= SRC_NUM - - src_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - dst_demean = [[0.0, 0.0] for _ in range(SRC_NUM)] - - for i in range(SRC_NUM): - src_demean[i][0] = src[2 * i] - src_mean[0] - src_demean[i][1] = src[2 * i + 1] - src_mean[1] - dst_demean[i][0] = umeyama_args_112[2 * i] - dst_mean[0] - dst_demean[i][1] = umeyama_args_112[2 * i + 1] - dst_mean[1] - - A = [[0.0, 0.0], [0.0, 0.0]] - for i in range(SRC_DIM): - for k in range(SRC_DIM): - for j in range(SRC_NUM): - A[i][k] += dst_demean[j][i] * src_demean[j][k] - A[i][k] /= SRC_NUM - - T = [[1, 0, 0], [0, 1, 0], [0, 0, 1]] - U, S, V = svd22([A[0][0], A[0][1], A[1][0], A[1][1]]) - - T[0][0] = U[0] * V[0] + U[1] * V[2] - T[0][1] = U[0] * V[1] + U[1] * V[3] - T[1][0] = U[2] * V[0] + U[3] * V[2] - T[1][1] = U[2] * V[1] + U[3] * V[3] - - scale = 1.0 - src_demean_mean = [0.0, 0.0] - src_demean_var = [0.0, 0.0] - for i in range(SRC_NUM): - src_demean_mean[0] += src_demean[i][0] - src_demean_mean[1] += src_demean[i][1] - - src_demean_mean[0] /= SRC_NUM - src_demean_mean[1] /= SRC_NUM - - for i in range(SRC_NUM): - src_demean_var[0] += (src_demean_mean[0] - src_demean[i][0]) * (src_demean_mean[0] - src_demean[i][0]) - src_demean_var[1] += (src_demean_mean[1] - src_demean[i][1]) * (src_demean_mean[1] - src_demean[i][1]) - - src_demean_var[0] /= SRC_NUM - src_demean_var[1] /= SRC_NUM - - scale = 1.0 / (src_demean_var[0] + src_demean_var[1]) * (S[0] + S[1]) - T[0][2] = dst_mean[0] - scale * (T[0][0] * src_mean[0] + T[0][1] * src_mean[1]) - T[1][2] = dst_mean[1] - scale * (T[1][0] * src_mean[0] + T[1][1] * src_mean[1]) - T[0][0] *= scale - T[0][1] *= scale - T[1][0] *= scale - T[1][1] *= scale - return T - -def get_affine_matrix(sparse_points): - # 获取affine变换矩阵 - with ScopedTiming("get_affine_matrix", debug_mode > 1): - # 使用Umeyama算法计算仿射变换矩阵 - matrix_dst = image_umeyama_112(sparse_points) - matrix_dst = [matrix_dst[0][0],matrix_dst[0][1],matrix_dst[0][2], - matrix_dst[1][0],matrix_dst[1][1],matrix_dst[1][2]] - return matrix_dst - -def fr_ai2d_init(): - with ScopedTiming("fr_ai2d_init",debug_mode > 0): - # (1)人脸识别ai2d初始化 - global fr_ai2d - fr_ai2d = nn.ai2d() - - # (2)人脸识别ai2d_output_tensor初始化,用于存放ai2d输出 - global fr_ai2d_output_tensor - data = np.ones(fr_kmodel_input_shape, dtype=np.uint8) - fr_ai2d_output_tensor = nn.from_numpy(data) - -def fr_ai2d_run(rgb888p_img,sparse_points): - # 人脸识别ai2d推理 - with ScopedTiming("fr_ai2d_run",debug_mode > 0): - global fr_ai2d,fr_ai2d_input_tensor,fr_ai2d_output_tensor - #(1)根据原图创建人脸识别ai2d_input_tensor对象 - fr_ai2d_input_tensor = nn.from_numpy(rgb888p_img) - #(2)根据新的人脸关键点设置新的人脸识别ai2d参数 - fr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - 
nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - affine_matrix = get_affine_matrix(sparse_points) - fr_ai2d.set_affine_param(True,nn.interp_method.cv2_bilinear,0, 0, 127, 1,affine_matrix) - global fr_ai2d_builder - # (3)根据新的人脸识别ai2d参数,构建识别ai2d_builder - fr_ai2d_builder = fr_ai2d.build(rgb888p_img.shape, fr_kmodel_input_shape) - # (4)推理人脸识别ai2d,将预处理的结果保存到fr_ai2d_output_tensor - fr_ai2d_builder.run(fr_ai2d_input_tensor, fr_ai2d_output_tensor) - -def fr_ai2d_release(): - # 释放人脸识别ai2d_input_tensor、ai2d_builder - with ScopedTiming("fr_ai2d_release",debug_mode > 0): - global fr_ai2d_input_tensor,fr_ai2d_builder - del fr_ai2d_input_tensor - del fr_ai2d_builder - -def fr_kpu_init(kmodel_file): - # 人脸识别kpu初始化 - with ScopedTiming("fr_kpu_init",debug_mode > 0): - # 初始化人脸识别kpu对象 - kpu_obj = nn.kpu() - # 加载人脸识别kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化人脸识别ai2d - fr_ai2d_init() - return kpu_obj - -def fr_kpu_pre_process(rgb888p_img,sparse_points): - # 人脸识别kpu预处理 - # 人脸识别ai2d推理,根据关键点对原图进行预处理 - fr_ai2d_run(rgb888p_img,sparse_points) - with ScopedTiming("fr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,fr_ai2d_output_tensor - # 将人脸识别ai2d输出设置为人脸识别kpu输入 - current_kmodel_obj.set_input_tensor(0, fr_ai2d_output_tensor) - - #ai2d_out_data = fr_ai2d_output_tensor.to_numpy() - #print('ai2d_out_data.shape:',ai2d_out_data.shape) - #with open("/sdcard/app/ai2d_out.bin", "wb") as file: - #file.write(ai2d_out_data.tobytes()) - -def fr_kpu_get_output(): - # 获取人脸识别kpu输出 - with ScopedTiming("fr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result[0] - -def fr_kpu_run(kpu_obj,rgb888p_img,sparse_points): - # 人脸识别kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)人脸识别kpu预处理,设置kpu输入 - fr_kpu_pre_process(rgb888p_img,sparse_points) - # (2)人脸识别kpu推理 - with ScopedTiming("fr kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放人脸识别ai2d - fr_ai2d_release() - # (4)获取人脸识别kpu输出 - results = fr_kpu_get_output() - return results - -def fr_kpu_deinit(): - # 人脸识别kpu相关资源释放 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - if 'fr_ai2d' in globals(): - global fr_ai2d - del fr_ai2d - if 'fr_ai2d_output_tensor' in globals(): - global fr_ai2d_output_tensor - del fr_ai2d_output_tensor - -#********************for face_detect.py******************** -def image2rgb888array(img): #4维 - # 将Image转换为rgb888格式 - with ScopedTiming("fr_kpu_deinit",debug_mode > 0): - img_data_rgb888=img.to_rgb888() - # hwc,rgb888 - img_hwc=img_data_rgb888.to_numpy_ref() - shape=img_hwc.shape - img_tmp = img_hwc.reshape((shape[0] * shape[1], shape[2])) - img_tmp_trans = img_tmp.transpose() - img_res=img_tmp_trans.copy() - # chw,rgb888 - img_return=img_res.reshape((1,shape[2],shape[0],shape[1])) - return img_return - -def face_registration_inference(): - print("face_registration_test start") - # 人脸检测kpu初始化 - kpu_face_detect = fd_kpu_init(fd_kmodel_file) - # 人脸识别kpu初始化 - kpu_face_reg = fr_kpu_init(fr_kmodel_file) - try: - # 获取图像列表 - img_list = os.listdir(database_img_dir) - for img_file in img_list: - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一张图像 - full_img_file = database_img_dir + img_file - print(full_img_file) - img = image.Image(full_img_file) - rgb888p_img_ndarry = image2rgb888array(img) - - #(2)推理得到人脸检测kpu,得到人脸检测框、人脸五点 - dets,landms = fd_kpu_run(kpu_face_detect,rgb888p_img_ndarry) - if dets: - if dets.shape[0] == 1: - #(3)若是只检测到一张人脸,则将该人脸注册到数据库 - db_i_name = img_file.split('.')[0] - for landm in 
landms: - reg_result = fr_kpu_run(kpu_face_reg,rgb888p_img_ndarry,landm) - #print('\nwrite bin:',database_dir+'{}.bin'.format(db_i_name)) - with open(database_dir+'{}.bin'.format(db_i_name), "wb") as file: - file.write(reg_result.tobytes()) - else: - print('Only one person in a picture when you sign up') - else: - print('No person detected') - - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - # 释放kpu资源 - fd_kpu_deinit() - fr_kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_face_detect - del kpu_face_reg - # 垃圾回收 - gc.collect() - nn.shrink_memory_pool() - - print("face_registration_test end") - return 0 - -if __name__ == '__main__': - nn.shrink_memory_pool() - face_registration_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/falldown_detection.py b/share/qtcreator/examples/04-AI-Demo/falldown_detection.py deleted file mode 100644 index 693f89089a67..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/falldown_detection.py +++ /dev/null @@ -1,343 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.3 # 摔倒检测阈值,用于过滤roi -nms_threshold = 0.45 # 摔倒检测框阈值,用于过滤重复roi -kmodel_frame_size = [640,640] # 摔倒检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 2 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["Fall","NoFall"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov5n-falldown.kmodel' # kmodel文件的路径 -anchors = [10,13, 16,30, 33,23, 30,61, 62,45, 50,119, 116,90, 156,198, 373,326] # anchor设置 - -colors = [(255,0, 0, 255), (255,0, 255, 0), (255,255,0, 0), (255,255,0, 255)] # 颜色设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * 
ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor, ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)摔倒检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放摔倒检测 ai2d 资源 - ai2d_release() - # (4)获取摔倒检测 kpu 输出 - results = kpu_get_output() - # (5)摔倒检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回摔倒检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d, ai2d_output_tensor, ai2d_builder - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): #删除ai2d_builder变量,释放对它所引用对象的内存引用 - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的行人以及标出是否摔倒的结果 -def 
display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h) , color=colors[det_box[0]], thickness = 2) - draw_img.draw_string( x1 , y1-20, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)) , color=colors[det_box[0]+2], scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for falldown_detect.py********** -def falldown_detect_inference(): - print("falldown_detect_test start") - kpu_falldown_detect = kpu_init(kmodel_file) # 创建摔倒检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 
设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_falldown_detect,rgb888p_img) # 执行摔倒检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_falldown_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("falldown_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - falldown_detect_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/finger_guessing.py b/share/qtcreator/examples/04-AI-Demo/finger_guessing.py deleted file mode 100644 index 2ac118475896..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/finger_guessing.py +++ /dev/null @@ -1,626 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -from random import randint #随机整数生成 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aicube #aicube模块,封装ai cube 相关后处理 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#手掌检测 和 手掌关键点检测 kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) -hk_kmodel_input_shape = (1,3,256,256) - -#手掌检测 相关参数设置 -confidence_threshold = 0.2 #手掌检测 分数阈值 -nms_threshold = 0.5 #非极大值抑制 阈值 -hd_kmodel_frame_size = [512,512] #手掌检测kmodel输入 w h -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] #手掌检测原始输入图像 w h -strides = [8,16,32] #手掌检测模型 下采样输出倍数 -num_classes = 1 #检测类别数, 及手掌一种 -nms_option = False #控制最大值抑制的方式 False 类内 True 类间 -labels = ["hand"] #标签名称 -anchors = [26,27,53,52,75,71,80,99,106,82,99,134,140,113,161,172,245,276] #手掌检测模型 锚框 -#手掌关键点检测 相关参数 -hk_kmodel_frame_size = [256,256] #手掌关键点检测 kmodel 输入 w h - -# kmodel 路径 -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' #手掌检测kmodel路径 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' #手掌关键点kmodel路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -# 猜拳模式 0 玩家稳赢 , 1 玩家必输 , n > 2 多局多胜 -guess_mode = 3 - -# 读取石头剪刀布的bin文件方法 -def read_file(file_name): - image_arr = np.fromfile(file_name,dtype=np.uint8) - image_arr = image_arr.reshape((400,400,4)) - return image_arr -# 石头剪刀布的 array -five_image = read_file(root_dir + "utils/five.bin") -fist_image = read_file(root_dir + "utils/fist.bin") -shear_image = read_file(root_dir + "utils/shear.bin") - - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - 
self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global counts_guess, player_win, k230_win, sleep_end, set_stop_id # 定义猜拳游戏的参数:猜拳次数、玩家赢次、k230赢次、是否停顿、是狗暂停 - -# 手掌检测 ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - global hd_ai2d_output_tensor - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获取 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原始图像预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3) 释放ai2d资源 - hd_ai2d_release() - # (4) 获取kpu输出 - results = hd_kpu_get_output() - # (5) kpu结果后处理 - dets = aicube.anchorbasedet_post_process( 
results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6) 返回 手掌检测 结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - global hk_ai2d_output_tensor - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 接收kmodel结果的后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - # results_show = np.zeros(len(results),dtype=np.int16) - results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2) kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3) 释放ai2d资源 - hk_ai2d_release() - # (4) 获取kpu输出 - results = hk_kpu_get_output() - # (5) kpu结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6) 返回 关键点检测 结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with 
ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 手掌关键点检测 计算角度 -def hk_vector_2d_angle(v1,v2): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - # if (angle>180): - # return 65536 - return angle - -# 利用手掌关键点检测的结果 判断手掌手势 -def hk_gesture(kpu_hand_keypoint_detect,rgb888p_img,det_box): - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - return - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - return - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - return - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) - - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. 
not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "love" - elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "six" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle): - gesture_str = "three" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in
globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for finger_guessing.py********** -def finger_guessing_inference(): - print("finger_guessing_test start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) # 开启 camera - counts_guess = -1 # 猜拳次数 计数 - player_win = 0 # 玩家 赢次计数 - k230_win = 0 # k230 赢次计数 - sleep_end = False # 是否 停顿 - set_stop_id = True # 是否 暂停猜拳 - LIBRARY = ["fist","yeah","five"] # 猜拳 石头剪刀布 三种方案的dict - - count = 0 - global draw_img,masks,osd_img - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - with ScopedTiming("trigger time", debug_mode > 0): - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - gesture = "" - draw_img.clear() - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - for det_box in dets: - gesture = hk_gesture(kpu_hand_keypoint_detect,rgb888p_img,det_box) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 得到手势类型 - if (len(dets) >= 2): - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - elif (guess_mode == 0): - if (gesture == "fist"): - masks[:400,:400,:] = shear_image - elif (gesture == "five"): - masks[:400,:400,:] = fist_image - elif (gesture == "yeah"): - masks[:400,:400,:] = five_image - draw_img.copy_to(osd_img) - elif (guess_mode == 1): - if (gesture == "fist"): - masks[:400,:400,:] = five_image - elif (gesture == "five"): - masks[:400,:400,:] = shear_image - elif (gesture == "yeah"): - masks[:400,:400,:] = fist_image - draw_img.copy_to(osd_img) - else: - if (sleep_end): - time.sleep_ms(2000) - sleep_end = False - if (len(dets) == 0): - set_stop_id = True - if (counts_guess == -1 and gesture != "fist" and gesture != "yeah" and gesture != "five"): - draw_img.draw_string( 400 , 450, "G A M E S T A R T", color=(255,255,0,0), scale=7) - draw_img.draw_string( 400 , 550, " 1 S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - elif (counts_guess == guess_mode): - draw_img.clear() - if (k230_win > player_win): - draw_img.draw_string( 400 , 450, "Y O U L O S E", color=(255,255,0,0), scale=7) - elif (k230_win < player_win): - draw_img.draw_string( 400 , 450, "Y O U W I N", color=(255,255,0,0), scale=7) - else: - draw_img.draw_string( 400 , 450, "T I E G A M E", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - counts_guess = -1 - player_win = 0 - k230_win = 0 - - sleep_end = True - else: - if (set_stop_id): - if (counts_guess == -1 and (gesture == "fist" or gesture == "yeah" or gesture == "five")): - counts_guess = 0 - if (counts_guess != -1 and (gesture == "fist" or gesture == "yeah" or gesture == "five")): - k230_guess = randint(1,10000) % 3 - if (gesture == "fist" and LIBRARY[k230_guess] == "yeah"): - player_win += 1 - elif (gesture == "fist" and LIBRARY[k230_guess] == "five"): - k230_win += 1 - if (gesture == "yeah" and LIBRARY[k230_guess] == "fist"): - k230_win += 1 - elif (gesture == "yeah" and LIBRARY[k230_guess] == "five"): 
- player_win += 1 - if (gesture == "five" and LIBRARY[k230_guess] == "fist"): - player_win += 1 - elif (gesture == "five" and LIBRARY[k230_guess] == "yeah"): - k230_win += 1 - - if (LIBRARY[k230_guess] == "fist"): - masks[:400,:400,:] = fist_image - elif (LIBRARY[k230_guess] == "five"): - masks[:400,:400,:] = five_image - elif (LIBRARY[k230_guess] == "yeah"): - masks[:400,:400,:] = shear_image - - counts_guess += 1; - draw_img.draw_string( 400 , 450, " " + str(counts_guess) + " S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - set_stop_id = False - sleep_end = True - - else: - draw_img.draw_string( 400 , 450, " " + str(counts_guess+1) + " S E T ", color=(255,255,0,0), scale=7) - draw_img.copy_to(osd_img) - else: - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) # 将得到的图像 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图形 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 停止 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个 media - - - print("finger_guessing_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - finger_guessing_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/hand_detection.py b/share/qtcreator/examples/04-AI-Demo/hand_detection.py deleted file mode 100644 index b3db8abc5640..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/hand_detection.py +++ /dev/null @@ -1,347 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,512,512) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["hand"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def 
__exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor, ai2d_output_tensor, ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): #删除ai2d变量,释放对它所引用对象的内存引用 - global ai2d - del ai2d 
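# Sketch (plain CPython, not part of the deleted example): it redoes the
# letterbox arithmetic from ai2d_init above, so the pad values handed to
# set_pad_param can be checked on a PC without nncase/ai2d installed.
def letterbox_pads(src_w, src_h, dst_w, dst_h):
    ratio = min(dst_w / src_w, dst_h / src_h)        # scale by the tighter side
    new_w, new_h = int(ratio * src_w), int(ratio * src_h)
    dw, dh = (dst_w - new_w) / 2, (dst_h - new_h) / 2
    # order matches set_pad_param: top, bottom, left, right
    # (ai2d_init computes `right` with dw - 0.1; a symmetric dw + 0.1 is used here)
    return (int(round(dh - 0.1)), int(round(dh + 0.1)),
            int(round(dw - 0.1)), int(round(dw + 0.1)))

# For the 1920x1080 RGB888P frame and the 512x512 kmodel input this gives
# (112, 112, 0, 0): the frame shrinks to 512x288 and 112 rows of the 114-gray
# padding value go above and below it.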
- if 'ai2d_output_tensor' in globals(): #删除ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): #删除ai2d_builder变量,释放对它所引用对象的内存引用 - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的手以及标出得分 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - if (h<(0.1*DISPLAY_HEIGHT)): - continue - if (w<(0.25*DISPLAY_WIDTH) and ((x1<(0.03*DISPLAY_WIDTH)) or (x2>(0.97*DISPLAY_WIDTH)))): - continue - if (w<(0.15*DISPLAY_WIDTH) and ((x1<(0.01*DISPLAY_WIDTH)) or (x2>(0.99*DISPLAY_WIDTH)))): - continue - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h), color=(255, 0, 255, 0), thickness = 2) - draw_img.draw_string( x1 , y1-50, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)), color=(255,0, 255, 0), scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img 
= image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_detect.py********** -def hand_detect_inference(): - print("hand_detect_test start") - kpu_hand_detect = kpu_init(kmodel_file) # 创建手掌检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_detect_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_class.py b/share/qtcreator/examples/04-AI-Demo/hand_keypoint_class.py deleted file mode 100644 index 6bbc0d3e42ba..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_class.py +++ /dev/null @@ -1,546 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -root_dir = '/sdcard/app/tests/' - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape 
-hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = 
result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) # kpu结果后处理 - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 输出后处理 -def hk_kpu_post_process(results, x, y, w, h): - results_show = np.zeros(results.shape,dtype=np.int16) - 
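# Sketch with made-up numbers (desktop numpy shown; ulab.numpy behaves the same
# on-device): the slice assignments just below only de-normalize the keypoints,
# turning model outputs in 0..1 crop space into pixel coordinates via
# x * crop_w + crop_x and y * crop_h + crop_y.
import numpy as np
kps = np.array([0.5, 0.5, 0.25, 0.75])     # two fake (x, y) keypoints
x0, y0, w0, h0 = 100, 200, 256, 256        # hypothetical crop box from the detector
shown = np.zeros(kps.shape, dtype=np.int16)
shown[0::2] = kps[0::2] * w0 + x0          # x values -> [228, 164]
shown[1::2] = kps[1::2] * h0 + y0          # y values -> [328, 392]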
results_show[0::2] = results[0::2] * w + x - results_show[1::2] = results[1::2] * h + y - return results_show - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)手掌关键点检测 kpu 结果后处理 - result = hk_kpu_post_process(results[0],x,y,w,h) - # (6)返回手掌关键点检测结果 - return result - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 求两个vector之间的夹角 -def hk_vector_2d_angle(v1,v2): - with ScopedTiming("hk_vector_2d_angle",debug_mode > 0): - v1_x = v1[0] - v1_y = v1[1] - v2_x = v2[0] - v2_y = v2[1] - v1_norm = np.sqrt(v1_x * v1_x+ v1_y * v1_y) - v2_norm = np.sqrt(v2_x * v2_x + v2_y * v2_y) - dot_product = v1_x * v2_x + v1_y * v2_y - cos_angle = dot_product/(v1_norm*v2_norm) - angle = np.acos(cos_angle)*180/np.pi - return angle - -# 根据手掌关键点检测结果判断手势类别 -def hk_gesture(results): - with ScopedTiming("hk_gesture",debug_mode > 0): - angle_list = [] - for i in range(5): - angle = hk_vector_2d_angle([(results[0]-results[i*8+4]), (results[1]-results[i*8+5])],[(results[i*8+6]-results[i*8+8]),(results[i*8+7]-results[i*8+9])]) - angle_list.append(angle) - - thr_angle = 65. - thr_angle_thumb = 53. - thr_angle_s = 49. - gesture_str = None - if 65535. not in angle_list: - if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "fist" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "gun" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "love" - elif (angle_list[0]>5) and (angle_list[1]<thr_angle_s) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "one" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]<thr_angle_s): - gesture_str = "six" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]<thr_angle_s) and (angle_list[4]>thr_angle): - gesture_str = "three" - elif (angle_list[0]<thr_angle_s) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "thumbUp" - elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]<thr_angle_s) and (angle_list[2]<thr_angle_s) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle): - gesture_str = "yeah" - - return gesture_str - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 标出检测到的21个关键点并用不同颜色的线段连接 -def display_draw(results, x, y, w, h): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if results: - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = results[0::2] * (DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - results_show[1::2] = results[1::2] * (DISPLAY_HEIGHT /
OUT_RGB888P_HEIGHT) - - for i in range(len(results_show)/2): - draw_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) - for i in range(5): - j = i*8 - if i==0: - R = 255; G = 0; B = 0 - if i==1: - R = 255; G = 0; B = 255 - if i==2: - R = 255; G = 255; B = 0 - if i==3: - R = 0; G = 255; B = 0 - if i==4: - R = 0; G = 0; B = 255 - draw_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) - - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_keypoint_class.py********** -def hand_keypoint_class_inference(): - print("hand_keypoint_class_test start") - - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 
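# Sketch (pure Python, illustrative only): hk_vector_2d_angle above is the usual
# angle between two 2-D vectors, degrees(acos(v1.v2 / (|v1||v2|))). hk_gesture
# compares a knuckle-to-wrist vector with the last fingertip segment of each
# finger, so an extended finger reads near 0 deg and a curled one near 180 deg;
# the demo's thresholds (thr_angle_s ~49, thr_angle ~65) decide straight vs bent.
import math

def vector_2d_angle(v1, v2):
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    norm = math.hypot(v1[0], v1[1]) * math.hypot(v2[0], v2[1])
    return math.degrees(math.acos(dot / norm))

print(vector_2d_angle((0, 1), (0, 1)))   # 0.0  -> finger extended
print(vector_2d_angle((0, 1), (1, 1)))   # 45.0 -> partly bent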
- while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total", 1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - draw_img.clear() - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w, h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - gesture = hk_gesture(hk_results) # 根据关键点检测结果判断手势类别 - - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - display_draw(hk_results, x1_kp, y1_kp, w_kp, h_kp) # 将得到的手掌关键点检测结果 绘制到 display - draw_img.draw_string( x_det , y_det-50, " " + str(gesture), color=(255,0, 255, 0), scale=4) # 将根据关键点检测结果判断的手势类别 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_keypoint_class_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_keypoint_class_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py b/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py deleted file mode 100644 index 3add9c8d4ec4..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/hand_keypoint_detection.py +++ /dev/null @@ -1,484 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for 
hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor, hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 
0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): #删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor, hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global 
current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): #删除hk_ai2d变量,释放对它所引用对象的内存引用 - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): #删除hk_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 标出检测到的21个关键点并用不同颜色的线段连接 -def display_draw(results, x, y, w, h): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if results: - results_show = np.zeros(results.shape,dtype=np.int16) - results_show[0::2] = (results[0::2] * w + x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - results_show[1::2] = (results[1::2] * h + y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - for i in range(len(results_show)/2): - draw_img.draw_circle(results_show[i*2], results_show[i*2+1], 1, color=(255, 0, 255, 0),fill=False) - for i in range(5): - j = i*8 - if i==0: - R = 255; G = 0; B = 0 - if i==1: - R = 255; G = 0; B = 255 - if i==2: - R = 255; G = 255; B = 0 - if i==3: - R = 0; G = 255; B = 0 - if i==4: - R = 0; G = 0; B = 255 - draw_img.draw_line(results_show[0], results_show[1], results_show[j+2], results_show[j+3], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+2], results_show[j+3], results_show[j+4], results_show[j+5], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+4], results_show[j+5], results_show[j+6], results_show[j+7], color=(255,R,G,B), thickness = 3) - draw_img.draw_line(results_show[j+6], results_show[j+7], results_show[j+8], results_show[j+9], color=(255,R,G,B), thickness = 3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# 
camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_keypoint_detect.py********** -def hand_keypoint_detect_inference(): - print("hand_keypoint_detect_test start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 
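# Sketch (plain Python, no camera or kmodel needed): the crop handed to the
# keypoint model above is a square 1.26x the longer box side, centred on the
# detection and clamped to the frame -- the same arithmetic as the x1_kp/y1_kp lines.
def expand_hand_crop(x1, y1, x2, y2, frame_w, frame_h, scale=1.26):
    half = scale * max(x2 - x1, y2 - y1) / 2
    cx, cy = (x1 + x2) / 2, (y1 + y2) / 2
    nx1, ny1 = int(max(0, cx - half)), int(max(0, cy - half))
    nx2 = int(min(frame_w - 1, cx + half))
    ny2 = int(min(frame_h - 1, cy + half))
    return nx1, ny1, nx2 - nx1 + 1, ny2 - ny1 + 1    # crop x, y, w, h

# e.g. a 200x100 detection at (500, 400)..(700, 500) in a 1920x1080 frame becomes
# a 253x253 crop, so the whole hand plus margin reaches the 256x256 keypoint model.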
- - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - display_draw(hk_results[0], x1_kp, y1_kp, w_kp, h_kp) # 将得到的手掌关键点检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_keypoint_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/hand_recognition.py b/share/qtcreator/examples/04-AI-Demo/hand_recognition.py deleted file mode 100644 index 86f57fd25e5f..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/hand_recognition.py +++ /dev/null @@ -1,472 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + 'kmodel/hand_det.kmodel' # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand recognition---------- -#kmodel输入shape -hr_kmodel_input_shape = (1,3,224,224) # 手势识别kmodel输入分辨率 - -#kmodel相关参数设置 -hr_kmodel_frame_size = [224,224] # 手势识别输入图片尺寸 -labels = ["gun","other","yeah","five"] # 模型输出类别名称 - -hr_kmodel_file = root_dir + "kmodel/hand_reco.kmodel" # 手势识别kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 
对象,并且定义 ai2d 的输入、输出 以及 builder -global hr_ai2d,hr_ai2d_input_tensor,hr_ai2d_output_tensor,hr_ai2d_builder # 定义手势识别全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -#-------hand detect--------: -# 手掌检测 ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor, hd_ai2d_output_tensor, hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): 
#删除hd_ai2d变量,释放对它所引用对象的内存引用 - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): #删除hd_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): #删除hd_ai2d_builder变量,释放对它所引用对象的内存引用 - global hd_ai2d_builder - del hd_ai2d_builder - - -#-------hand recognition--------: -# 手势识别 ai2d 初始化 -def hr_ai2d_init(): - with ScopedTiming("hr_ai2d_init",debug_mode > 0): - global hr_ai2d, hr_ai2d_output_tensor - hr_ai2d = nn.ai2d() - hr_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hr_kmodel_input_shape, dtype=np.uint8) - hr_ai2d_output_tensor = nn.from_numpy(data) - -# 手势识别 ai2d 运行 -def hr_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hr_ai2d_run",debug_mode > 0): - global hr_ai2d,hr_ai2d_input_tensor,hr_ai2d_output_tensor - hr_ai2d_input = rgb888p_img.to_numpy_ref() - hr_ai2d_input_tensor = nn.from_numpy(hr_ai2d_input) - - hr_ai2d.set_crop_param(True, x, y, w, h) - hr_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hr_ai2d_builder - hr_ai2d_builder = hr_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hr_kmodel_frame_size[1],hr_kmodel_frame_size[0]]) - hr_ai2d_builder.run(hr_ai2d_input_tensor, hr_ai2d_output_tensor) - -# 手势识别 ai2d 释放内存 -def hr_ai2d_release(): - with ScopedTiming("hr_ai2d_release",debug_mode > 0): - global hr_ai2d_input_tensor, hr_ai2d_builder - del hr_ai2d_input_tensor - del hr_ai2d_builder - -# 手势识别 kpu 初始化 -def hr_kpu_init(hr_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hr_kpu_init",debug_mode > 0): - hr_kpu_obj = nn.kpu() - hr_kpu_obj.load_kmodel(hr_kmodel_file) - - hr_ai2d_init() - return hr_kpu_obj - -# 手势识别 kpu 输入预处理 -def hr_kpu_pre_process(rgb888p_img, x, y, w, h): - hr_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hr_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hr_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hr_ai2d_output_tensor) - -# 手势识别 kpu 获得 kmodel 输出 -def hr_kpu_get_output(): - with ScopedTiming("hr_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# softmax实现 -def softmax(x): - x -= np.max(x) - x = np.exp(x) / np.sum(np.exp(x)) - return x - -# 手势识别 kpu 输出后处理 -def hr_kpu_post_process(results): - x_softmax = softmax(results[0]) - result = np.argmax(x_softmax) - text = " " + labels[result] + ": " + str(round(x_softmax[result],2)) - return text - -# 手势识别 kpu 运行 -def hr_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hr_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手势识别 kpu 运行 - with ScopedTiming("hr_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手势识别 ai2d 资源 - hr_ai2d_release() - # (4)获取手势识别 kpu 输出 - results = hr_kpu_get_output() - # (5)手势识别 kpu 结果后处理 - result = hr_kpu_post_process(results) - # (6)返回手势识别结果 - return result - -# 手势识别 kpu 释放内存 -def hr_kpu_deinit(): - with ScopedTiming("hr_kpu_deinit",debug_mode > 0): - if 'hr_ai2d' in globals(): #删除hr_ai2d变量,释放对它所引用对象的内存引用 - global hr_ai2d - del hr_ai2d - if 'hr_ai2d_output_tensor' in globals(): #删除hr_ai2d_output_tensor变量,释放对它所引用对象的内存引用 - global 
hr_ai2d_output_tensor - del hr_ai2d_output_tensor - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for hand_recognition.py********** -def hand_recognition_inference(): - print("hand_recognition start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_recognition = hr_kpu_init(hr_kmodel_file) # 创建手势识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = int(x2 - x1) - h = int(y2 
- y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - w_det = int(float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - h_det = int(float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - x_det = int(x1*DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y_det = int(y1*DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.1*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hr_results = hr_kpu_run(kpu_hand_recognition,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手势识别 kpu 运行 以及 后处理过程 - draw_img.draw_rectangle(x_det, y_det, w_det, h_det, color=(255, 0, 255, 0), thickness = 2) # 将得到的手掌检测结果 绘制到 display - draw_img.draw_string( x_det, y_det-50, hr_results, color=(255,0, 255, 0), scale=4) # 将得到的手势识别结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - if (count>10): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hr_kpu_deinit() # 释放手势识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_recognition - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("hand_recognition_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - hand_recognition_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py b/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py deleted file mode 100644 index 79af27329f06..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/keyword_spotting.py +++ /dev/null @@ -1,197 +0,0 @@ -from media.pyaudio import * # 音频模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import media.wave as wave # wav音频处理模块 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 -import time # 时间统计 -import struct # 字节字符转换模块 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 - -# key word spotting任务 -# 检测阈值 -THRESH = 0.5 -# 有关音频流的宏变量 -SAMPLE_RATE = 16000 # 采样率16000Hz,即每秒采样16000次 -CHANNELS = 1 # 通道数 1为单声道,2为立体声 -FORMAT = paInt16 # 音频输入输出格式 paInt16 -CHUNK = int(0.3 * 16000) # 每次读取音频数据的帧数,设置为0.3s的帧数16000*0.3=4800 - -root_dir='/sdcard/app/tests/' -kmodel_file_kws = root_dir+"kmodel/kws.kmodel" # kmodel加载路径 -reply_wav_file = root_dir+"utils/wozai.wav" # kws唤醒词回复音频路径 -debug_mode = 0 # 调试模式,大于0(调试)、 反之 (不调试) - - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = 
time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# 当前kmodel -global current_kmodel_obj # 定义全局kpu对象 -global p,cache_np,fp,input_stream,output_stream,audio_input_tensor,cache_input_tensor # 定义全局音频流对象,输入输出流对象,并且定义kws处理接口FeaturePipeline对象fp,输入输出tensor和缓冲cache_np - -# 初始化kws音频流相关变量 -def init_kws(): - with ScopedTiming("init_kws",debug_mode > 0): - global p,cache_np,fp,input_stream,output_stream,cache_input_tensor - # 初始化模型的cache输入 - cache_np = np.zeros((1, 256, 105), dtype=np.float) - cache_input_tensor = nn.from_numpy(cache_np) - # 初始化音频预处理接口 - fp = aidemo.kws_fp_create() - # 初始化音频流 - p = PyAudio() - p.initialize(CHUNK) - media.buffer_init() - # 用于采集实时音频数据 - input_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - input=True, - frames_per_buffer=CHUNK - ) - - # 用于播放回复音频 - output_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - output=True, - frames_per_buffer=CHUNK - ) - -# kws 初始化kpu -def kpu_init_kws(): - with ScopedTiming("init_kpu",debug_mode > 0): - # 初始化kpu并加载kmodel - kpu = nn.kpu() - kpu.load_kmodel(kmodel_file_kws) - return kpu - -# kws 释放kpu -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - global current_kmodel_obj,audio_input_tensor,cache_input_tensor - if "current_kmodel_obj" in globals(): - del current_kmodel_obj - if "audio_input_tensor" in globals(): - del audio_input_tensor - if "cache_input_tensor" in globals(): - del cache_input_tensor - -# kws音频预处理 -def kpu_pre_process_kws(pcm_data_list): - global current_kmodel_obj - global fp,input_stream,audio_input_tensor,cache_input_tensor - with ScopedTiming("pre_process",debug_mode > 0): - # 将pcm数据处理为模型输入的特征向量 - mp_feats = aidemo.kws_preprocess(fp, pcm_data_list)[0] - mp_feats_np = np.array(mp_feats) - mp_feats_np = mp_feats_np.reshape((1, 30, 40)) - audio_input_tensor = nn.from_numpy(mp_feats_np) - cache_input_tensor = nn.from_numpy(cache_np) - current_kmodel_obj.set_input_tensor(0, audio_input_tensor) - current_kmodel_obj.set_input_tensor(1, cache_input_tensor) - -# kws任务kpu运行并完成后处理 -def kpu_run_kws(kpu_obj,pcm_data_list): - global current_kmodel_obj,cache_np,output_stream - current_kmodel_obj = kpu_obj - # (1)kws音频数据预处理 - kpu_pre_process_kws(pcm_data_list) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)获取模型输出 - logits = kpu_obj.get_output_tensor(0) - cache_tensor = kpu_obj.get_output_tensor(1) # 更新缓存输入 - logits_np = logits.to_numpy() - cache_np=cache_tensor.to_numpy() - del logits - del cache_tensor - # (4)后处理argmax - max_logits = np.max(logits_np, axis=1)[0] - max_p = np.max(max_logits) - idx = np.argmax(max_logits) - # 如果分数大于阈值,且idx==1(即包含唤醒词),播放回复音频 - if max_p > THRESH and idx == 1: - print("====Detected XiaonanXiaonan!====") - wf = wave.open(reply_wav_file, "rb") - wav_data = wf.read_frames(CHUNK) - while wav_data: - output_stream.write(wav_data) - wav_data = wf.read_frames(CHUNK) - time.sleep(1) # 时间缓冲,用于播放声音 - wf.close() - else: - print("Deactivated!") - - -# kws推理过程 -def kws_inference(): - # 记录音频帧帧数 - global p,fp,input_stream,output_stream,current_kmodel_obj - # 初始化 - init_kws() - kpu_kws=kpu_init_kws() - pcm_data_list = [] - try: - gc_count=0 - while True: - os.exitpoint() - with ScopedTiming("total", 1): - pcm_data_list.clear() - # 对实时音频流进行推理 - pcm_data = input_stream.read() # 获取的音频流数据字节数,len(pcm_data)=0.3*16000*2=9600,即以16000Hz的采样率采样0.3s,每次采样数据为paInt16格式占2个字节 - # 获取音频流数据 - for i in range(0, len(pcm_data), 2): - # 
每两个字节组织成一个有符号整数,然后将其转换为浮点数,即为一次采样的数据,加入到当前一帧(0.3s)的数据列表中 - int_pcm_data = struct.unpack(" 10: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - input_stream.stop_stream() - output_stream.stop_stream() - input_stream.close() - output_stream.close() - p.terminate() - media.buffer_deinit() - aidemo.kws_fp_destroy(fp) - kpu_deinit() - del kpu_kws - if "current_kmodel_obj" in globals(): - del current_kmodel_obj - gc.collect() - nn.shrink_memory_pool() - -if __name__=="__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - kws_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/licence_det.py b/share/qtcreator/examples/04-AI-Demo/licence_det.py deleted file mode 100644 index dab592c1c7fc..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/licence_det.py +++ /dev/null @@ -1,319 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#车牌检测 kmodel 输入shape -kmodel_input_shape = (1,3,640,640) - -#车牌检测 相关参数设置 -obj_thresh = 0.2 #车牌检测分数阈值 -nms_thresh = 0.2 #检测框 非极大值抑制 阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/LPD_640.kmodel' # kmodel 文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 车牌检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_data): - with ScopedTiming("kpu_post_process", debug_mode > 0): - results = aidemo.licence_det_postprocess(output_data,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],obj_thresh,nms_thresh) - return results - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, 
ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - dets = kpu_post_process(results) - # (6) 返回 车牌检测框 结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有车牌检测框绘制到屏幕上 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - point_8 = np.zeros((8),dtype=np.int16) - for det in dets: - for i in range(4): - x = det[i * 2 + 0]/OUT_RGB888P_WIDTH*DISPLAY_WIDTH - y = det[i * 2 + 1]/OUT_RGB888P_HEIGHT*DISPLAY_HEIGHT - point_8[i * 2 + 0] = int(x) - point_8[i * 2 + 1] = int(y) - for i in range(4): - draw_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return 
rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for licence_det.py********** -def licence_det_inference(): - print("licence_det start") - kpu_licence_det = kpu_init(kmodel_file) # 创建车牌检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及后处理过程 - display_draw(dets) # 将得到的 检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_licence_det - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放整个media - - print("licence_det end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - licence_det_inference() - - diff --git a/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py b/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py deleted file mode 100644 index ef44eafb76a3..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/licence_det_rec.py +++ /dev/null @@ -1,457 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 
-import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGHT = 360 - -#车牌检测 和 车牌识别 kmodel输入shape -det_kmodel_input_shape = (1,3,640,640) -rec_kmodel_input_shape = (1,1,32,220) - -#车牌检测 相关参数设置 -obj_thresh = 0.2 #车牌检测分数阈值 -nms_thresh = 0.2 #检测框 非极大值抑制 阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -det_kmodel_file = root_dir + 'kmodel/LPD_640.kmodel' # 车牌检测 kmodel 文件路径 -rec_kmodel_file = root_dir + 'kmodel/licence_reco.kmodel' # 车牌识别 kmodel 文件路径 -#dict_rec = ["挂", "使", "领", "澳", "港", "皖", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "京", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂", "琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "警", "学", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] -dict_rec = ["gua","shi","ling","ao","gang","wan","hu","jin","yu","ji","jin","meng","liao","ji","hei","su","zhe","jing","min","gan","lu","yu","e","xiang","yue","gui","qiong","chuan","gui","yun","zang","shan","gan","qing","ning","xin","jing","xue","0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "_", "-"] -dict_size = len(dict_rec) -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global det_ai2d,det_ai2d_input_tensor,det_ai2d_output_tensor,det_ai2d_builder # 定义车牌检测 ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global rec_ai2d,rec_ai2d_input_tensor,rec_ai2d_output_tensor,rec_ai2d_builder # 定义车牌识别 ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder - -# 车牌检测 接收kmodel输出的后处理方法 -def det_kpu_post_process(output_data): - with ScopedTiming("det_kpu_post_process", debug_mode > 0): - results = aidemo.licence_det_postprocess(output_data,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[det_kmodel_input_shape[2],det_kmodel_input_shape[3]],obj_thresh,nms_thresh) - return results - -# 车牌识别 接收kmodel输出的后处理方法 -def rec_kpu_post_process(output_data): - with ScopedTiming("rec_kpu_post_process", debug_mode > 0): - size = rec_kmodel_input_shape[3] / 4 - result = [] - for i in range(size): - maxs = float("-inf") - index = -1 - for j in range(dict_size): - if (maxs < float(output_data[i * dict_size +j])): - index = j - maxs = output_data[i * dict_size +j] - result.append(index) - - result_str = "" - for i in range(size): - if (result[i] >= 0 and result[i] != 0 and not(i > 0 and result[i-1] == result[i])): - result_str += dict_rec[result[i]-1] - return result_str - -# 车牌检测 ai2d 初始化 -def det_ai2d_init(): - with ScopedTiming("det_ai2d_init",debug_mode > 0): - global det_ai2d - det_ai2d = nn.ai2d() - det_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - det_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global det_ai2d_out_tensor - data = np.ones(det_kmodel_input_shape, dtype=np.uint8) - 
det_ai2d_out_tensor = nn.from_numpy(data) - - global det_ai2d_builder - det_ai2d_builder = det_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], det_kmodel_input_shape) - -# 车牌识别 ai2d 初始化 -def rec_ai2d_init(): - with ScopedTiming("rec_ai2d_init",debug_mode > 0): - global rec_ai2d - rec_ai2d = nn.ai2d() - rec_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - global rec_ai2d_out_tensor - data = np.ones(rec_kmodel_input_shape, dtype=np.uint8) - rec_ai2d_out_tensor = nn.from_numpy(data) - - rec_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - -# 车牌检测 ai2d 运行 -def det_ai2d_run(rgb888p_img): - with ScopedTiming("det_ai2d_run",debug_mode > 0): - global det_ai2d_input_tensor,det_ai2d_out_tensor,det_ai2d_builder - det_ai2d_input = rgb888p_img.to_numpy_ref() - det_ai2d_input_tensor = nn.from_numpy(det_ai2d_input) - - det_ai2d_builder.run(det_ai2d_input_tensor, det_ai2d_out_tensor) - -# 车牌识别 ai2d 运行 -def rec_ai2d_run(img_array): - with ScopedTiming("rec_ai2d_run",debug_mode > 0): - global rec_ai2d_input_tensor,rec_ai2d_out_tensor,rec_ai2d_builder - rec_ai2d_builder = rec_ai2d.build([1,1,img_array.shape[2],img_array.shape[3]], rec_kmodel_input_shape) - rec_ai2d_input_tensor = nn.from_numpy(img_array) - - rec_ai2d_builder.run(rec_ai2d_input_tensor, rec_ai2d_out_tensor) - -# 车牌检测 ai2d 释放内存 -def det_ai2d_release(): - with ScopedTiming("det_ai2d_release",debug_mode > 0): - global det_ai2d_input_tensor - del det_ai2d_input_tensor - -# 车牌识别 ai2d 释放内存 -def rec_ai2d_release(): - with ScopedTiming("rec_ai2d_release",debug_mode > 0): - global rec_ai2d_input_tensor, rec_ai2d_builder - del rec_ai2d_input_tensor - del rec_ai2d_builder - -# 车牌检测 kpu 初始化 -def det_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("det_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - det_ai2d_init() - return kpu_obj - -# 车牌识别 kpu 初始化 -def rec_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("rec_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - rec_ai2d_init() - return kpu_obj - -# 车牌检测 kpu 输入预处理 -def det_kpu_pre_process(rgb888p_img): - det_ai2d_run(rgb888p_img) - with ScopedTiming("det_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,det_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, det_ai2d_out_tensor) - -# 车牌识别 kpu 输入预处理 -def rec_kpu_pre_process(img_array): - rec_ai2d_run(img_array) - with ScopedTiming("rec_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,rec_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, rec_ai2d_out_tensor) - -# 车牌识别 抠图 -def rec_array_pre_process(rgb888p_img,dets): - with ScopedTiming("rec_array_pre_process",debug_mode > 0): - isp_image = rgb888p_img.to_numpy_ref() - imgs_array_boxes = aidemo.ocr_rec_preprocess(isp_image,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],dets) - return imgs_array_boxes - -# 车牌检测 获取 kmodel 输出 -def det_kpu_get_output(): - with ScopedTiming("det_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# 车牌识别 获取 kmodel 输出 -def rec_kpu_get_output(): - with ScopedTiming("rec_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = 
data.to_numpy() - result = result.reshape((result.shape[0] * result.shape[1] * result.shape[2])) - tmp = result.copy() - del data - return tmp - -# 车牌检测 kpu 运行 -def det_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - det_kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("det_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - det_ai2d_release() - # (4) 获取kpu输出 - results = det_kpu_get_output() - # (5) kpu结果后处理 - dets = det_kpu_post_process(results) - # 返回 车牌检测结果 - return dets - -# 车牌识别 kpu 运行 -def rec_kpu_run(kpu_obj,rgb888p_img,dets): - global current_kmodel_obj - if (len(dets) == 0): - return [] - current_kmodel_obj = kpu_obj - # (1) 原始图像抠图,车牌检测结果 points 排序 - imgs_array_boxes = rec_array_pre_process(rgb888p_img,dets) - imgs_array = imgs_array_boxes[0] - boxes = imgs_array_boxes[1] - recs = [] - for img_array in imgs_array: - # (2) 抠出后的图像 进行预处理,设置模型输入 - rec_kpu_pre_process(img_array) - # (3) kpu 运行 - with ScopedTiming("rec_kpu_run",debug_mode > 0): - kpu_obj.run() - # (4) 释放ai2d资源 - rec_ai2d_release() - # (5) 获取 kpu 输出 - result = rec_kpu_get_output() - # (6) kpu 结果后处理 - rec = rec_kpu_post_process(result) - recs.append(rec) - # (7) 返回 车牌检测 和 识别结果 - return [boxes,recs] - - -# 车牌检测 kpu 释放内存 -def det_kpu_deinit(): - with ScopedTiming("det_kpu_deinit",debug_mode > 0): - if 'det_ai2d' in globals(): - global det_ai2d - del det_ai2d - if 'det_ai2d_builder' in globals(): - global det_ai2d_builder - del det_ai2d_builder - if 'det_ai2d_out_tensor' in globals(): - global det_ai2d_out_tensor - del det_ai2d_out_tensor - -# 车牌识别 kpu 释放内存 -def rec_kpu_deinit(): - with ScopedTiming("rec_kpu_deinit",debug_mode > 0): - if 'rec_ai2d' in globals(): - global rec_ai2d - del rec_ai2d - if 'rec_ai2d_out_tensor' in globals(): - global rec_ai2d_out_tensor - del rec_ai2d_out_tensor - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有车牌检测框 和 识别结果绘制到屏幕 -def display_draw(dets_recs): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets_recs: - dets = dets_recs[0] - recs = dets_recs[1] - draw_img.clear() - point_8 = np.zeros((8),dtype=np.int16) - for det_index in range(len(dets)): - for i in range(4): - x = dets[det_index][i * 2 + 0]/OUT_RGB888P_WIDTH*DISPLAY_WIDTH - y = dets[det_index][i * 2 + 1]/OUT_RGB888P_HEIGHT*DISPLAY_HEIGHT - point_8[i * 2 + 0] = int(x) - point_8[i * 2 + 1] = int(y) - for i in range(4): - draw_img.draw_line(point_8[i * 2 + 0],point_8[i * 2 + 1],point_8[(i+1) % 4 * 2 + 0],point_8[(i+1) % 4 * 2 + 1],color=(255, 0, 255, 0),thickness=4) - draw_img.draw_string( point_8[6], point_8[7] + 20, recs[det_index] , color=(255,255,153,18) , scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, 
PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for licence_det_rec.py********** -def licence_det_rec_inference(): - print("licence_det_rec start") - kpu_licence_det = det_kpu_init(det_kmodel_file) # 创建车牌检测的 kpu 对象 - kpu_licence_rec = rec_kpu_init(rec_kmodel_file) # 创建车牌识别的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = det_kpu_run(kpu_licence_det,rgb888p_img) # 执行车牌检测 kpu 运行 以及 后处理过程 - dets_recs = rec_kpu_run(kpu_licence_rec,rgb888p_img,dets) # 执行车牌识别 kpu 运行 以及 后处理过程 - display_draw(dets_recs) # 将得到的检测结果和识别结果 绘制到display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - det_kpu_deinit() # 释放 车牌检测 kpu - rec_kpu_deinit() # 释放 车牌识别 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_licence_det - del kpu_licence_rec - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("licence_det_rec end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - 
nn.shrink_memory_pool() - licence_det_rec_inference() - - diff --git a/share/qtcreator/examples/04-AI-Demo/nanotracker.py b/share/qtcreator/examples/04-AI-Demo/nanotracker.py deleted file mode 100644 index 1cab436c9c81..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/nanotracker.py +++ /dev/null @@ -1,610 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1280, 16) -OUT_RGB888P_HEIGHT = 720 - -#单目标跟踪 kmodel 输入 shape -crop_kmodel_input_shape = (1,3,127,127) -src_kmodel_input_shape = (1,3,255,255) - - -#单目标跟踪 相关参数设置 -head_thresh = 0.1 #单目标跟踪分数阈值 -CONTEXT_AMOUNT = 0.5 #跟踪框宽、高调整系数 -rgb_mean = [114,114,114] #padding颜色值 -ratio_src_crop = float(src_kmodel_input_shape[2])/float(crop_kmodel_input_shape[2]) #src模型和crop模型输入比值 -track_x1 = float(300) #起始跟踪目标框左上角点x -track_y1 = float(300) #起始跟踪目标框左上角点y -track_w = float(100) #起始跟踪目标框w -track_h = float(100) #起始跟踪目标框h - - -#文件配置 -root_dir = '/sdcard/app/tests/' -crop_kmodel_file = root_dir + 'kmodel/cropped_test127.kmodel' #单目标跟踪 crop kmodel 文件路径 -src_kmodel_file = root_dir + 'kmodel/nanotrack_backbone_sim.kmodel' #单目标跟踪 src kmodel 文件路径 -track_kmodel_file = root_dir + 'kmodel/nanotracker_head_calib_k230.kmodel' #单目标跟踪 head kmodel 文件路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global crop_ai2d,crop_ai2d_input_tensor,crop_ai2d_output_tensor,crop_ai2d_builder # 对应crop模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global crop_pad_ai2d,crop_pad_ai2d_input_tensor,crop_pad_ai2d_output_tensor,crop_pad_ai2d_builder # 对应crop模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global src_ai2d,src_ai2d_input_tensor,src_ai2d_output_tensor,src_ai2d_builder # 对应src模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global src_pad_ai2d,src_pad_ai2d_input_tensor,src_pad_ai2d_output_tensor,src_pad_ai2d_builder # 对应src模型: ai2d 对象 ,并且定义 ai2d 的输入、输出 以及 builder -global track_kpu_input_0,track_kpu_input_1 # 对应head模型: 两个输入 - - -# 单目标跟踪的后处理 -def track_kpu_post_process(output_data,center_xy_wh): - with ScopedTiming("track_kpu_post_process", debug_mode > 0): - det = aidemo.nanotracker_postprocess(output_data[0],output_data[1],[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],head_thresh,center_xy_wh,crop_kmodel_input_shape[2],CONTEXT_AMOUNT) - return det - -# 单目标跟踪 对应crop模型的 ai2d 初始化 -def crop_ai2d_init(): - with ScopedTiming("crop_ai2d_init",debug_mode > 0): - global crop_ai2d, crop_pad_ai2d - crop_ai2d = nn.ai2d() - crop_pad_ai2d = nn.ai2d() - - crop_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - 
crop_pad_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - - crop_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - crop_pad_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global crop_ai2d_out_tensor - data = np.ones(crop_kmodel_input_shape, dtype=np.uint8) - crop_ai2d_out_tensor = nn.from_numpy(data) - -# 单目标跟踪 对应crop模型的 ai2d 运行 -def crop_ai2d_run(rgb888p_img,center_xy_wh): - with ScopedTiming("crop_ai2d_run",debug_mode > 0): - global crop_ai2d, crop_pad_ai2d - global crop_ai2d_input_tensor,crop_ai2d_out_tensor,crop_ai2d_builder - global crop_pad_ai2d_input_tensor,crop_pad_ai2d_out_tensor,crop_pad_ai2d_builder - - s_z = round(np.sqrt((center_xy_wh[2] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])) * (center_xy_wh[3] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])))) - c = (s_z + 1) / 2 - context_xmin = np.floor(center_xy_wh[0] - c + 0.5) - context_xmax = int(context_xmin + s_z - 1) - context_ymin = np.floor(center_xy_wh[1] - c + 0.5) - context_ymax = int(context_ymin + s_z - 1) - - left_pad = int(max(0, -context_xmin)) - top_pad = int(max(0, -context_ymin)) - right_pad = int(max(0, int(context_xmax - OUT_RGB888P_WIDTH + 1))) - bottom_pad = int(max(0, int(context_ymax - OUT_RGB888P_HEIGHT + 1))) - context_xmin = context_xmin + left_pad - context_xmax = context_xmax + left_pad - context_ymin = context_ymin + top_pad - context_ymax = context_ymax + top_pad - - if (left_pad != 0 or right_pad != 0 or top_pad != 0 or bottom_pad != 0): - crop_pad_ai2d.set_pad_param(True, [0, 0, 0, 0, top_pad, bottom_pad, left_pad, right_pad], 0, rgb_mean) - crop_pad_ai2d_builder = crop_pad_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad]) - crop_pad_ai2d_input = rgb888p_img.to_numpy_ref() - crop_pad_ai2d_input_tensor = nn.from_numpy(crop_pad_ai2d_input) - crop_pad_ai2d_output = np.ones([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], dtype=np.uint8) - crop_pad_ai2d_out_tensor = nn.from_numpy(crop_pad_ai2d_output) - crop_pad_ai2d_builder.run(crop_pad_ai2d_input_tensor, crop_pad_ai2d_out_tensor) - - crop_ai2d.set_crop_param(True, int(context_xmin), int(context_ymin), int(context_xmax - context_xmin + 1), int(context_ymax - context_ymin + 1)) - crop_ai2d_builder = crop_ai2d.build([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], crop_kmodel_input_shape) - crop_ai2d_input_tensor = crop_pad_ai2d_out_tensor - crop_ai2d_builder.run(crop_ai2d_input_tensor, crop_ai2d_out_tensor) - del crop_pad_ai2d_input_tensor - del crop_pad_ai2d_out_tensor - del crop_pad_ai2d_builder - else: - crop_ai2d.set_crop_param(True, int(center_xy_wh[0] - s_z/2.0), int(center_xy_wh[1] - s_z/2.0), int(s_z), int(s_z)) - crop_ai2d_builder = crop_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], crop_kmodel_input_shape) - crop_ai2d_input = rgb888p_img.to_numpy_ref() - crop_ai2d_input_tensor = nn.from_numpy(crop_ai2d_input) - crop_ai2d_builder.run(crop_ai2d_input_tensor, crop_ai2d_out_tensor) - -# 单目标跟踪 对应crop模型的 ai2d 释放 -def crop_ai2d_release(): - with ScopedTiming("crop_ai2d_release",debug_mode > 0): - global crop_ai2d_input_tensor,crop_ai2d_builder - del crop_ai2d_input_tensor - del crop_ai2d_builder - - -# 单目标跟踪 对应src模型的 ai2d 初始化 -def src_ai2d_init(): - with ScopedTiming("src_ai2d_init",debug_mode > 0): - global src_ai2d, 
src_pad_ai2d - src_ai2d = nn.ai2d() - src_pad_ai2d = nn.ai2d() - - src_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - src_pad_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) - - src_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - src_pad_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global src_ai2d_out_tensor - data = np.ones(src_kmodel_input_shape, dtype=np.uint8) - src_ai2d_out_tensor = nn.from_numpy(data) - -# 单目标跟踪 对应src模型的 ai2d 运行 -def src_ai2d_run(rgb888p_img,center_xy_wh): - with ScopedTiming("src_ai2d_run",debug_mode > 0): - global src_ai2d, src_pad_ai2d - global src_ai2d_input_tensor,src_ai2d_out_tensor,src_ai2d_builder - global src_pad_ai2d_input_tensor,src_pad_ai2d_out_tensor,src_pad_ai2d_builder - - s_z = round(np.sqrt((center_xy_wh[2] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])) * (center_xy_wh[3] + CONTEXT_AMOUNT * (center_xy_wh[2] + center_xy_wh[3])))) * ratio_src_crop - c = (s_z + 1) / 2 - context_xmin = np.floor(center_xy_wh[0] - c + 0.5) - context_xmax = int(context_xmin + s_z - 1) - context_ymin = np.floor(center_xy_wh[1] - c + 0.5) - context_ymax = int(context_ymin + s_z - 1) - - left_pad = int(max(0, -context_xmin)) - top_pad = int(max(0, -context_ymin)) - right_pad = int(max(0, int(context_xmax - OUT_RGB888P_WIDTH + 1))) - bottom_pad = int(max(0, int(context_ymax - OUT_RGB888P_HEIGHT + 1))) - context_xmin = context_xmin + left_pad - context_xmax = context_xmax + left_pad - context_ymin = context_ymin + top_pad - context_ymax = context_ymax + top_pad - - if (left_pad != 0 or right_pad != 0 or top_pad != 0 or bottom_pad != 0): - src_pad_ai2d.set_pad_param(True, [0, 0, 0, 0, top_pad, bottom_pad, left_pad, right_pad], 0, rgb_mean) - src_pad_ai2d_builder = src_pad_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad]) - src_pad_ai2d_input = rgb888p_img.to_numpy_ref() - src_pad_ai2d_input_tensor = nn.from_numpy(src_pad_ai2d_input) - src_pad_ai2d_output = np.ones([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], dtype=np.uint8) - src_pad_ai2d_out_tensor = nn.from_numpy(src_pad_ai2d_output) - src_pad_ai2d_builder.run(src_pad_ai2d_input_tensor, src_pad_ai2d_out_tensor) - - src_ai2d.set_crop_param(True, int(context_xmin), int(context_ymin), int(context_xmax - context_xmin + 1), int(context_ymax - context_ymin + 1)) - src_ai2d_builder = src_ai2d.build([1, 3, OUT_RGB888P_HEIGHT + top_pad + bottom_pad, OUT_RGB888P_WIDTH + left_pad + right_pad], src_kmodel_input_shape) - src_ai2d_input_tensor = src_pad_ai2d_out_tensor - src_ai2d_builder.run(src_ai2d_input_tensor, src_ai2d_out_tensor) - del src_pad_ai2d_input_tensor - del src_pad_ai2d_out_tensor - del src_pad_ai2d_builder - else: - src_ai2d.set_crop_param(True, int(center_xy_wh[0] - s_z/2.0), int(center_xy_wh[1] - s_z/2.0), int(s_z), int(s_z)) - src_ai2d_builder = src_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], src_kmodel_input_shape) - src_ai2d_input = rgb888p_img.to_numpy_ref() - src_ai2d_input_tensor = nn.from_numpy(src_ai2d_input) - src_ai2d_builder.run(src_ai2d_input_tensor, src_ai2d_out_tensor) - -# 单目标跟踪 对应src模型的 ai2d 释放 -def src_ai2d_release(): - with ScopedTiming("src_ai2d_release",debug_mode > 0): - global src_ai2d_input_tensor,src_ai2d_builder - del src_ai2d_input_tensor - del src_ai2d_builder - - -# 单目标跟踪 crop 
kpu 初始化 -def crop_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("crop_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - crop_ai2d_init() - return kpu_obj - -# 单目标跟踪 crop kpu 输入预处理 -def crop_kpu_pre_process(rgb888p_img,center_xy_wh): - crop_ai2d_run(rgb888p_img,center_xy_wh) - with ScopedTiming("crop_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,crop_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, crop_ai2d_out_tensor) - -# 单目标跟踪 crop kpu 获取输出 -def crop_kpu_get_output(): - with ScopedTiming("crop_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -# 单目标跟踪 crop kpu 运行 -def crop_kpu_run(kpu_obj,rgb888p_img,center_xy_wh): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - crop_kpu_pre_process(rgb888p_img,center_xy_wh) - # (2) kpu 运行 - with ScopedTiming("crop_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - crop_ai2d_release() - # (4) 获取kpu输出 - result = crop_kpu_get_output() - # 返回 crop kpu 的输出 - return result - -# 单目标跟踪 crop kpu 释放 -def crop_kpu_deinit(): - with ScopedTiming("crop_kpu_deinit",debug_mode > 0): - if 'crop_ai2d' in globals(): - global crop_ai2d - del crop_ai2d - if 'crop_pad_ai2d' in globals(): - global crop_pad_ai2d - del crop_pad_ai2d - if 'crop_ai2d_out_tensor' in globals(): - global crop_ai2d_out_tensor - del crop_ai2d_out_tensor - -# 单目标跟踪 src kpu 初始化 -def src_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("src_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - src_ai2d_init() - return kpu_obj - -# 单目标跟踪 src kpu 输入预处理 -def src_kpu_pre_process(rgb888p_img,center_xy_wh): - src_ai2d_run(rgb888p_img,center_xy_wh) - with ScopedTiming("src_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,src_ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, src_ai2d_out_tensor) - -# 单目标跟踪 src kpu 获取输出 -def src_kpu_get_output(): - with ScopedTiming("src_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - return result - -# 单目标跟踪 src kpu 运行 -def src_kpu_run(kpu_obj,rgb888p_img,center_xy_wh): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - src_kpu_pre_process(rgb888p_img,center_xy_wh) - # (2) kpu 运行 - with ScopedTiming("src_kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - src_ai2d_release() - # (4) 获取kpu输出 - result = src_kpu_get_output() - # 返回 src kpu 的输出 - return result - -# 单目标跟踪 src kpu 释放 -def src_kpu_deinit(): - with ScopedTiming("src_kpu_deinit",debug_mode > 0): - if 'src_ai2d' in globals(): - global src_ai2d - del src_ai2d - if 'src_pad_ai2d' in globals(): - global src_pad_ai2d - del src_pad_ai2d - if 'src_ai2d_out_tensor' in globals(): - global src_ai2d_out_tensor - del src_ai2d_out_tensor - -# 单目标跟踪 track kpu 初始化 -def track_kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("track_kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - return kpu_obj - -# 单目标跟踪 track kpu 输入预处理 -def track_kpu_pre_process(): - with ScopedTiming("track_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,track_kpu_input_0,track_kpu_input_1 - # set kpu input - current_kmodel_obj.set_input_tensor(0, track_kpu_input_0) - 
current_kmodel_obj.set_input_tensor(1, track_kpu_input_1) - -# 单目标跟踪 track kpu 获取输出 -def track_kpu_get_output(): - with ScopedTiming("track_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# 单目标跟踪 track kpu 运行 -def track_kpu_run(kpu_obj,center_xy_wh): - global current_kmodel_obj,track_kpu_input_1 - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - track_kpu_pre_process() - # (2) kpu 运行 - with ScopedTiming("track_kpu_run",debug_mode > 0): - kpu_obj.run() - - del track_kpu_input_1 - # (4) 获取kpu输出 - results = track_kpu_get_output() - # (5) track 后处理 - det = track_kpu_post_process(results,center_xy_wh) - # 返回 跟踪的结果 - return det - -# 单目标跟踪 track kpu 释放 -def track_kpu_deinit(): - with ScopedTiming("track_kpu_deinit",debug_mode > 0): - if 'track_kpu_input_0' in globals(): - global track_kpu_input_0 - del track_kpu_input_0 - - - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - 
media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for nanotracker.py********** -def nanotracker_inference(): - print("nanotracker start") - kpu_crop = crop_kpu_init(crop_kmodel_file) # 创建单目标跟踪 crop kpu 对象 - kpu_src = src_kpu_init(src_kmodel_file) # 创建单目标跟踪 src kpu 对象 - kpu_track = track_kpu_init(track_kmodel_file) # 创建单目标跟踪 track kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - run_bool = True - if (track_x1 < 50 or track_y1 < 50 or track_x1+track_w >= OUT_RGB888P_WIDTH-50 or track_y1+track_h >= OUT_RGB888P_HEIGHT-50): - print("**剪切范围超出图像范围**") - run_bool = False - - track_mean_x = track_x1 + track_w / 2.0 - track_mean_y = track_y1 + track_h / 2.0 - draw_mean_w = int(track_w / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - draw_mean_h = int(track_h / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - draw_mean_x = int(track_mean_x / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - draw_mean_w / 2.0) - draw_mean_y = int(track_mean_y / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT - draw_mean_h / 2.0) - track_w_src = track_w - track_h_src = track_h - - center_xy_wh = [track_mean_x,track_mean_y,track_w_src,track_h_src] - center_xy_wh_tmp = [track_mean_x,track_mean_y,track_w_src,track_h_src] - - seconds = 8 - endtime = time.time() + seconds - enter_init = True - - track_boxes = [track_x1,track_y1,track_w,track_h,1] - track_boxes_tmp = np.array([track_x1,track_y1,track_w,track_h,1]) - global draw_img,osd_img - - count = 0 - while run_bool: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - nowtime = time.time() - draw_img.clear() - if (enter_init and nowtime <= endtime): - print("倒计时: " + str(endtime - nowtime) + " 秒") - draw_img.draw_rectangle(draw_mean_x , draw_mean_y , draw_mean_w , draw_mean_h , color=(255, 0, 255, 0),thickness = 4) - print(" >>>>>> get trackWindow <<<<<<<<") - global track_kpu_input_0 - track_kpu_input_0 = nn.from_numpy(crop_kpu_run(kpu_crop,rgb888p_img,center_xy_wh)) - - time.sleep(1) - if (nowtime > endtime): - print(">>>>>>> Play <<<<<<<") - enter_init = False - else: - global track_kpu_input_1 - track_kpu_input_1 = nn.from_numpy(src_kpu_run(kpu_src,rgb888p_img,center_xy_wh)) - det = track_kpu_run(kpu_track,center_xy_wh) - track_boxes = det[0] - center_xy_wh = det[1] - track_bool = True - if (len(track_boxes) != 0): - track_bool = track_boxes[0] > 10 and track_boxes[1] > 10 and track_boxes[0] + track_boxes[2] < OUT_RGB888P_WIDTH - 10 and track_boxes[1] + track_boxes[3] < OUT_RGB888P_HEIGHT - 10 - else: - track_bool = False - - if (len(center_xy_wh) != 0): - track_bool = track_bool and center_xy_wh[2] * center_xy_wh[3] < 40000 - else: - track_bool = False - - if (track_bool): - center_xy_wh_tmp = center_xy_wh - track_boxes_tmp = track_boxes - x1 = int(float(track_boxes[0]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - y1 = int(float(track_boxes[1]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - w = int(float(track_boxes[2]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - h = int(float(track_boxes[3]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - draw_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) - else: - center_xy_wh = center_xy_wh_tmp - track_boxes = track_boxes_tmp - x1 = int(float(track_boxes[0]) * 
DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - y1 = int(float(track_boxes[1]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - w = int(float(track_boxes[2]) * DISPLAY_WIDTH / OUT_RGB888P_WIDTH) - h = int(float(track_boxes[3]) * DISPLAY_HEIGHT / OUT_RGB888P_HEIGHT) - draw_img.draw_rectangle(x1, y1, w, h, color=(255, 255, 0, 0),thickness = 4) - draw_img.draw_string( x1 , y1-50, "Step away from the camera, please !" , color=(255, 255 ,0 , 0), scale=4, thickness = 1) - draw_img.draw_string( x1 , y1-100, "Near the center, please !" , color=(255, 255 ,0 , 0), scale=4, thickness = 1) - - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - crop_kpu_deinit() # 释放 单目标跟踪 crop kpu - src_kpu_deinit() # 释放 单目标跟踪 src kpu - track_kpu_deinit() # 释放 单目标跟踪 track kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_crop - del kpu_src - del kpu_track - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("nanotracker end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - nanotracker_inference() - - diff --git a/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py b/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py deleted file mode 100644 index 4557f66a9b5f..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/object_detect_yolov8n.py +++ /dev/null @@ -1,436 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai 原图输入分辨率 -OUT_RGB888P_WIDTH = ALIGN_UP(320, 16) -OUT_RGB888P_HEIGHT = 320 - -#多目标检测 kmodel 输入 shape -kmodel_input_shape = (1,3,320,320) - -#多目标检测 相关参数设置 -confidence_threshold = 0.2 # 多目标检测分数阈值 -nms_threshold = 0.2 # 非最大值抑制阈值 -x_factor = float(OUT_RGB888P_WIDTH)/kmodel_input_shape[3] # 原始图像分辨率宽与kmodel宽输入大小比值 -y_factor = float(OUT_RGB888P_HEIGHT)/kmodel_input_shape[2] # 原始图像分辨率高与kmodel高输入大小比值 -keep_top_k = 50 # 最大输出检测框的数量 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov8n_320.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#颜色板 用于作图 -color_four = [(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), - (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), - (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), - (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), - (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157), - (255, 110, 76, 0), (255, 174, 57, 255), (255, 199, 100, 0), (255, 72, 0, 118), - (255, 255, 179, 240), (255, 0, 125, 92), (255, 209, 0, 151), (255, 188, 208, 182), - (255, 0, 220, 176), (255, 255, 99, 164), (255, 92, 0, 73), (255, 133, 129, 255), - (255, 78, 180, 255), (255, 0, 228, 0), (255, 174, 255, 243), (255, 45, 89, 
255), - (255, 134, 134, 103), (255, 145, 148, 174), (255, 255, 208, 186), - (255, 197, 226, 255), (255, 171, 134, 1), (255, 109, 63, 54), (255, 207, 138, 255), - (255, 151, 0, 95), (255, 9, 80, 61), (255, 84, 105, 51), (255, 74, 65, 105), - (255, 166, 196, 102), (255, 208, 195, 210), (255, 255, 109, 65), (255, 0, 143, 149), - (255, 179, 0, 194), (255, 209, 99, 106), (255, 5, 121, 0), (255, 227, 255, 205), - (255, 147, 186, 208), (255, 153, 69, 1), (255, 3, 95, 161), (255, 163, 255, 0), - (255, 119, 0, 170), (255, 0, 182, 199), (255, 0, 165, 120), (255, 183, 130, 88), - (255, 95, 32, 0), (255, 130, 114, 135), (255, 110, 129, 133), (255, 166, 74, 118), - (255, 219, 142, 185), (255, 79, 210, 114), (255, 178, 90, 62), (255, 65, 70, 15), - (255, 127, 167, 115), (255, 59, 105, 106), (255, 142, 108, 45), (255, 196, 172, 0), - (255, 95, 54, 80), (255, 128, 76, 255), (255, 201, 57, 1), (255, 246, 0, 122), - (255, 191, 162, 208)] - -#标签 多目标检测的所有可识别类别 -labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# 多目标检测 非最大值抑制方法实现 -def py_cpu_nms(boxes,scores,thresh): - """Pure Python NMS baseline.""" - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - - areas = (x2 - x1 + 1) * (y2 - y1 + 1) - order = np.argsort(scores,axis = 0)[::-1] - - keep = [] - while order.size > 0: - i = order[0] - keep.append(i) - new_x1 = [] - new_x2 = [] - new_y1 = [] - new_y2 = [] - new_areas = [] - for order_i in order: - new_x1.append(x1[order_i]) - new_x2.append(x2[order_i]) - new_y1.append(y1[order_i]) - new_y2.append(y2[order_i]) - new_areas.append(areas[order_i]) - new_x1 = np.array(new_x1) - new_x2 = np.array(new_x2) - new_y1 = np.array(new_y1) - new_y2 = np.array(new_y2) - xx1 = np.maximum(x1[i], new_x1) - yy1 = np.maximum(y1[i], new_y1) - xx2 = np.minimum(x2[i], new_x2) - yy2 = np.minimum(y2[i], new_y2) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - inter = w * h - - new_areas = np.array(new_areas) - ovr = inter / (areas[i] + new_areas - inter) - new_order = [] - for ovr_i,ind in enumerate(ovr): - if ind < thresh: - 
new_order.append(order[ovr_i]) - order = np.array(new_order,dtype=np.uint8) - return keep - -# 多目标检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_data): - with ScopedTiming("kpu_post_process", debug_mode > 0): - boxes_ori = output_data[:,0:4] - scores_ori = output_data[:,4:] - confs_ori = np.max(scores_ori,axis=-1) - inds_ori = np.argmax(scores_ori,axis=-1) - - boxes = [] - scores = [] - inds = [] - - for i in range(len(boxes_ori)): - if confs_ori[i] > confidence_threshold: - scores.append(confs_ori[i]) - inds.append(inds_ori[i]) - x = boxes_ori[i,0] - y = boxes_ori[i,1] - w = boxes_ori[i,2] - h = boxes_ori[i,3] - left = int((x - 0.5 * w) * x_factor) - top = int((y - 0.5 * h) * y_factor) - right = int((x + 0.5 * w) * x_factor) - bottom = int((y + 0.5 * h) * y_factor) - boxes.append([left,top,right,bottom]) - - if len(boxes)==0: - return [] - - boxes = np.array(boxes) - scores = np.array(scores) - inds = np.array(inds) - - # do NMS - keep = py_cpu_nms(boxes,scores,nms_threshold) - dets = np.concatenate((boxes, scores.reshape((len(boxes),1)), inds.reshape((len(boxes),1))), axis=1) - - dets_out = [] - for keep_i in keep: - dets_out.append(dets[keep_i]) - dets_out = np.array(dets_out) - - # keep top-K faster NMS - dets_out = dets_out[:keep_top_k, :] - return dets_out - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - - result = result.reshape((result.shape[0] * result.shape[1], result.shape[2])) - result = result.transpose() - tmp2 = result.copy() - del data - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - dets = kpu_post_process(results[0]) - # (6) 返回多目标检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with 
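py_cpu_nms() above keeps the highest-scoring box and discards the remaining boxes whose overlap with it reaches the threshold. A standalone sketch of the same IoU test on two assumed boxes in (x1, y1, x2, y2) form, using the +1 convention from the code above:

# Sketch: IoU between two boxes, mirroring the overlap computation in py_cpu_nms() above.
def iou(a, b):
    xx1, yy1 = max(a[0], b[0]), max(a[1], b[1])
    xx2, yy2 = min(a[2], b[2]), min(a[3], b[3])
    w, h = max(0.0, xx2 - xx1 + 1), max(0.0, yy2 - yy1 + 1)
    inter = w * h
    area_a = (a[2] - a[0] + 1) * (a[3] - a[1] + 1)
    area_b = (b[2] - b[0] + 1) * (b[3] - b[1] + 1)
    return inter / (area_a + area_b - inter)

# iou((0, 0, 100, 100), (50, 50, 150, 150)) is about 0.15, below nms_threshold = 0.2,
# so the lower-scoring of these two boxes would survive that suppression round.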
ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标检测框以及类别、分数值的作图 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if dets: - draw_img.clear() - for det in dets: - x1, y1, x2, y2 = map(lambda x: int(round(x, 0)), det[:4]) - w = (x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = (y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - draw_img.draw_rectangle(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH, - y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT, w, h, color=color_four[int(det[5])],thickness=4) - draw_img.draw_string( int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) , int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT)-50, - " " + labels[int(det[5])] + " " + str(round(det[4],2)) , color=color_four[int(det[5])] , scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def 
media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for ob_detect.py********** -def ob_detect_inference(): - print("ob_detect start") - kpu_ob_detect = kpu_init(kmodel_file) # 创建多目标检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_ob_detect,rgb888p_img) # 执行多目标检测 kpu运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ob_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("ob_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ob_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/ocr_det.py b/share/qtcreator/examples/04-AI-Demo/ocr_det.py deleted file mode 100644 index 4b42c7ea7d0f..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/ocr_det.py +++ /dev/null @@ -1,369 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os,sys #操作系统接口模块 -import aicube #aicube模块,封装检测分割等任务相关后处理 - -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGH = 360 - -# kmodel输入参数配置 -kmodel_input_shape_det = (1,3,640,640) # kmodel输入分辨率 -rgb_mean = [0,0,0] # ai2d padding的值 - -# kmodel相关参数设置 -mask_threshold = 0.25 # 二值化mask阈值 -box_threshold = 0.3 # 检测框分数阈值 - -# 文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file_det = root_dir + 'kmodel/ocr_det_int16.kmodel' # kmodel加载路径 -debug_mode = 0 # 调试模式 大于0(调试)、 反之 (不调试) - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# ai utils -# 当前kmodel -global current_kmodel_obj # 定义全局kpu对象 -# ai2d_det: ai2d实例 -# ai2d_input_tensor_det: ai2d输入 -# ai2d_output_tensor_det: ai2d输出 -# ai2d_builder_det: 根据ai2d参数,构建的ai2d_builder_det对象 -# ai2d_input_det: ai2d输入的numpy数据 -global 
ai2d_det,ai2d_input_tensor_det,ai2d_output_tensor_det,ai2d_builder_det,ai2d_input_det # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# padding方法,一边padding,右padding或者下padding -def get_pad_one_side_param(out_img_size,input_img_size): - # 右padding或下padding - dst_w = out_img_size[0] - dst_h = out_img_size[1] - - input_width = input_img_size[0] - input_high = input_img_size[1] - - ratio_w = dst_w / input_width - ratio_h = dst_h / input_high - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * input_width) - new_h = (int)(ratio * input_high) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - - -# ai2d 初始化,用于实现输入的预处理 -def ai2d_init_det(): - with ScopedTiming("ai2d_init",debug_mode > 0): - # 创建ai2d对象 - global ai2d_det - ai2d_det = nn.ai2d() - # 设置ai2d参数 - ai2d_det.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_det.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_det[3],kmodel_input_shape_det[2]], [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]), 0, [0, 0, 0]) - ai2d_det.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - # 创建ai2d_output_tensor_det,用于保存ai2d的输出 - global ai2d_output_tensor_det - data = np.ones(kmodel_input_shape_det, dtype=np.uint8) - ai2d_output_tensor_det = nn.from_numpy(data) - - # ai2d_builder_det,根据ai2d参数、输入输出大小创建ai2d_builder_det对象 - global ai2d_builder_det - ai2d_builder_det = ai2d_det.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH], [1, 3, kmodel_input_shape_det[2], kmodel_input_shape_det[3]]) - - -# ai2d 运行,完成ai2d_init_det设定的预处理 -def ai2d_run_det(rgb888p_img): - # 对原图rgb888p_img进行预处理 - with ScopedTiming("ai2d_run",debug_mode > 0): - # 根据原图构建ai2d_input_tensor_det - global ai2d_input_tensor_det,ai2d_builder_det,ai2d_input_det,ai2d_output_tensor_det - ai2d_input_det = rgb888p_img.to_numpy_ref() - ai2d_input_tensor_det = nn.from_numpy(ai2d_input_det) - # 运行ai2d_builder_det,将结果保存到ai2d_output_tensor_det - ai2d_builder_det.run(ai2d_input_tensor_det, ai2d_output_tensor_det) - - -# ai2d 释放输入tensor -def ai2d_release_det(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor_det - del ai2d_input_tensor_det - -# kpu 初始化 -def kpu_init_det(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - ai2d_init_det() - return kpu_obj - -# 预处理方法 -def kpu_pre_process_det(rgb888p_img): - # 运行ai2d,将ai2d预处理的输出设置为kmodel的输入tensor - ai2d_run_det(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_det - # 将ai2d的输出设置为kmodel的输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_det) - -# 获取kmodel的推理输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - # 获取模型输出,并将结果转换为numpy,以便后续处理 - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# kpu 运行 -def kpu_run_det(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)原图像预处理并设置模型输入 - kpu_pre_process_det(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - #(3)释放ai2d资源 - 
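get_pad_one_side_param() above letterboxes the 640x360 AI frame into the 640x640 detector input by padding only the bottom and right. Working through those numbers as a quick check (values taken from the configuration above):

# Sketch: bottom/right padding for 640x360 -> 640x640, mirroring get_pad_one_side_param() above.
dst_w, dst_h = 640, 640                  # kmodel_input_shape_det[3], kmodel_input_shape_det[2]
in_w, in_h = 640, 360                    # OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH
ratio = min(dst_w / in_w, dst_h / in_h)  # 1.0, width is the limiting side
new_w, new_h = int(ratio * in_w), int(ratio * in_h)
dw, dh = (dst_w - new_w) / 2, (dst_h - new_h) / 2        # 0.0, 140.0
pad = [0, 0, 0, 0, 0, int(round(dh * 2 + 0.1)), 0, int(round(dw * 2 - 0.1))]
print(pad)                               # [0, 0, 0, 0, 0, 280, 0, 0]: 280 padded rows at the bottom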
ai2d_release_det() - #(4)获取kpu输出 - results = kpu_get_output() - #(5)CHW转HWC - global ai2d_input_det - tmp = (ai2d_input_det.shape[0], ai2d_input_det.shape[1], ai2d_input_det.shape[2]) - ai2d_input_det = ai2d_input_det.reshape((ai2d_input_det.shape[0], ai2d_input_det.shape[1] * ai2d_input_det.shape[2])) - ai2d_input_det = ai2d_input_det.transpose() - tmp2 = ai2d_input_det.copy() - tmp2 = tmp2.reshape((tmp[1], tmp[2], tmp[0])) - #(6)后处理,aicube.ocr_post_process接口说明: - # 接口:aicube.ocr_post_process(threshold_map,ai_isp,kmodel_input_shape,isp_shape,mask_threshold,box_threshold); - # 参数说明: - # threshold_map: DBNet模型的输出为(N,kmodel_input_shape_det[2],kmodel_input_shape_det[3],2),两个通道分别为threshold map和segmentation map - # 后处理过程只使用threshold map,因此将results[0][:,:,:,0] reshape成一维传给接口使用。 - # ai_isp:后处理还会返回基于原图的检测框裁剪数据,因此要将原图数据reshape为一维传给接口处理。 - # kmodel_input_shape:kmodel输入分辨率。 - # isp_shape:AI原图分辨率。要将kmodel输出分辨率的检测框坐标映射到原图分辨率上,需要使用这两个分辨率的值。 - # mask_threshold:用于二值化图像获得文本区域。 - # box_threshold:检测框分数阈值,低于该阈值的检测框不计入结果。 - with ScopedTiming("kpu_post",debug_mode > 0): - # 调用aicube模块的ocr_post_process完成ocr检测的后处理 - # det_results结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] - det_results = aicube.ocr_post_process(results[0][:, :, :, 0].reshape(-1), tmp2.reshape(-1), - [kmodel_input_shape_det[3], kmodel_input_shape_det[2]], - [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], mask_threshold, box_threshold) - return det_results - - -# kpu 释放内存 -def kpu_deinit_det(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_det,ai2d_output_tensor_det,ai2d_input_tensor_det - if "ai2d" in globals(): - del ai2d_det - if "ai2d_output_tensor_det" in globals(): - del ai2d_output_tensor_det - -#********************for media_utils.py******************** - -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -#display 初始化 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程,将OCR检测后处理得到的框绘制到OSD上并显示 -def display_draw(det_results): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if det_results: - draw_img.clear() - # 循环绘制所有检测到的框 - for j in det_results: - # 将原图的坐标点转换成显示的坐标点,循环绘制四条直线,得到一个矩形框 - for i in range(4): - x1 = j[1][(i * 2)] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y1 = j[1][(i * 2 + 1)] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x2 = j[1][((i + 1) * 2) % 8] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y2 = j[1][((i + 1) * 2 + 1) % 8] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - draw_img.draw_line((int(x1), int(y1), int(x2), int(y2)), color=(255, 0, 0, 255), - thickness=5) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 启动视频流 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 捕获一帧图像 -def camera_read(dev_id): - with 
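display_draw() above renders each detected text region as four line segments, using modulo-8 indexing so the last segment closes the quadrilateral back at the first corner. A tiny sketch of that corner pairing on an assumed flat coordinate list:

# Sketch: walk the 4 edges of a quad stored flat as [x0,y0,...,x3,y3], as in display_draw() above.
quad = [10, 10, 110, 12, 112, 60, 8, 58]             # assumed corner coordinates
for i in range(4):
    x1, y1 = quad[i * 2], quad[i * 2 + 1]
    x2, y2 = quad[((i + 1) * 2) % 8], quad[((i + 1) * 2 + 1) % 8]
    print((x1, y1), "->", (x2, y2))                  # last iteration wraps: (8, 58) -> (10, 10)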
ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 释放内存 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# 停止视频流 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - ret = media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - return ret - -# media 释放buffer,销毁link -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - global buffer,media_source, media_sink - if "buffer" in globals(): - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - media.destroy_link(media_source, media_sink) - media.buffer_deinit() - - -def ocr_det_inference(): - print("ocr_det_test start") - kpu_ocr_det = kpu_init_det(kmodel_file_det) # 创建ocr检测任务的kpu对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - # 启动camera - camera_start(CAM_DEV_ID_0) - gc_count=0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # 读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - # 若图像获取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取kmodel的推理输出 - display_draw(det_results) # 绘制检测结果,并显示 - # 释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # 释放内存 - if (gc_count>2): - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放display - kpu_deinit_det() # 释放kpu - if "current_kmodel_obj" in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ocr_det - gc.collect() - nn.shrink_memory_pool() - time.sleep(1) - media_deinit() # 释放整个media - print("ocr_det_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ocr_det_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/ocr_rec.py b/share/qtcreator/examples/04-AI-Demo/ocr_rec.py deleted file mode 100644 index c4d5b051553b..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/ocr_rec.py +++ /dev/null @@ -1,444 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * 
#显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aicube #aicube模块,封装检测分割等任务相关后处理 -import os, sys - -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(640, 16) -OUT_RGB888P_HEIGH = 360 - -#kmodel输入参数设置 -kmodel_input_shape_det = (1,3,640,640) # OCR检测模型的kmodel输入分辨率 -kmodel_input_shape_rec = (1,3,32,512) # OCR识别模型的kmodel输入分辨率 -rgb_mean = [0,0,0] # ai2d padding的值 - -#检测步骤kmodel相关参数设置 -mask_threshold = 0.25 # 二值化mask阈值 -box_threshold = 0.3 # 检测框分数阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file_det = root_dir + 'kmodel/ocr_det_int16.kmodel' # 检测模型路径 -kmodel_file_rec = root_dir + "kmodel/ocr_rec_int16.kmodel" # 识别模型路径 -dict_path = root_dir + 'utils/dict.txt' # 调试模式 大于0(调试)、 反之 (不调试) -debug_mode = 0 - -# OCR字典读取 -with open(dict_path, 'r') as file: - line_one = file.read(100000) - line_list = line_one.split("\r\n") -DICT = {num: char.replace("\r", "").replace("\n", "") for num, char in enumerate(line_list)} - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -# utils 设定全局变量 -# 当前kmodel -global current_kmodel_obj # 设置全局kpu对象 -# 检测阶段预处理应用的ai2d全局变量 -global ai2d_det,ai2d_input_tensor_det,ai2d_output_tensor_det,ai2d_builder_det,ai2d_input_det # 设置检测模型的ai2d对象,并定义ai2d的输入、输出和builder -# 识别阶段预处理应用的ai2d全局变量 -global ai2d_rec,ai2d_input_tensor_rec,ai2d_output_tensor_rec,ai2d_builder_rec # 设置识别模型的ai2d对象,并定义ai2d的输入、输出和builder - -# padding方法,一边padding,右padding或者下padding -def get_pad_one_side_param(out_img_size,input_img_size): - dst_w = out_img_size[0] - dst_h = out_img_size[1] - - input_width = input_img_size[0] - input_high = input_img_size[1] - - ratio_w = dst_w / input_width - ratio_h = dst_h / input_high - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * input_width) - new_h = (int)(ratio * input_high) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(0)) - bottom = (int)(round(dh * 2 + 0.1)) - left = (int)(round(0)) - right = (int)(round(dw * 2 - 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -# 检测步骤ai2d初始化 -def ai2d_init_det(): - with ScopedTiming("ai2d_init_det",debug_mode > 0): - global ai2d_det - ai2d_det = nn.ai2d() - ai2d_det.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_det.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_det[3],kmodel_input_shape_det[2]], [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH]), 0, [0, 0, 0]) - ai2d_det.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - global ai2d_output_tensor_det - data = np.ones(kmodel_input_shape_det, dtype=np.uint8) - ai2d_output_tensor_det = nn.from_numpy(data) - global ai2d_builder_det - ai2d_builder_det = ai2d_det.build([1, 3, OUT_RGB888P_HEIGH, OUT_RGB888P_WIDTH], [1, 3, kmodel_input_shape_det[2], kmodel_input_shape_det[3]]) - - -# 检测步骤的ai2d 运行,完成ai2d_init_det预设的预处理 -def ai2d_run_det(rgb888p_img): - with ScopedTiming("ai2d_run_det",debug_mode > 0): - global ai2d_input_tensor_det,ai2d_builder_det,ai2d_input_det - 
ai2d_input_det = rgb888p_img.to_numpy_ref() - ai2d_input_tensor_det = nn.from_numpy(ai2d_input_det) - global ai2d_output_tensor_det - ai2d_builder_det.run(ai2d_input_tensor_det, ai2d_output_tensor_det) - -# 识别步骤ai2d初始化 -def ai2d_init_rec(): - with ScopedTiming("ai2d_init_res",debug_mode > 0): - global ai2d_rec,ai2d_output_tensor_rec - ai2d_rec = nn.ai2d() - ai2d_rec.set_dtype(nn.ai2d_format.RGB_packed, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d_out_data = np.ones((1, 3, kmodel_input_shape_rec[2], kmodel_input_shape_rec[3]), dtype=np.uint8) - ai2d_output_tensor_rec = nn.from_numpy(ai2d_out_data) - - -# 识别步骤ai2d运行 -def ai2d_run_rec(rgb888p_img): - with ScopedTiming("ai2d_run_rec",debug_mode > 0): - global ai2d_rec,ai2d_builder_rec,ai2d_input_tensor_rec,ai2d_output_tensor_rec - ai2d_rec.set_pad_param(True, get_pad_one_side_param([kmodel_input_shape_rec[3],kmodel_input_shape_rec[2]],[rgb888p_img.shape[2],rgb888p_img.shape[1]]), 0, [0, 0, 0]) - ai2d_rec.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - ai2d_builder_rec = ai2d_rec.build([rgb888p_img.shape[0], rgb888p_img.shape[1], rgb888p_img.shape[2],rgb888p_img.shape[3]], - [1, 3, kmodel_input_shape_rec[2], kmodel_input_shape_rec[3]]) - ai2d_input_tensor_rec = nn.from_numpy(rgb888p_img) - ai2d_builder_rec.run(ai2d_input_tensor_rec, ai2d_output_tensor_rec) - -# 检测步骤ai2d释放内存 -def ai2d_release_det(): - with ScopedTiming("ai2d_release_det",debug_mode > 0): - if "ai2d_input_tensor_det" in globals(): - global ai2d_input_tensor_det - del ai2d_input_tensor_det - -# 识别步骤ai2d释放内存 -def ai2d_release_rec(): - with ScopedTiming("ai2d_release_rec",debug_mode > 0): - if "ai2d_input_tensor_rec" in globals(): - global ai2d_input_tensor_rec - del ai2d_input_tensor_rec - -# 检测步骤kpu初始化 -def kpu_init_det(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init_det",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - ai2d_init_det() - return kpu_obj - -# 识别步骤kpu初始化 -def kpu_init_rec(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init_rec",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - ai2d_init_rec() - return kpu_obj - -# 检测步骤预处理,调用ai2d_run_det实现,并将ai2d的输出设置为kmodel的输入 -def kpu_pre_process_det(rgb888p_img): - ai2d_run_det(rgb888p_img) - with ScopedTiming("kpu_pre_process_det",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_det - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_det) - -# 识别步骤预处理,调用ai2d_init_run_rec实现,并将ai2d的输出设置为kmodel的输入 -def kpu_pre_process_rec(rgb888p_img): - ai2d_run_rec(rgb888p_img) - with ScopedTiming("kpu_pre_process_rec",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor_rec - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor_rec) - - -# 获取kmodel的输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - del data - results.append(result) - return results - -# 检测步骤kpu运行 -def kpu_run_det(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)原图像预处理并设置模型输入 - kpu_pre_process_det(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run_det",debug_mode > 0): - # 检测运行 - kpu_obj.run() - #(3)检测释放ai2d资源 - ai2d_release_det() - #(4)获取检测kpu输出 - results = kpu_get_output() - #(5)CHW转HWC - global ai2d_input_det - tmp = 
(ai2d_input_det.shape[0], ai2d_input_det.shape[1], ai2d_input_det.shape[2]) - ai2d_input_det = ai2d_input_det.reshape((ai2d_input_det.shape[0], ai2d_input_det.shape[1] * ai2d_input_det.shape[2])) - ai2d_input_det = ai2d_input_det.transpose() - tmp2 = ai2d_input_det.copy() - tmp2 = tmp2.reshape((tmp[1], tmp[2], tmp[0])) - #(6)后处理,aicube.ocr_post_process接口说明: - # 接口:aicube.ocr_post_process(threshold_map,ai_isp,kmodel_input_shape,isp_shape,mask_threshold,box_threshold); - # 参数说明: - # threshold_map: DBNet模型的输出为(N,kmodel_input_shape_det[2],kmodel_input_shape_det[3],2),两个通道分别为threshold map和segmentation map - # 后处理过程只使用threshold map,因此将results[0][:,:,:,0] reshape成一维传给接口使用。 - # ai_isp:后处理还会返回基于原图的检测框裁剪数据,因此要将原图数据reshape为一维传给接口处理。 - # kmodel_input_shape:kmodel输入分辨率。 - # isp_shape:AI原图分辨率。要将kmodel输出分辨率的检测框坐标映射到原图分辨率上,需要使用这两个分辨率的值。 - # mask_threshold:用于二值化图像获得文本区域。 - # box_threshold:检测框分数阈值,低于该阈值的检测框不计入结果。 - with ScopedTiming("kpu_post",debug_mode > 0): - # 调用aicube模块的ocr_post_process完成ocr检测的后处理 - # det_results结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...] - det_results = aicube.ocr_post_process(results[0][:, :, :, 0].reshape(-1), tmp2.reshape(-1), - [kmodel_input_shape_det[3], kmodel_input_shape_det[2]], - [OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH], mask_threshold, box_threshold) - return det_results - -# 识别步骤后处理 -def kpu_run_rec(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - #(1)识别预处理并设置模型输入 - kpu_pre_process_rec(rgb888p_img) - #(2)kpu推理 - with ScopedTiming("kpu_run_rec",debug_mode > 0): - # 识别运行 - kpu_obj.run() - #(3)识别释放ai2d资源 - ai2d_release_rec() - #(4)获取识别kpu输出 - results = kpu_get_output() - #(5)识别后处理,results结构为[(N,MAX_LENGTH,DICT_LENGTH),...],在axis=2维度上取argmax获取当前识别字符在字典中的索引 - preds = np.argmax(results[0], axis=2).reshape((-1)) - output_txt = "" - for i in range(len(preds)): - # 当前识别字符不是字典的最后一个字符并且和前一个字符不重复(去重),加入识别结果字符串 - if preds[i] != (len(DICT) - 1) and (not (i > 0 and preds[i - 1] == preds[i])): - output_txt = output_txt + DICT[preds[i]] - return output_txt - -# 释放检测步骤kpu、ai2d以及ai2d相关的tensor -def kpu_deinit_det(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_det,ai2d_output_tensor_det - if "ai2d_det" in globals(): - del ai2d_det - if "ai2d_output_tensor_det" in globals(): - del ai2d_output_tensor_det - -# 释放识别步骤kpu -def kpu_deinit_rec(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - global ai2d_rec,ai2d_output_tensor_rec - if "ai2d_rec" in globals(): - del ai2d_rec - if "ai2d_output_tensor_rec" in globals(): - del ai2d_output_tensor_rec - - -#********************for media_utils.py******************** - -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# display初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# 释放display -def display_deinit(): - display.deinit() - -# display显示检测识别框 -def display_draw(det_results): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if det_results: - draw_img.clear() - # 循环绘制所有检测到的框 - for j in det_results: - # 将原图的坐标点转换成显示的坐标点,循环绘制四条直线,得到一个矩形框 - for i in range(4): - x1 = j[1][(i * 2)] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y1 = j[1][(i * 2 + 1)] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - x2 = j[1][((i + 1) * 2) % 8] / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - y2 = j[1][((i + 1) * 2 + 1) % 8] / OUT_RGB888P_HEIGH * DISPLAY_HEIGHT - 
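kpu_run_rec() above decodes the recognizer output greedily: take the argmax over the dictionary axis, skip the last dictionary index (the blank), and collapse consecutive repeats. A standalone sketch of that decode on a toy sequence; the dictionary and the prediction indices are assumptions for illustration:

# Sketch: greedy decode mirroring the loop in kpu_run_rec() above (toy data).
DICT = {0: "H", 1: "I", 2: ""}            # last index plays the role of the blank
preds = [0, 0, 2, 1, 1, 2, 1]             # per-timestep argmax indices

output_txt = ""
for i, p in enumerate(preds):
    if p != (len(DICT) - 1) and not (i > 0 and preds[i - 1] == p):
        output_txt += DICT[p]
print(output_txt)                         # "HII": repeats collapse, the blank separates real repeats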
draw_img.draw_line((int(x1), int(y1), int(x2), int(y2)), color=(255, 0, 0, 255), - thickness=5) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -# camera初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - # camera获取的通道0图像送display显示 - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # camera获取的通道2图像送ai处理 - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 启动视频流 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 捕获一帧图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 释放内存 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 停止视频流 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放buffer,销毁link -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - global buffer,media_source, media_sink - if "buffer" in globals(): - media.release_buffer(buffer) - if 'media_source' in globals() and 'media_sink' in globals(): - media.destroy_link(media_source, media_sink) - media.buffer_deinit() - -def ocr_rec_inference(): - print("ocr_rec_test start") - kpu_ocr_det = kpu_init_det(kmodel_file_det) # 创建OCR检测kpu对象 - kpu_ocr_rec = kpu_init_rec(kmodel_file_rec) # 创建OCR识别kpu对象 - camera_init(CAM_DEV_ID_0) # camera初始化 - display_init() # display初始化 - try: - media_init() - camera_start(CAM_DEV_ID_0) - gc_count=0 - while True: - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图像 - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - det_results = kpu_run_det(kpu_ocr_det,rgb888p_img) # kpu运行获取OCR检测kmodel的推理输出 - ocr_results="" - if det_results: - for j in det_results: - ocr_result = kpu_run_rec(kpu_ocr_rec,j[0]) # j[0]为检测框的裁剪部分,kpu运行获取OCR识别kmodel的推理输出 - ocr_results = ocr_results+" ["+ocr_result+"] " - gc.collect() - print("\n"+ocr_results) - display_draw(det_results) - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - if (gc_count>1): - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except 
KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - camera_stop(CAM_DEV_ID_0) # 停止camera - display_deinit() # 释放display - kpu_deinit_det() # 释放OCR检测步骤kpu - kpu_deinit_rec() # 释放OCR识别步骤kpu - if "current_kmodel_obj" in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_ocr_det - del kpu_ocr_rec - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放整个media - print("ocr_rec_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ocr_rec_inference() \ No newline at end of file diff --git a/share/qtcreator/examples/04-AI-Demo/person_detection.py b/share/qtcreator/examples/04-AI-Demo/person_detection.py deleted file mode 100644 index 9369bced187a..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/person_detection.py +++ /dev/null @@ -1,353 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGH = 1080 - -#kmodel输入shape -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 行人检测阈值,用于过滤roi -nms_threshold = 0.6 # 行人检测框阈值,用于过滤重复roi -kmodel_frame_size = [640,640] # 行人检测输入图片尺寸 -frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGH] # 直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS -labels = ["person"] # 模型输出类别名称 - -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/person_detect_yolov5n.kmodel' # kmodel文件的路径 -anchors = [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326] #anchor设置 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) -total_debug_mode = 1 - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - global ai2d_builder - global ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGH - width = kmodel_frame_size[0] - height = kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - 
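The strides listed above tie the 640x640 detector input to its three output grids; each per-level grid size follows directly from input_size // stride. A quick check of those numbers:

# Sketch: output grid sizes implied by strides = [8, 16, 32] for the 640x640 input above.
input_size = 640
strides = [8, 16, 32]
grids = [(input_size // s, input_size // s) for s in strides]
print(grids)                              # [(80, 80), (40, 40), (20, 20)]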
nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGH,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_output_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)行人检测 kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放行人检测 ai2d 资源 - ai2d_release() - # (4)获取行人检测 kpu 输出 - results = kpu_get_output() - # (5)行人检测 kpu 结果后处理 - with ScopedTiming("kpu_post_process",debug_mode > 0): - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], kmodel_frame_size, frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回行人检测结果 - return dets - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): - global ai2d_output_tensor - del ai2d_output_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 框出所有检测到的人以及标出得分 -def display_draw(dets): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - - if dets: - draw_img.clear() - for det_box in dets: - x1, y1, x2, y2 = det_box[2],det_box[3],det_box[4],det_box[5] - w = float(x2 - x1) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH - h = float(y2 - y1) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH - - x1 = int(x1 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = 
int(y1 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - x2 = int(x2 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y2 = int(y2 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGH) - - if (h<(0.1*DISPLAY_HEIGHT)): - continue - if (w<(0.25*DISPLAY_WIDTH) and ((x1<(0.03*DISPLAY_WIDTH)) or (x2>(0.97*DISPLAY_WIDTH)))): - continue - if (w<(0.15*DISPLAY_WIDTH) and ((x1<(0.01*DISPLAY_WIDTH)) or (x2>(0.99*DISPLAY_WIDTH)))): - continue - draw_img.draw_rectangle(x1 , y1 , int(w) , int(h) , color=(255, 0, 255, 0),thickness = 4) - draw_img.draw_string( x1 , y1-50, " " + labels[det_box[0]] + " " + str(round(det_box[1],2)) , color=(255,0, 255, 0), scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGH) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for person_detect.py********** -def person_detect_inference(): - print("person_detect_test start") - kpu_person_detect = kpu_init(kmodel_file) # 创建行人检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",total_debug_mode): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for 
rgb888planar - if rgb888p_img.format() == image.RGBP888: - dets = kpu_run(kpu_person_detect,rgb888p_img) # 执行行人检测 kpu 运行 以及 后处理过程 - display_draw(dets) # 将得到的检测结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_person_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("person_detect_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - person_detect_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py b/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py deleted file mode 100644 index 6baef0ca34fc..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/person_kp_detect.py +++ /dev/null @@ -1,370 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#人体关键点检测 kmodel 输入参数配置 -kmodel_input_shape = (1,3,640,640) # kmodel输入分辨率 -rgb_mean = [114,114,114] # ai2d padding 值 - -#人体关键点 相关参数设置 -confidence_threshold = 0.2 # 人体关键点检测分数阈值 -nms_threshold = 0.5 # 非最大值抑制阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/yolov8n-pose.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#骨骼信息 -SKELETON = [(16, 14),(14, 12),(17, 15),(15, 13),(12, 13),(6, 12),(7, 13),(6, 7),(6, 8),(7, 9),(8, 10),(9, 11),(2, 3),(1, 2),(1, 3),(2, 4),(3, 5),(4, 6),(5, 7)] -#肢体颜色 -LIMB_COLORS = [(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 51, 255),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0)] -#关键点颜色 -KPS_COLORS = [(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 0, 255, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 255, 128, 0),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255),(255, 51, 153, 255)] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global 
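ScopedTiming above (the same helper class repeated in each of these demos) prints how long a with-block took whenever profiling is enabled. A minimal usage sketch, assuming the class as defined above is available:

# Sketch: timing an arbitrary block with the ScopedTiming context manager defined above.
import time

with ScopedTiming("preprocess", True):    # second argument enables profiling, as in the demos
    time.sleep(0.01)                      # stand-in for real work
# prints roughly: "preprocess took 10.xx ms"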
ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 人体关键点检测 接收kmodel输出的后处理方法 -def kpu_post_process(output_datas): - with ScopedTiming("kpu_post_process", debug_mode > 0): - results = aidemo.person_kp_postprocess(output_datas,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],confidence_threshold,nms_threshold) - return results - -# 获取kmodel输入图像resize比例 以及 padding的上下左右像素数量 -def get_pad_param(): - #右padding或下padding - dst_w = kmodel_input_shape[3] - dst_h = kmodel_input_shape[2] - - ratio_w = float(dst_w) / OUT_RGB888P_WIDTH - ratio_h = float(dst_h) / OUT_RGB888P_HEIGHT - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGHT) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(dh)) - bottom = (int)(round(dh)) - left = (int)(round(dw)) - right = (int)(round(dw)) - return [0, 0, 0, 0, top, bottom, left, right] - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - data = current_kmodel_obj.get_output_tensor(0) - result = data.to_numpy() - del data - - return result - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - kp_res = kpu_post_process(results) - # (6) 返回 人体关键点检测 结果 - return kp_res - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 
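get_pad_param() above computes symmetric letterbox padding that fits the 1920x1080 AI frame into the 640x640 pose-model input. Working through those numbers (taken from the configuration above) as a check:

# Sketch: symmetric letterbox padding for 1920x1080 -> 640x640, mirroring get_pad_param() above.
dst_w, dst_h = 640, 640                   # kmodel_input_shape[3], kmodel_input_shape[2]
src_w, src_h = 1920, 1080                 # OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT
ratio = min(dst_w / src_w, dst_h / src_h) # 1/3, width is the limiting side
new_w, new_h = int(ratio * src_w), int(ratio * src_h)    # 640, 360
dw, dh = (dst_w - new_w) / 2, (dst_h - new_h) / 2        # 0.0, 140.0
pad = [0, 0, 0, 0, int(round(dh)), int(round(dh)), int(round(dw)), int(round(dw))]
print(pad)                                # [0, 0, 0, 0, 140, 140, 0, 0]: 140 rows above and below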
定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标关键点作图 -def display_draw(res): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img - if res[0]: - draw_img.clear() - kpses = res[1] - for i in range(len(res[0])): - for k in range(17+2): - if (k < 17): - kps_x = round(kpses[i][k][0]) - kps_y = round(kpses[i][k][1]) - kps_s = kpses[i][k][2] - - kps_x1 = int(float(kps_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - kps_y1 = int(float(kps_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - if (kps_s > 0): - draw_img.draw_circle(kps_x1,kps_y1,5,KPS_COLORS[k],4) - ske = SKELETON[k] - pos1_x = round(kpses[i][ske[0]-1][0]) - pos1_y = round(kpses[i][ske[0]-1][1]) - - pos1_x_ = int(float(pos1_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - pos1_y_ = int(float(pos1_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - pos2_x = round(kpses[i][(ske[1] -1)][0]) - pos2_y = round(kpses[i][(ske[1] -1)][1]) - - pos2_x_ = int(float(pos2_x) * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - pos2_y_ = int(float(pos2_y) * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - - pos1_s = kpses[i][(ske[0] -1)][2] - pos2_s = kpses[i][(ske[1] -1)][2] - - if (pos1_s > 0.0 and pos2_s >0.0): - draw_img.draw_line(pos1_x,pos1_y,pos2_x,pos2_y,LIMB_COLORS[k],4) - - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def 
media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for person_kp_detect.py********** -def person_kp_detect_inference(): - print("person_kp_detect start") - kpu_person_kp_detect = kpu_init(kmodel_file) # 创建人体关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - person_kp_detect_res = kpu_run(kpu_person_kp_detect,rgb888p_img) # 执行人体关键点检测 kpu 运行 以及 后处理过程 - display_draw(person_kp_detect_res) # 将得到的人体关键点结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_person_kp_detect - - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("person_kp_detect end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - person_kp_detect_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/puzzle_game.py b/share/qtcreator/examples/04-AI-Demo/puzzle_game.py deleted file mode 100644 index f64e023482f7..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/puzzle_game.py +++ /dev/null @@ -1,584 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import random -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold = 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -level = 3 # 游戏级别 目前只支持设置为 3 - - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 
手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def 
hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - 
-# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for puzzle_game.py********** -def puzzle_game_inference(): - print("puzzle_game_inference start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - global draw_img,osd_img - puzzle_width = DISPLAY_HEIGHT # 设定 拼图宽 - puzzle_height = DISPLAY_HEIGHT # 设定 拼图高 - 
puzzle_ori_width = DISPLAY_WIDTH - puzzle_width - 50 # 设定 原始拼图宽 - puzzle_ori_height = DISPLAY_WIDTH - puzzle_height - 50 # 设定 原始拼图高 - - every_block_width = int(puzzle_width/level) # 设定 拼图块宽 - every_block_height = int(puzzle_height/level) # 设定 拼图块高 - ori_every_block_width = int(puzzle_ori_width/level) # 设定 原始拼图宽 - ori_every_block_height = int(puzzle_ori_height/level) # 设定 原始拼图高 - ratio_num = every_block_width/360.0 # 字体比例 - blank_x = 0 # 空白块 角点x - blank_y = 0 # 空白块 角点y - direction_vec = [-1,1,-1,1] # 空白块四种移动方向 - - exact_division_x = 0 # 交换块 角点x - exact_division_y = 0 # 交换块 角点y - distance_tow_points = DISPLAY_WIDTH # 两手指距离 - distance_thred = every_block_width*0.4 # 两手指距离阈值 - - move_mat = np.zeros((every_block_height,every_block_width,4),dtype=np.uint8) - - osd_frame_tmp = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - osd_frame_tmp_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=osd_frame_tmp) - osd_frame_tmp[0:puzzle_height,0:puzzle_width,3] = 100 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,2] = 150 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,1] = 130 - osd_frame_tmp[0:puzzle_height,0:puzzle_width,0] = 127 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,3] = 100 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,2] = 150 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,1] = 130 - osd_frame_tmp[(1080-puzzle_ori_height)//2:(1080-puzzle_ori_height)//2+puzzle_ori_width,puzzle_width+25:puzzle_width+25+puzzle_ori_height,0] = 127 - for i in range(level*level): - osd_frame_tmp_img.draw_rectangle((i%level)*every_block_width,(i//level)*every_block_height,every_block_width,every_block_height,(255,0,0,0),5) - osd_frame_tmp_img.draw_string((i%level)*every_block_width + 55,(i//level)*every_block_height + 45,str(i),(255,0,0,255),30*ratio_num) - osd_frame_tmp_img.draw_rectangle(puzzle_width+25 + (i%level)*ori_every_block_width,(1080-puzzle_ori_height)//2 + (i//level)*ori_every_block_height,ori_every_block_width,ori_every_block_height,(255,0,0,0),5) - osd_frame_tmp_img.draw_string(puzzle_width+25 + (i%level)*ori_every_block_width + 50,(1080-puzzle_ori_height)//2 + (i//level)*ori_every_block_height + 25,str(i),(255,0,0,255),20*ratio_num) - osd_frame_tmp[0:every_block_height,0:every_block_width,3] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,2] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,1] = 114 - osd_frame_tmp[0:every_block_height,0:every_block_width,0] = 220 - - for i in range(level*10): - k230_random = int(random.random() * 100) % 4 - blank_x_tmp = blank_x - blank_y_tmp = blank_y - if (k230_random < 2): - blank_x_tmp = blank_x + direction_vec[k230_random] - else: - blank_y_tmp = blank_y + direction_vec[k230_random] - - if ((blank_x_tmp >= 0 and blank_x_tmp < level) and (blank_y_tmp >= 0 and blank_y_tmp < level) and (abs(blank_x - blank_x_tmp) <= 1 and abs(blank_y - blank_y_tmp) <= 1)): - move_rect = [blank_x_tmp*every_block_width,blank_y_tmp*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - 
osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_x = blank_x_tmp - blank_y = blank_y_tmp - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - two_point = np.zeros((4),dtype=np.int16) - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - osd_frame_tmp_img.copy_to(draw_img) - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - if (len(dets)==1): - for det_box in dets: - x1, y1, x2, y2 = int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - - results_show = np.zeros(hk_results[0].shape,dtype=np.int16) - results_show[0::2] = (hk_results[0][0::2] * w_kp + x1_kp) #* DISPLAY_WIDTH // OUT_RGB888P_WIDTH - results_show[1::2] = (hk_results[0][1::2] * h_kp + y1_kp) #* DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT - - two_point[0] = results_show[8+8] - two_point[1] = results_show[8+9] - two_point[2] = results_show[16+8] - two_point[3] = results_show[16+9] - - if (two_point[1] <= OUT_RGB888P_WIDTH): - distance_tow_points = np.sqrt(pow((two_point[0]-two_point[2]),2) + pow((two_point[1] - two_point[3]),2))* 1.0 / OUT_RGB888P_WIDTH * DISPLAY_WIDTH - exact_division_x = int((two_point[0] * 1.0 / OUT_RGB888P_WIDTH * DISPLAY_WIDTH)//every_block_width) - exact_division_y = int((two_point[1] * 1.0 / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT)//every_block_height) - - - if (distance_tow_points < distance_thred and exact_division_x >= 0 and exact_division_x < level and exact_division_y >= 0 and exact_division_y < level): - if (abs(blank_x - exact_division_x) == 1 and abs(blank_y - exact_division_y) == 0): - move_rect = [exact_division_x*every_block_width,exact_division_y*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_x = exact_division_x - elif (abs(blank_y - 
exact_division_y) == 1 and abs(blank_x - exact_division_x) == 0): - move_rect = [exact_division_x*every_block_width,exact_division_y*every_block_height,every_block_width,every_block_height] - blank_rect = [blank_x*every_block_width,blank_y*every_block_height,every_block_width,every_block_height] - - move_mat[:] = osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] - osd_frame_tmp[move_rect[1]:move_rect[1]+move_rect[3],move_rect[0]:move_rect[0]+move_rect[2],:] = osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] - osd_frame_tmp[blank_rect[1]:blank_rect[1]+blank_rect[3],blank_rect[0]:blank_rect[0]+blank_rect[2],:] = move_mat[:] - - blank_y = exact_division_y - - osd_frame_tmp_img.copy_to(draw_img) - x1 = int(two_point[0] * 1.0 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(two_point[1] * 1.0 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - draw_img.draw_circle(x1, y1, 1, color=(255, 0, 255, 255),thickness=4,fill=False) - else: - osd_frame_tmp_img.copy_to(draw_img) - x1 = int(two_point[0] * 1.0 * DISPLAY_WIDTH // OUT_RGB888P_WIDTH) - y1 = int(two_point[1] * 1.0 * DISPLAY_HEIGHT // OUT_RGB888P_HEIGHT) - draw_img.draw_circle(x1, y1, 1, color=(255, 255, 255, 0),thickness=4,fill=False) - else: - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - first_start = True - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("puzzle_game_inference end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - puzzle_game_inference() - diff --git a/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py b/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py deleted file mode 100644 index 12f3b6705d89..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/segment_yolov8n.py +++ /dev/null @@ -1,373 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -#ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(320, 16) -OUT_RGB888P_HEIGHT = 320 - -#多目标分割 kmodel 输入参数配置 -kmodel_input_shape = (1,3,320,320) # kmodel输入分辨率 -rgb_mean = [114,114,114] # ai2d padding 值 - -#多目标分割 相关参数设置 -confidence_threshold = 0.2 # 多目标分割分数阈值 -nms_threshold = 0.5 # 非最大值抑制阈值 -mask_thres = 0.5 # 多目标分割掩码阈值 - -#文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 
'kmodel/yolov8n_seg_320.kmodel' # kmodel文件的路径 -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#标签 多目标分割的所有可识别类别 -labels = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"] - -#颜色板 用于作图 -color_four = [(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), - (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), - (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), - (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), - (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157), - (255, 110, 76, 0), (255, 174, 57, 255), (255, 199, 100, 0), (255, 72, 0, 118), - (255, 255, 179, 240), (255, 0, 125, 92), (255, 209, 0, 151), (255, 188, 208, 182), - (255, 0, 220, 176), (255, 255, 99, 164), (255, 92, 0, 73), (255, 133, 129, 255), - (255, 78, 180, 255), (255, 0, 228, 0), (255, 174, 255, 243), (255, 45, 89, 255), - (255, 134, 134, 103), (255, 145, 148, 174), (255, 255, 208, 186), - (255, 197, 226, 255), (255, 171, 134, 1), (255, 109, 63, 54), (255, 207, 138, 255), - (255, 151, 0, 95), (255, 9, 80, 61), (255, 84, 105, 51), (255, 74, 65, 105), - (255, 166, 196, 102), (255, 208, 195, 210), (255, 255, 109, 65), (255, 0, 143, 149), - (255, 179, 0, 194), (255, 209, 99, 106), (255, 5, 121, 0), (255, 227, 255, 205), - (255, 147, 186, 208), (255, 153, 69, 1), (255, 3, 95, 161), (255, 163, 255, 0), - (255, 119, 0, 170), (255, 0, 182, 199), (255, 0, 165, 120), (255, 183, 130, 88), - (255, 95, 32, 0), (255, 130, 114, 135), (255, 110, 129, 133), (255, 166, 74, 118), - (255, 219, 142, 185), (255, 79, 210, 114), (255, 178, 90, 62), (255, 65, 70, 15), - (255, 127, 167, 115), (255, 59, 105, 106), (255, 142, 108, 45), (255, 196, 172, 0), - (255, 95, 54, 80), (255, 128, 76, 255), (255, 201, 57, 1), (255, 246, 0, 122), - (255, 191, 162, 208)] - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder # 定义全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder - -# 多目标分割 接收kmodel输出的后处理方法 -def kpu_post_process(output_datas): - with ScopedTiming("kpu_post_process", debug_mode > 0): - global masks - mask_dets = 
aidemo.segment_postprocess(output_datas,[OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH],[kmodel_input_shape[2],kmodel_input_shape[3]],[DISPLAY_HEIGHT,DISPLAY_WIDTH],confidence_threshold,nms_threshold,mask_thres,masks) - return mask_dets - -# 获取kmodel输入图像resize比例 以及 padding的上下左右像素数量 -def get_pad_param(): - #右padding或下padding - dst_w = kmodel_input_shape[3] - dst_h = kmodel_input_shape[2] - - ratio_w = float(dst_w) / OUT_RGB888P_WIDTH - ratio_h = float(dst_h) / OUT_RGB888P_HEIGHT - if ratio_w < ratio_h: - ratio = ratio_w - else: - ratio = ratio_h - - new_w = (int)(ratio * OUT_RGB888P_WIDTH) - new_h = (int)(ratio * OUT_RGB888P_HEIGHT) - dw = (dst_w - new_w) / 2 - dh = (dst_h - new_h) / 2 - - top = (int)(round(dh - 0.1)) - bottom = (int)(round(dh + 0.1)) - left = (int)(round(dw - 0.1)) - right = (int)(round(dw + 0.1)) - return [0, 0, 0, 0, top, bottom, left, right] - -# ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - ai2d.set_pad_param(True, get_pad_param(), 0, rgb_mean) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_out_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_out_tensor = nn.from_numpy(data) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - -# ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d_input_tensor,ai2d_out_tensor,ai2d_builder - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d_builder.run(ai2d_input_tensor, ai2d_out_tensor) - -# ai2d 释放内存 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor - del ai2d_input_tensor - -# kpu 初始化 -def kpu_init(kmodel_file): - # init kpu and load kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - kpu_obj = nn.kpu() - kpu_obj.load_kmodel(kmodel_file) - - ai2d_init() - return kpu_obj - -# kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_out_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, ai2d_out_tensor) - -# kpu 获得 kmodel 输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - data_0 = current_kmodel_obj.get_output_tensor(0) - result_0 = data_0.to_numpy() - del data_0 - results.append(result_0) - - data_1 = current_kmodel_obj.get_output_tensor(1) - result_1 = data_1.to_numpy() - del data_1 - results.append(result_1) - - return results - -# kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1) 原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2) kpu 运行 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3) 释放ai2d资源 - ai2d_release() - # (4) 获取kpu输出 - results = kpu_get_output() - # (5) kpu结果后处理 - seg_res = kpu_post_process(results) - # (6) 返回 分割 mask 结果 - return seg_res - -# kpu 释放内存 -def kpu_deinit(): - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_out_tensor' in globals(): - global ai2d_out_tensor - del ai2d_out_tensor - if 'ai2d_builder' in globals(): - global ai2d_builder - del ai2d_builder - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global 
buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -# display 作图过程 将所有目标分割对象以及类别、分数值的作图 -def display_draw(seg_res): - with ScopedTiming("display_draw",debug_mode >0): - global draw_img,osd_img,masks - if seg_res[0]: - dets = seg_res[0] - ids = seg_res[1] - scores = seg_res[2] - - for i, det in enumerate(dets): - x1, y1, w, h = map(lambda x: int(round(x, 0)), det) - draw_img.draw_string( int(x1) , int(y1)-50, " " + labels[int(ids[i])] + " " + str(round(scores[i],2)) , color=color_four[int(ids[i])], scale=4) - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - else: - draw_img.clear() - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((1,DISPLAY_HEIGHT,DISPLAY_WIDTH,4)) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - - -#**********for seg.py********** -def seg_inference(): - print("seg start") - kpu_seg = kpu_init(kmodel_file) # 创建多目标分割的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 
display - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - seg_res = kpu_run(kpu_seg,rgb888p_img) # 执行多目标分割 kpu 运行 以及 后处理过程 - display_draw(seg_res) # 将得到的分割结果 绘制到 display - - camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - kpu_deinit() # 释放 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_seg - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("seg end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - seg_inference() \ No newline at end of file diff --git a/share/qtcreator/examples/04-AI-Demo/self_learning.py b/share/qtcreator/examples/04-AI-Demo/self_learning.py deleted file mode 100644 index 612fdab029fd..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/self_learning.py +++ /dev/null @@ -1,408 +0,0 @@ -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import aidemo #aidemo模块,封装ai demo相关后处理、画图操作 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 -import time #时间统计 -import gc #垃圾回收模块 -import os #基本的操作系统交互功能 -import os, sys #操作系统接口模块 - -#********************for config.py******************** -# display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) # 显示宽度要求16位对齐 -DISPLAY_HEIGHT = 1080 - -# ai原图分辨率,sensor默认出图为16:9,若需不形变原图,最好按照16:9比例设置宽高 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) # ai原图宽度要求16位对齐 -OUT_RGB888P_HEIGHT = 1080 - -# kmodel参数设置 -# kmodel输入shape -kmodel_input_shape = (1,3,224,224) -# kmodel其它参数设置 -crop_w = 400 #图像剪切范围w -crop_h = 400 #图像剪切范围h -crop_x = OUT_RGB888P_WIDTH / 2.0 - crop_w / 2.0 #图像剪切范围x -crop_y = OUT_RGB888P_HEIGHT / 2.0 - crop_h / 2.0 #图像剪切范围y -thres = 0.5 #特征判别阈值 -top_k = 3 #识别范围 -categories = ['apple','banana'] #识别类别 -features = [2,2] #对应类别注册特征数量 -time_one = 100 #注册单个特征中途间隔帧数 - -# 文件配置 -# kmodel文件配置 -root_dir = '/sdcard/app/tests/' -kmodel_file = root_dir + 'kmodel/recognition.kmodel' -# 调试模型,0:不调试,>0:打印对应级别调试信息 -debug_mode = 0 - -#********************for scoped_timing.py******************** -# 时间统计类 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#********************for ai_utils.py******************** -# 当前kmodel -global current_kmodel_obj -# ai2d: ai2d实例 -# ai2d_input_tensor: ai2d输入 -# ai2d_output_tensor:ai2d输出 -# ai2d_builder: 根据ai2d参数,构建的ai2d_builder对象 -global ai2d,ai2d_input_tensor,ai2d_output_tensor,ai2d_builder #for ai2d - -# 获取两个特征向量的相似度 -def getSimilarity(output_vec,save_vec): - tmp = sum(output_vec * 
save_vec) - mold_out = np.sqrt(sum(output_vec * output_vec)) - mold_save = np.sqrt(sum(save_vec * save_vec)) - return tmp / (mold_out * mold_save) - -# 自学习 ai2d 初始化 -def ai2d_init(): - with ScopedTiming("ai2d_init",debug_mode > 0): - global ai2d - ai2d = nn.ai2d() - global ai2d_output_tensor - data = np.ones(kmodel_input_shape, dtype=np.uint8) - ai2d_output_tensor = nn.from_numpy(data) - -# 自学习 ai2d 运行 -def ai2d_run(rgb888p_img): - with ScopedTiming("ai2d_run",debug_mode > 0): - global ai2d,ai2d_input_tensor,ai2d_output_tensor - ai2d_input = rgb888p_img.to_numpy_ref() - ai2d_input_tensor = nn.from_numpy(ai2d_input) - - ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - - ai2d.set_crop_param(True,int(crop_x),int(crop_y),int(crop_w),int(crop_h)) - ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) - - global ai2d_builder - ai2d_builder = ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], kmodel_input_shape) - ai2d_builder.run(ai2d_input_tensor, ai2d_output_tensor) - -# 自学习 ai2d 释放 -def ai2d_release(): - with ScopedTiming("ai2d_release",debug_mode > 0): - global ai2d_input_tensor,ai2d_builder - del ai2d_input_tensor - del ai2d_builder - -# 自学习 kpu 初始化 -def kpu_init(kmodel_file): - # 初始化kpu对象,并加载kmodel - with ScopedTiming("kpu_init",debug_mode > 0): - # 初始化kpu对象 - kpu_obj = nn.kpu() - # 加载kmodel - kpu_obj.load_kmodel(kmodel_file) - # 初始化ai2d - ai2d_init() - return kpu_obj - -# 自学习 kpu 输入预处理 -def kpu_pre_process(rgb888p_img): - # 使用ai2d对原图进行预处理(crop,resize) - ai2d_run(rgb888p_img) - with ScopedTiming("kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,ai2d_output_tensor - # 将ai2d输出设置为kpu输入 - current_kmodel_obj.set_input_tensor(0, ai2d_output_tensor) - -# 自学习 kpu 获取输出 -def kpu_get_output(): - with ScopedTiming("kpu_get_output",debug_mode > 0): - global current_kmodel_obj - # 获取模型输出,并将结果转换为numpy,以便进行人脸检测后处理 - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape(-1) - del data - results.append(result) - return results - -# 自学习 kpu 运行 -def kpu_run(kpu_obj,rgb888p_img): - # kpu推理 - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - kpu_pre_process(rgb888p_img) - # (2)kpu推理 - with ScopedTiming("kpu_run",debug_mode > 0): - kpu_obj.run() - # (3)释放ai2d资源 - ai2d_release() - # (4)获取kpu输出 - results = kpu_get_output() - - # (5)返回输出 - return results - -# 自学习 kpu 释放 -def kpu_deinit(): - # kpu释放 - with ScopedTiming("kpu_deinit",debug_mode > 0): - if 'ai2d' in globals(): - global ai2d - del ai2d - if 'ai2d_output_tensor' in globals(): - global ai2d_output_tensor - del ai2d_output_tensor - -#********************for media_utils.py******************** -global draw_img,osd_img #for display -global buffer,media_source,media_sink #for media - -# for display,已经封装好,无需自己再实现,直接调用即可 -def display_init(): - # hdmi显示初始化 - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -def display_deinit(): - # 释放显示资源 - display.deinit() - -#for camera,已经封装好,无需自己再实现,直接调用即可 -def camera_init(dev_id): - # camera初始化 - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - 
camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -def camera_start(dev_id): - # camera启动 - camera.start_stream(dev_id) - -def camera_read(dev_id): - # 读取一帧图像 - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -def camera_release_image(dev_id,rgb888p_img): - # 释放一帧图像 - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -def camera_stop(dev_id): - # 停止camera - camera.stop_stream(dev_id) - -#for media,已经封装好,无需自己再实现,直接调用即可 -def media_init(): - # meida初始化 - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 用于画框 - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # 用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#********************for self_learning.py******************** -def self_learning_inference(): - print("self_learning_test start") - # kpu初始化 - kpu_self_learning = kpu_init(kmodel_file) - # camera初始化 - camera_init(CAM_DEV_ID_0) - # 显示初始化 - display_init() - - # 注意:将一定要将一下过程包在try中,用于保证程序停止后,资源释放完毕;确保下次程序仍能正常运行 - try: - # 注意:媒体初始化(注:媒体初始化必须在camera_start之前,确保media缓冲区已配置完全) - media_init() - - # 启动camera - camera_start(CAM_DEV_ID_0) - - crop_x_osd = int(crop_x / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - crop_y_osd = int(crop_y / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - crop_w_osd = int(crop_w / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - crop_h_osd = int(crop_h / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - -# stat_info = os.stat(root_dir + 'utils/features') -# if (not (stat_info[0] & 0x4000)): -# os.mkdir(root_dir + 'utils/features') - - time_all = 0 - time_now = 0 - category_index = 0 - for i in range(len(categories)): - for j in range(features[i]): - time_all += time_one - - gc_count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - # (1)读取一帧图像 - rgb888p_img = camera_read(CAM_DEV_ID_0) - - # (2)若读取成功,推理当前帧 - if rgb888p_img.format() == image.RGBP888: - # (2.1)推理当前图像,并获取检测结果 - results = kpu_run(kpu_self_learning,rgb888p_img) - global draw_img, osd_img - draw_img.clear() - draw_img.draw_rectangle(crop_x_osd,crop_y_osd, crop_w_osd, crop_h_osd, color=(255, 255, 0, 255), thickness = 4) - - if (category_index < len(categories)): - time_now += 1 - draw_img.draw_string( 50 , 200, categories[category_index] + "_" + str(int(time_now-1) // time_one) + ".bin", 
color=(255,255,0,0), scale=7) - with open(root_dir + 'utils/features/' + categories[category_index] + "_" + str(int(time_now-1) // time_one) + ".bin", 'wb') as f: - f.write(results[0].tobytes()) - if (time_now // time_one == features[category_index]): - category_index += 1 - time_all -= time_now - time_now = 0 - else: - results_learn = [] - list_features = os.listdir(root_dir + 'utils/features/') - for feature in list_features: - with open(root_dir + 'utils/features/' + feature, 'rb') as f: - data = f.read() - save_vec = np.frombuffer(data, dtype=np.float) - score = getSimilarity(results[0], save_vec) - - if (score > thres): - res = feature.split("_") - is_same = False - for r in results_learn: - if (r["category"] == res[0]): - if (r["score"] < score): - r["bin_file"] = feature - r["score"] = score - is_same = True - - if (not is_same): - if(len(results_learn) < top_k): - evec = {} - evec["category"] = res[0] - evec["score"] = score - evec["bin_file"] = feature - results_learn.append( evec ) - results_learn = sorted(results_learn, key=lambda x: -x["score"]) - else: - if( score <= results_learn[top_k-1]["score"] ): - continue - else: - evec = {} - evec["category"] = res[0] - evec["score"] = score - evec["bin_file"] = feature - results_learn.append( evec ) - results_learn = sorted(results_learn, key=lambda x: -x["score"]) - - results_learn.pop() - draw_y = 200 - for r in results_learn: - draw_img.draw_string( 50 , draw_y, r["category"] + " : " + str(r["score"]), color=(255,255,0,0), scale=7) - draw_y += 50 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - - # (3)释放当前帧 - camera_release_image(CAM_DEV_ID_0,rgb888p_img) - - if gc_count > 5: - gc.collect() - gc_count = 0 - else: - gc_count += 1 - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - # 停止camera - camera_stop(CAM_DEV_ID_0) - # 释放显示资源 - display_deinit() - # 释放kpu资源 - kpu_deinit() - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_self_learning - # 删除features文件夹 - stat_info = os.stat(root_dir + 'utils/features') - if (stat_info[0] & 0x4000): - list_files = os.listdir(root_dir + 'utils/features') - for l in list_files: - os.remove(root_dir + 'utils/features/' + l) - # 垃圾回收 - gc.collect() - # 释放媒体资源 - nn.shrink_memory_pool() - media_deinit() - - print("self_learning_test end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - self_learning_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/space_resize.py b/share/qtcreator/examples/04-AI-Demo/space_resize.py deleted file mode 100644 index 74d5b288dcf4..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/space_resize.py +++ /dev/null @@ -1,572 +0,0 @@ -import aicube #aicube模块,封装检测分割等任务相关后处理 -from media.camera import * #摄像头模块 -from media.display import * #显示模块 -from media.media import * #软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 - -import nncase_runtime as nn #nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np #类似python numpy操作,但也会有一些接口不同 - -import time #时间统计 -import image #图像模块,主要用于读取、图像绘制元素(框、点等)等操作 - -import gc #垃圾回收模块 -import os, sys #操作系统接口模块 - -##config.py -#display分辨率 -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -##ai原图分辨率输入 -OUT_RGB888P_WIDTH = ALIGN_UP(1920, 16) -OUT_RGB888P_HEIGHT = 1080 - -#--------for hand detection---------- -#kmodel输入shape -hd_kmodel_input_shape = (1,3,512,512) # 手掌检测kmodel输入分辨率 - -#kmodel相关参数设置 -confidence_threshold 
= 0.2 # 手掌检测阈值,用于过滤roi -nms_threshold = 0.5 # 手掌检测框阈值,用于过滤重复roi -hd_kmodel_frame_size = [512,512] # 手掌检测输入图片尺寸 -hd_frame_size = [OUT_RGB888P_WIDTH,OUT_RGB888P_HEIGHT] # 手掌检测直接输入图片尺寸 -strides = [8,16,32] # 输出特征图的尺寸与输入图片尺寸的比 -num_classes = 1 # 手掌检测模型输出类别数 -nms_option = False # 是否所有检测框一起做NMS,False则按照不同的类分别应用NMS - -root_dir = '/sdcard/app/tests/' -hd_kmodel_file = root_dir + "kmodel/hand_det.kmodel" # 手掌检测kmodel文件的路径 -anchors = [26,27, 53,52, 75,71, 80,99, 106,82, 99,134, 140,113, 161,172, 245,276] #anchor设置 - -#--------for hand keypoint detection---------- -#kmodel输入shape -hk_kmodel_input_shape = (1,3,256,256) # 手掌关键点检测kmodel输入分辨率 - -#kmodel相关参数设置 -hk_kmodel_frame_size = [256,256] # 手掌关键点检测输入图片尺寸 -hk_kmodel_file = root_dir + 'kmodel/handkp_det.kmodel' # 手掌关键点检测kmodel文件的路径 - -debug_mode = 0 # debug模式 大于0(调试)、 反之 (不调试) - -#scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -#ai_utils.py -global current_kmodel_obj # 定义全局的 kpu 对象 -global hd_ai2d,hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder # 定义手掌检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor,hk_ai2d_builder # 定义手掌关键点检测全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -global space_ai2d,space_ai2d_input_tensor,space_ai2d_output_tensor,space_ai2d_builder,space_draw_ai2d_release # 定义缩放剪切图像全局 ai2d 对象,并且定义 ai2d 的输入、输出 以及 builder -space_draw_ai2d_release = False - -#-------hand detect--------: -# 手掌检测ai2d 初始化 -def hd_ai2d_init(): - with ScopedTiming("hd_ai2d_init",debug_mode > 0): - global hd_ai2d - global hd_ai2d_builder - global hd_ai2d_output_tensor - # 计算padding值 - ori_w = OUT_RGB888P_WIDTH - ori_h = OUT_RGB888P_HEIGHT - width = hd_kmodel_frame_size[0] - height = hd_kmodel_frame_size[1] - ratiow = float(width) / ori_w - ratioh = float(height) / ori_h - if ratiow < ratioh: - ratio = ratiow - else: - ratio = ratioh - new_w = int(ratio * ori_w) - new_h = int(ratio * ori_h) - dw = float(width - new_w) / 2 - dh = float(height - new_h) / 2 - top = int(round(dh - 0.1)) - bottom = int(round(dh + 0.1)) - left = int(round(dw - 0.1)) - right = int(round(dw - 0.1)) - - # init kpu and load kmodel - hd_ai2d = nn.ai2d() - hd_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - hd_ai2d.set_pad_param(True, [0,0,0,0,top,bottom,left,right], 0, [114,114,114]) - hd_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - hd_ai2d_builder = hd_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,height,width]) - data = np.ones(hd_kmodel_input_shape, dtype=np.uint8) - hd_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌检测 ai2d 运行 -def hd_ai2d_run(rgb888p_img): - with ScopedTiming("hd_ai2d_run",debug_mode > 0): - global hd_ai2d_input_tensor,hd_ai2d_output_tensor,hd_ai2d_builder - hd_ai2d_input = rgb888p_img.to_numpy_ref() - hd_ai2d_input_tensor = nn.from_numpy(hd_ai2d_input) - - hd_ai2d_builder.run(hd_ai2d_input_tensor, hd_ai2d_output_tensor) - -# 手掌检测 ai2d 释放内存 -def hd_ai2d_release(): - with ScopedTiming("hd_ai2d_release",debug_mode > 0): - global hd_ai2d_input_tensor - del hd_ai2d_input_tensor - -# 手掌检测 kpu 初始化 -def 
hd_kpu_init(hd_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hd_kpu_init",debug_mode > 0): - hd_kpu_obj = nn.kpu() - hd_kpu_obj.load_kmodel(hd_kmodel_file) - - hd_ai2d_init() - return hd_kpu_obj - -# 手掌检测 kpu 输入预处理 -def hd_kpu_pre_process(rgb888p_img): - hd_ai2d_run(rgb888p_img) - with ScopedTiming("hd_kpu_pre_process",debug_mode > 0): - global current_kmodel_obj,hd_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hd_ai2d_output_tensor) - -# 手掌检测 kpu 获得 kmodel 输出 -def hd_kpu_get_output(): - with ScopedTiming("hd_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - result = result.reshape((result.shape[0]*result.shape[1]*result.shape[2]*result.shape[3])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌检测 kpu 运行 -def hd_kpu_run(kpu_obj,rgb888p_img): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hd_kpu_pre_process(rgb888p_img) - # (2)手掌检测 kpu 运行 - with ScopedTiming("hd_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌检测 ai2d 资源 - hd_ai2d_release() - # (4)获取手掌检测 kpu 输出 - results = hd_kpu_get_output() - # (5)手掌检测 kpu 结果后处理 - dets = aicube.anchorbasedet_post_process( results[0], results[1], results[2], hd_kmodel_frame_size, hd_frame_size, strides, num_classes, confidence_threshold, nms_threshold, anchors, nms_option) - # (6)返回手掌检测结果 - return dets - -# 手掌检测 kpu 释放内存 -def hd_kpu_deinit(): - with ScopedTiming("hd_kpu_deinit",debug_mode > 0): - if 'hd_ai2d' in globals(): - global hd_ai2d - del hd_ai2d - if 'hd_ai2d_output_tensor' in globals(): - global hd_ai2d_output_tensor - del hd_ai2d_output_tensor - if 'hd_ai2d_builder' in globals(): - global hd_ai2d_builder - del hd_ai2d_builder - -#-------hand keypoint detection------: -# 手掌关键点检测 ai2d 初始化 -def hk_ai2d_init(): - with ScopedTiming("hk_ai2d_init",debug_mode > 0): - global hk_ai2d, hk_ai2d_output_tensor - hk_ai2d = nn.ai2d() - hk_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.NCHW_FMT, - np.uint8, np.uint8) - data = np.ones(hk_kmodel_input_shape, dtype=np.uint8) - hk_ai2d_output_tensor = nn.from_numpy(data) - -# 手掌关键点检测 ai2d 运行 -def hk_ai2d_run(rgb888p_img, x, y, w, h): - with ScopedTiming("hk_ai2d_run",debug_mode > 0): - global hk_ai2d,hk_ai2d_input_tensor,hk_ai2d_output_tensor - hk_ai2d_input = rgb888p_img.to_numpy_ref() - hk_ai2d_input_tensor = nn.from_numpy(hk_ai2d_input) - - hk_ai2d.set_crop_param(True, x, y, w, h) - hk_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - global hk_ai2d_builder - hk_ai2d_builder = hk_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,3,hk_kmodel_frame_size[1],hk_kmodel_frame_size[0]]) - hk_ai2d_builder.run(hk_ai2d_input_tensor, hk_ai2d_output_tensor) - -# 手掌关键点检测 ai2d 释放内存 -def hk_ai2d_release(): - with ScopedTiming("hk_ai2d_release",debug_mode > 0): - global hk_ai2d_input_tensor,hk_ai2d_builder - del hk_ai2d_input_tensor - del hk_ai2d_builder - -# 手掌关键点检测 kpu 初始化 -def hk_kpu_init(hk_kmodel_file): - # init kpu and load kmodel - with ScopedTiming("hk_kpu_init",debug_mode > 0): - hk_kpu_obj = nn.kpu() - hk_kpu_obj.load_kmodel(hk_kmodel_file) - - hk_ai2d_init() - return hk_kpu_obj - -# 手掌关键点检测 kpu 输入预处理 -def hk_kpu_pre_process(rgb888p_img, x, y, w, h): - hk_ai2d_run(rgb888p_img, x, y, w, h) - with ScopedTiming("hk_kpu_pre_process",debug_mode > 0): - global 
current_kmodel_obj,hk_ai2d_output_tensor - # set kpu input - current_kmodel_obj.set_input_tensor(0, hk_ai2d_output_tensor) - -# 手掌关键点检测 kpu 获得 kmodel 输出 -def hk_kpu_get_output(): - with ScopedTiming("hk_kpu_get_output",debug_mode > 0): - global current_kmodel_obj - results = [] - for i in range(current_kmodel_obj.outputs_size()): - data = current_kmodel_obj.get_output_tensor(i) - result = data.to_numpy() - - result = result.reshape((result.shape[0]*result.shape[1])) - tmp2 = result.copy() - del result - results.append(tmp2) - return results - -# 手掌关键点检测 kpu 运行 -def hk_kpu_run(kpu_obj,rgb888p_img, x, y, w, h): - global current_kmodel_obj - current_kmodel_obj = kpu_obj - # (1)原图预处理,并设置模型输入 - hk_kpu_pre_process(rgb888p_img, x, y, w, h) - # (2)手掌关键点检测 kpu 运行 - with ScopedTiming("hk_kpu_run",debug_mode > 0): - current_kmodel_obj.run() - # (3)释放手掌关键点检测 ai2d 资源 - hk_ai2d_release() - # (4)获取手掌关键点检测 kpu 输出 - results = hk_kpu_get_output() - # (5)返回手掌关键点检测结果 - return results - -# 手掌关键点检测 kpu 释放内存 -def hk_kpu_deinit(): - with ScopedTiming("hk_kpu_deinit",debug_mode > 0): - if 'hk_ai2d' in globals(): - global hk_ai2d - del hk_ai2d - if 'hk_ai2d_output_tensor' in globals(): - global hk_ai2d_output_tensor - del hk_ai2d_output_tensor - -# 隔空缩放剪切 ai2d 初始化 -def space_ai2d_init(): - with ScopedTiming("space_ai2d_init",debug_mode > 0): - global space_ai2d - space_ai2d = nn.ai2d() - space_ai2d.set_dtype(nn.ai2d_format.NCHW_FMT, - nn.ai2d_format.RGB_packed, - np.uint8, np.uint8) - -# 隔空缩放剪切 ai2d 运行 -def space_ai2d_run(rgb888p_img, x, y, w, h, out_w, out_h): - with ScopedTiming("space_ai2d_run",debug_mode > 0): - global space_ai2d,space_ai2d_input_tensor,space_ai2d_output_tensor,space_draw_ai2d_release - space_draw_ai2d_release = True - space_ai2d_input = rgb888p_img.to_numpy_ref() - space_ai2d_input_tensor = nn.from_numpy(space_ai2d_input) - - space_ai2d.set_crop_param(True, x, y, w, h) - space_ai2d.set_resize_param(True, nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel ) - - data = np.ones((1,out_h, out_w,3), dtype=np.uint8) - space_ai2d_output_tensor = nn.from_numpy(data) - - global space_ai2d_builder - space_ai2d_builder = space_ai2d.build([1,3,OUT_RGB888P_HEIGHT,OUT_RGB888P_WIDTH], [1,out_h, out_w,3]) - space_ai2d_builder.run(space_ai2d_input_tensor, space_ai2d_output_tensor) - - space_np_out = space_ai2d_output_tensor.to_numpy() - return space_np_out - -# 隔空缩放剪切 ai2d 释放内存 -def space_ai2d_release(re_ai2d): - with ScopedTiming("space_ai2d_release",debug_mode > 0): - global space_ai2d_input_tensor,space_ai2d_output_tensor,space_ai2d_builder,space_draw_ai2d_release,space_ai2d - if (space_draw_ai2d_release): - del space_ai2d_input_tensor - del space_ai2d_output_tensor - del space_ai2d_builder - space_draw_ai2d_release = False - if (re_ai2d): - del space_ai2d - -#media_utils.py -global draw_img,osd_img,masks #for display 定义全局 作图image对象 -global buffer,media_source,media_sink #for media 定义 media 程序中的中间存储对象 - -#for display 初始化 -def display_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - -# display 释放内存 -def display_deinit(): - display.deinit() - -#for camera 初始化 -def camera_init(dev_id): - camera.sensor_init(dev_id, CAM_DEFAULT_SENSOR) - - # set chn0 output yuv420sp - camera.set_outsize(dev_id, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - - # set chn2 output rgb88planar - 
camera.set_outsize(dev_id, CAM_CHN_ID_2, OUT_RGB888P_WIDTH, OUT_RGB888P_HEIGHT) - camera.set_outfmt(dev_id, CAM_CHN_ID_2, PIXEL_FORMAT_RGB_888_PLANAR) - -# camera 开启 -def camera_start(dev_id): - camera.start_stream(dev_id) - -# camera 读取图像 -def camera_read(dev_id): - with ScopedTiming("camera_read",debug_mode >0): - rgb888p_img = camera.capture_image(dev_id, CAM_CHN_ID_2) - return rgb888p_img - -# camera 图像释放 -def camera_release_image(dev_id,rgb888p_img): - with ScopedTiming("camera_release_image",debug_mode >0): - camera.release_image(dev_id, CAM_CHN_ID_2, rgb888p_img) - -# camera 结束 -def camera_stop(dev_id): - camera.stop_stream(dev_id) - -#for media 初始化 -def media_init(): - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4 * DISPLAY_WIDTH * DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - - media.buffer_config(config) - - global media_source, media_sink - media_source = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - media_sink = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - media.create_link(media_source, media_sink) - - # 初始化多媒体buffer - media.buffer_init() - - global buffer, draw_img, osd_img, masks - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # 图层1,用于画框 - masks = np.zeros((DISPLAY_HEIGHT,DISPLAY_WIDTH,4),dtype=np.uint8) - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888,alloc=image.ALLOC_REF,data=masks) - # 图层2,用于拷贝画框结果,防止画框过程中发生buffer搬运 - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, poolid=buffer.pool_id, alloc=image.ALLOC_VB, - phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr) - -# media 释放内存 -def media_deinit(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - if 'buffer' in globals(): - global buffer - media.release_buffer(buffer) - - if 'media_source' in globals() and 'media_sink' in globals(): - global media_source, media_sink - media.destroy_link(media_source, media_sink) - - media.buffer_deinit() - -#**********for space_resize.py********** -def space_resize_inference(): - print("space_resize start") - kpu_hand_detect = hd_kpu_init(hd_kmodel_file) # 创建手掌检测的 kpu 对象 - kpu_hand_keypoint_detect = hk_kpu_init(hk_kmodel_file) # 创建手掌关键点检测的 kpu 对象 - camera_init(CAM_DEV_ID_0) # 初始化 camera - display_init() # 初始化 display - space_ai2d_init() # 初始化 隔空缩放剪切 ai2d 对象 - - try: - media_init() - - camera_start(CAM_DEV_ID_0) - - global draw_img,osd_img - first_start = True # 首次手掌入镜参数 - two_point_left_x = 0 # 中指食指包括范围 x - two_point_top_y = 0 # 中指食指包括范围 y - two_point_mean_w = 0 # 中指食指首次入镜包括范围 w - two_point_mean_h = 0 # 中指食指首次入镜包括范围 h - two_point_crop_w = 0 # 中指食指包括范围 w - two_point_crop_h = 0 # 中指食指包括范围 h - osd_plot_x = 0 # osd 画缩放图起始点 x - osd_plot_y = 0 # osd 画缩放图起始点 y - ori_new_ratio = 0 # 缩放比例 - new_resize_w = 0 # 缩放后 w - new_resize_h = 0 # 缩放后 h - crop_area = 0 # 剪切区域 - rect_frame_x = 0 # osd绘画起始点 x - rect_frame_y = 0 # osd绘画起始点 y - - count = 0 - while True: - # 设置当前while循环退出点,保证rgb888p_img正确释放 - os.exitpoint() - with ScopedTiming("total",1): - rgb888p_img = camera_read(CAM_DEV_ID_0) # 读取一帧图片 - - # for rgb888planar - if rgb888p_img.format() == image.RGBP888: - two_point = np.zeros((4),dtype=np.int16) - dets_no_pro = hd_kpu_run(kpu_hand_detect,rgb888p_img) # 执行手掌检测 kpu 运行 以及 后处理过程 - draw_img.clear() - - dets = [] - for det_box in dets_no_pro: - if det_box[4] < OUT_RGB888P_WIDTH - 10 : - dets.append(det_box) - - if (len(dets)==1): - for det_box in dets: - x1, y1, x2, y2 = 
int(det_box[2]),int(det_box[3]),int(det_box[4]),int(det_box[5]) - w = int(x2 - x1) - h = int(y2 - y1) - - if (h<(0.1*OUT_RGB888P_HEIGHT)): - continue - if (w<(0.25*OUT_RGB888P_WIDTH) and ((x1<(0.03*OUT_RGB888P_WIDTH)) or (x2>(0.97*OUT_RGB888P_WIDTH)))): - continue - if (w<(0.15*OUT_RGB888P_WIDTH) and ((x1<(0.01*OUT_RGB888P_WIDTH)) or (x2>(0.99*OUT_RGB888P_WIDTH)))): - continue - - length = max(w,h)/2 - cx = (x1+x2)/2 - cy = (y1+y2)/2 - ratio_num = 1.26*length - - x1_kp = int(max(0,cx-ratio_num)) - y1_kp = int(max(0,cy-ratio_num)) - x2_kp = int(min(OUT_RGB888P_WIDTH-1, cx+ratio_num)) - y2_kp = int(min(OUT_RGB888P_HEIGHT-1, cy+ratio_num)) - w_kp = int(x2_kp - x1_kp + 1) - h_kp = int(y2_kp - y1_kp + 1) - - hk_results = hk_kpu_run(kpu_hand_keypoint_detect,rgb888p_img, x1_kp, y1_kp, w_kp, h_kp) # 执行手掌关键点检测 kpu 运行 以及 后处理过程 - - results_show = np.zeros(hk_results[0].shape,dtype=np.int16) - results_show[0::2] = hk_results[0][0::2] * w_kp + x1_kp - results_show[1::2] = hk_results[0][1::2] * h_kp + y1_kp - - two_point[0] = results_show[8] - two_point[1] = results_show[9] - two_point[2] = results_show[16+8] - two_point[3] = results_show[16+9] - - if (first_start): - if (two_point[0] > 0 and two_point[0] < OUT_RGB888P_WIDTH and two_point[2] > 0 and two_point[2] < OUT_RGB888P_WIDTH and two_point[1] > 0 and two_point[1] < OUT_RGB888P_HEIGHT and two_point[3] > 0 and two_point[3] < OUT_RGB888P_HEIGHT): - two_point_mean_w = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 - two_point_mean_h = np.sqrt(pow(two_point[0] - two_point[2],2) + pow(two_point[1] - two_point[3],2))*0.8 - first_start = False - else: - two_point_left_x = int(max((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2, 0)) - two_point_top_y = int(max((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2, 0)) - two_point_crop_w = int(min(min((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2 + two_point_mean_w , two_point_mean_w), OUT_RGB888P_WIDTH - ((two_point[0] + two_point[2]) / 2 - two_point_mean_w / 2))) - two_point_crop_h = int(min(min((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2 + two_point_mean_h , two_point_mean_h), OUT_RGB888P_HEIGHT - ((two_point[1] + two_point[3]) / 2 - two_point_mean_h / 2))) - - ori_new_ratio = np.sqrt(pow((two_point[0] - two_point[2]),2) + pow((two_point[1] - two_point[3]),2))*0.8 / two_point_mean_w - - new_resize_w = min(int(two_point_crop_w * ori_new_ratio / OUT_RGB888P_WIDTH * DISPLAY_WIDTH),600) - new_resize_h = min(int(two_point_crop_h * ori_new_ratio / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT),600) - - rect_frame_x = int(two_point_left_x * 1.0 / OUT_RGB888P_WIDTH * DISPLAY_WIDTH) - rect_frame_y = int(two_point_top_y * 1.0 / OUT_RGB888P_HEIGHT * DISPLAY_HEIGHT) - - draw_w = min(new_resize_w,DISPLAY_WIDTH-rect_frame_x-1) - draw_h = min(new_resize_h,DISPLAY_HEIGHT-rect_frame_y-1) - - space_np_out = space_ai2d_run(rgb888p_img, two_point_left_x, two_point_top_y, two_point_crop_w, two_point_crop_h, new_resize_w, new_resize_h) # 运行 隔空缩放检测 ai2d - global masks - masks[rect_frame_y:rect_frame_y + draw_h,rect_frame_x:rect_frame_x + draw_w,0] = 255 - masks[rect_frame_y:rect_frame_y + draw_h,rect_frame_x:rect_frame_x + draw_w,1:4] = space_np_out[0][0:draw_h,0:draw_w,:] - space_ai2d_release(False) # 释放 隔空缩放检测 ai2d 相关对象 - - - draw_img.draw_rectangle(rect_frame_x, rect_frame_y, new_resize_w, new_resize_h, color=(255, 0, 255, 0),thickness = 4) - else: - draw_img.draw_string( 300 , 500, "Must have one hand !", color=(255,255,0,0), scale=7) - first_start = True - - 
camera_release_image(CAM_DEV_ID_0,rgb888p_img) # camera 释放图像 - - if (count > 5): - gc.collect() - count = 0 - else: - count += 1 - - draw_img.copy_to(osd_img) - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD3) - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - - camera_stop(CAM_DEV_ID_0) # 停止 camera - display_deinit() # 释放 display - space_ai2d_release(True) # 释放 隔空缩放检测 ai2d 相关对象 - hd_kpu_deinit() # 释放手掌检测 kpu - hk_kpu_deinit() # 释放手掌关键点检测 kpu - if 'current_kmodel_obj' in globals(): - global current_kmodel_obj - del current_kmodel_obj - del kpu_hand_detect - del kpu_hand_keypoint_detect - - if 'draw_img' in globals(): - global draw_img - del draw_img - if 'masks' in globals(): - global masks - del masks - gc.collect() - nn.shrink_memory_pool() - media_deinit() # 释放 整个media - - print("space_resize end") - return 0 - -if __name__ == '__main__': - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - space_resize_inference() diff --git a/share/qtcreator/examples/04-AI-Demo/tts_zh.py b/share/qtcreator/examples/04-AI-Demo/tts_zh.py deleted file mode 100644 index fab0897ffc71..000000000000 --- a/share/qtcreator/examples/04-AI-Demo/tts_zh.py +++ /dev/null @@ -1,218 +0,0 @@ -from media.pyaudio import * # 音频模块 -from media.media import * # 软件抽象模块,主要封装媒体数据链路以及媒体缓冲区 -import media.wave as wave # wav音频处理模块 -import nncase_runtime as nn # nncase运行模块,封装了kpu(kmodel推理)和ai2d(图片预处理加速)操作 -import ulab.numpy as np # 类似python numpy操作,但也会有一些接口不同 -import time # 时间统计 -import aidemo # aidemo模块,封装ai demo相关前处理、后处理等操作 -import struct # 字节字符转换模块 -import gc # 垃圾回收模块 -import os,sys # 操作系统接口模块 - -# 有关音频流的宏变量 -SAMPLE_RATE = 24000 # 采样率24000Hz,即每秒采样24000次 -CHANNELS = 1 # 通道数 1为单声道,2为立体声 -FORMAT = paInt16 # 音频输入输出格式 paInt16 -CHUNK = int(0.3 * 24000) # 每次读取音频数据的帧数,设置为0.3s的帧数24000*0.3=7200 - -# tts_zh(文字转语音中文)任务 -root_dir='/sdcard/app/tests/' -# 拼音字典 -dict_path=root_dir+"utils/pinyin.txt" -# 汉字转拼音字典文件 -phase_path=root_dir+"utils/small_pinyin.txt" -# 拼音转音素映射文件 -mapfile=root_dir+"utils/phone_map.txt" -# 输入中文语句 -text="嘉楠科技研发了最新款的芯片" -# 中文tts encoder模型 -fastspeech1_path=root_dir+"kmodel/zh_fastspeech_1_f32.kmodel" -# 中文tts decoder模型 -fastspeech2_path=root_dir+"kmodel/zh_fastspeech_2.kmodel" -# 中文tts 声码器模型 -hifigan_path=root_dir+"kmodel/hifigan.kmodel" -# 生成音频存储路径 -save_wav_file = root_dir+"test.wav" -debug_mode=1 - - -# scoped_timing.py 用于debug模式输出程序块运行时间 -class ScopedTiming: - def __init__(self, info="", enable_profile=True): - self.info = info - self.enable_profile = enable_profile - - def __enter__(self): - if self.enable_profile: - self.start_time = time.time_ns() - return self - - def __exit__(self, exc_type, exc_value, traceback): - if self.enable_profile: - elapsed_time = time.time_ns() - self.start_time - print(f"{self.info} took {elapsed_time / 1000000:.2f} ms") - -global ttszh - -def play_audio(): - # 初始化音频流 - p = PyAudio() - p.initialize(CHUNK) - ret = media.buffer_init() - if ret: - print("record_audio, buffer_init failed") - # 用于播放音频 - output_stream = p.open( - format=FORMAT, - channels=CHANNELS, - rate=SAMPLE_RATE, - output=True, - frames_per_buffer=CHUNK - ) - wf = wave.open(save_wav_file, "rb") - wav_data = wf.read_frames(CHUNK) - while wav_data: - output_stream.write(wav_data) - wav_data = wf.read_frames(CHUNK) - time.sleep(2) # 时间缓冲,用于播放声音 - wf.close() - output_stream.stop_stream() - output_stream.close() - p.terminate() - media.buffer_deinit() - -def ttszh_init(): - with ScopedTiming("init_ttszh",debug_mode > 0): - global ttszh - 
ttszh=aidemo.tts_zh_create(dict_path,phase_path,mapfile) - -# ttszh推理过程 -def ttszh_inference(): - global ttszh - try: - # ttszh初始化 - ttszh_init() - with ScopedTiming("preprocess",debug_mode > 0): - # ttszh数据预处理,获取data和data_len - preprocess_data=aidemo.tts_zh_preprocess(ttszh,text) - data=preprocess_data[0] - data_len=preprocess_data[1] - with ScopedTiming("encode run",debug_mode > 0): - # 创建编码器kpu,加载kmodel - kpu_enc = nn.kpu() - kpu_enc.load_kmodel(fastspeech1_path) - # 创建编码器模型输入并和模型绑定,编码器包含两个输入,一个是文字预处理的序列数据,一个是speaker数据 - # 编码器序列数据 - enc_seq_input_array=np.array(data) - enc_seq_input_tensor = nn.from_numpy(enc_seq_input_array) - # 编码器speaker数据 - enc_speaker_input_array=np.array([0.0]) - enc_speaker_input_tensor=nn.from_numpy(enc_speaker_input_array) - # 和模型绑定 - kpu_enc.set_input_tensor(1, enc_seq_input_tensor) - kpu_enc.set_input_tensor(0, enc_speaker_input_tensor) - # kpu运行 - kpu_enc.run() - # 获取kpu的输入 - enc_output_0=kpu_enc.get_output_tensor(0) - enc_output_1=kpu_enc.get_output_tensor(1) - enc_output_0_np=enc_output_0.to_numpy() - enc_output_1_np=enc_output_1.to_numpy() - with ScopedTiming("encode postprocess",debug_mode > 0): - # 给编码结果添加持续时间属性,每个音素编码向量按照持续时间重复 - duritions=enc_output_1_np[0][:int(data_len[0])] - durition_sum=int(np.sum(duritions)) - # 解码器输入维度为(1,600,256),不足部分需要padding - max_value=13 - while durition_sum>600: - for i in range(len(duritions)): - if duritions[i]>max_value: - duritions[i]=max_value - max_value=max_value-1 - durition_sum=np.sum(duritions) - dec_input=np.zeros((1,600,256),dtype=np.float) - m_pad=600-durition_sum - k=0 - for i in range(len(duritions)): - for j in range(int(duritions[i])): - dec_input[0][k]=enc_output_0_np[0][i] - k+=1 - with ScopedTiming("decode run",debug_mode > 0): - # 定义解码器kpu对象,并加载kmodel - kpu_dec = nn.kpu() - kpu_dec.load_kmodel(fastspeech2_path) - #设置解码器模型输入 - dec_input_tensor=nn.from_numpy(dec_input) - kpu_dec.set_input_tensor(0, dec_input_tensor) - # 运行 - kpu_dec.run() - # 获取解码器模型输出,维度为(1,80,600) - dec_output=kpu_dec.get_output_tensor(0) - dec_output_np=dec_output.to_numpy() - with ScopedTiming("decode postprocess",debug_mode > 0): - # 将有效信息拆分成一个个(1,80,100)的子向量,输入声码器生成音频 - subvector_num=durition_sum//100; - remaining=durition_sum%100; - if remaining>0: - subvector_num+=1 - hifi_input=np.zeros((1,80,subvector_num*100),dtype=np.float) - for i in range(durition_sum): - hifi_input[:,:,i]=dec_output_np[:,:,i] - - with ScopedTiming("hifigan run",debug_mode > 0): - # 定义hifigan声码器模型kpu对象,加载kmodel - kpu_hifi = nn.kpu() - kpu_hifi.load_kmodel(hifigan_path) - - - # 保存生成的所有梅尔声谱数据,后续保存成wav文件 - mel_data=[] - # 依次对每一个子向量进行声码器推理 - for i in range(subvector_num): - hifi_input_tmp=np.zeros((1,80,100),dtype=np.float) - - for j in range(80): - for k in range(i*100,(i+1)*100): - hifi_input_tmp[0][j][k-i*100]=hifi_input[0][j][k] - # 设置模型输入 - hifigan_input_tensor=nn.from_numpy(hifi_input_tmp) - kpu_hifi.set_input_tensor(0, hifigan_input_tensor) - # kpu运行 - kpu_hifi.run() - # 获取模型输出 - hifigan_output=kpu_hifi.get_output_tensor(0) - hifigan_output_np=hifigan_output.to_numpy() - # 汇总输出数据 - for j in range(25600): - mel_data.append(hifigan_output_np[0][0][j]) - del hifigan_input_tensor - with ScopedTiming("save wav file",debug_mode > 0): - # 将生成的音频数据保存为wav文件 - save_data=mel_data[:durition_sum*256] - save_len=len(save_data) - aidemo.save_wav(save_data,save_len,save_wav_file,SAMPLE_RATE) - aidemo.tts_zh_destroy(ttszh) - with ScopedTiming("play audio",debug_mode > 0): - play_audio() - del kpu_enc - del kpu_dec - del kpu_hifi - del enc_seq_input_tensor - del 
enc_speaker_input_tensor - del enc_output_0 - del enc_output_1 - del dec_input_tensor - del dec_output - del ttszh - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - gc.collect() - - -if __name__=="__main__": - os.exitpoint(os.EXITPOINT_ENABLE) - nn.shrink_memory_pool() - ttszh_inference() diff --git a/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py b/share/qtcreator/examples/05-nncase-Runtime/ai2d+kpu.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/05-nncase-Runtime/kpu.py b/share/qtcreator/examples/05-nncase-Runtime/kpu.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/06-MPP/vo/vo.py b/share/qtcreator/examples/06-MPP/vo/vo.py deleted file mode 100755 index 55918728f922..000000000000 --- a/share/qtcreator/examples/06-MPP/vo/vo.py +++ /dev/null @@ -1,197 +0,0 @@ -from mpp.vo import * -from mpp import * -from time import * - -def display_hardware_init(): - kd_display_set_backlight() - kd_display_reset() - -hx8399 = k_vo_display_resolution( - pclk = 74250, - phyclk = 445500, - htotal = 1240, - hdisplay = 1080, - hsync_len = 20, - hback_porch = 20, - hfront_porch = 120, - vtotal = 1988, - vdisplay = 1920, - vsync_len = 5, - vback_porch = 8, - vfront_porch = 55, -) - -def hx8399_v2_init(test_mode_en): - param1 = bytes((0xB9, 0xFF, 0x83, 0x99)) - param21 = bytes((0xD2, 0xAA)) - param2 = bytes((0xB1, 0x02, 0x04, 0x71, 0x91, 0x01, 0x32, 0x33, 0x11, 0x11, 0xab, 0x4d, 0x56, 0x73, 0x02, 0x02)) - param3 = bytes((0xB2, 0x00, 0x80, 0x80, 0xae, 0x05, 0x07, 0x5a, 0x11, 0x00, 0x00, 0x10, 0x1e, 0x70, 0x03, 0xd4)) - param4 = bytes((0xB4, 0x00, 0xFF, 0x02, 0xC0, 0x02, 0xc0, 0x00, 0x00, 0x08, 0x00, 0x04, 0x06, 0x00, 0x32, 0x04, 0x0a, 0x08, 0x21, 0x03, 0x01, 0x00, 0x0f, 0xb8, 0x8b, 0x02, 0xc0, 0x02, 0xc0, 0x00, 0x00, 0x08, 0x00, 0x04, 0x06, 0x00, 0x32, 0x04, 0x0a, 0x08, 0x01, 0x00, 0x0f, 0xb8, 0x01)) - param5 = bytes((0xD3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x10, 0x04, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x05, 0x05, 0x07, 0x00, 0x00, 0x00, 0x05, 0x40)) - param6 = bytes((0xD5, 0x18, 0x18, 0x19, 0x19, 0x18, 0x18, 0x21, 0x20, 0x01, 0x00, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x18, 0x18, 0x18, 0x18)) - param7 = bytes((0xD6, 0x18, 0x18, 0x19, 0x19, 0x40, 0x40, 0x20, 0x21, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x40, 0x40, 0x40, 0x40)) - param8 = bytes((0xD8, 0xa2, 0xaa, 0x02, 0xa0, 0xa2, 0xa8, 0x02, 0xa0, 0xb0, 0x00, 0x00, 0x00, 0xb0, 0x00, 0x00, 0x00)) - param9 = bytes((0xBD, 0x01)) - param10 = bytes((0xD8, 0xB0, 0x00, 0x00, 0x00, 0xB0, 0x00, 0x00, 0x00, 0xE2, 0xAA, 0x03, 0xF0, 0xE2, 0xAA, 0x03, 0xF0)) - param11 = bytes((0xBD, 0x02)) - param12 = bytes((0xD8, 0xE2, 0xAA, 0x03, 0xF0, 0xE2, 0xAA, 0x03, 0xF0)) - param13 = bytes((0xBD, 0x00)) - param14 = bytes((0xB6, 0x8D, 0x8D)) - param15 = bytes((0xCC, 0x09)) - param16 = bytes((0xC6, 0xFF, 0xF9)) - param22 = bytes((0xE0, 0x00, 0x12, 0x1f, 0x1a, 0x40, 0x4a, 0x59, 0x55, 0x5e, 0x67, 0x6f, 0x75, 0x7a, 0x82, 0x8b, 0x90, 0x95, 0x9f, 0xa3, 0xad, 0xa2, 0xb2, 0xB6, 0x5e, 0x5a, 0x65, 0x77, 0x00, 0x12, 0x1f, 0x1a, 0x40, 0x4a, 0x59, 0x55, 0x5e, 0x67, 0x6f, 0x75, 0x7a, 0x82, 0x8b, 0x90, 0x95, 0x9f, 0xa3, 0xad, 0xa2, 0xb2, 0xB6, 0x5e, 0x5a, 0x65, 0x77)) - param23 = bytes((0x11,)) - param24 = bytes((0x29,)) - pag20 = bytes((0xB2, 0x0b, 
0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77)) - - kd_mpi_dsi_send_cmd(param1, len(param1)) - kd_mpi_dsi_send_cmd(param21, len(param21)) - kd_mpi_dsi_send_cmd(param2, len(param2)) - kd_mpi_dsi_send_cmd(param3, len(param3)) - kd_mpi_dsi_send_cmd(param4, len(param4)) - kd_mpi_dsi_send_cmd(param5, len(param5)) - kd_mpi_dsi_send_cmd(param6, len(param6)) - kd_mpi_dsi_send_cmd(param7, len(param7)) - kd_mpi_dsi_send_cmd(param8, len(param8)) - kd_mpi_dsi_send_cmd(param9, len(param9)) - - if (test_mode_en): - kd_mpi_dsi_send_cmd(pag20, 10) - - kd_mpi_dsi_send_cmd(param10, len(param10)) - kd_mpi_dsi_send_cmd(param11, len(param11)) - kd_mpi_dsi_send_cmd(param12, len(param12)) - kd_mpi_dsi_send_cmd(param13, len(param13)) - kd_mpi_dsi_send_cmd(param14, len(param14)) - kd_mpi_dsi_send_cmd(param15, len(param15)) - kd_mpi_dsi_send_cmd(param16, len(param16)) - kd_mpi_dsi_send_cmd(param22, len(param22)) - kd_mpi_dsi_send_cmd(param23, 1) - sleep_ms(300) - kd_mpi_dsi_send_cmd(param24, 1) - sleep_ms(100) - -def dwc_dsi_lpmode_test(): - enable = 1 - screen_test_mode = 1 - - phy_attr = k_vo_mipi_phy_attr() - phy_attr.phy_lan_num = K_DSI_4LAN - phy_attr.m = 295 - phy_attr.n = 15 - phy_attr.voc = 0x17 - phy_attr.hs_freq = 0x96 - kd_mpi_set_mipi_phy_attr(phy_attr) - - attr = k_vo_dsi_attr() - attr.lan_num = K_DSI_4LAN - attr.cmd_mode = K_VO_LP_MODE - attr.lp_div = 8 - struct_copy(hx8399, attr.resolution) - kd_mpi_dsi_set_attr(attr) - - hx8399_v2_init(screen_test_mode) - - kd_mpi_dsi_enable(enable) - -def dwc_dsi_hsmode_test(): - enable = 1 - screen_test_mode = 1 - - phy_attr = k_vo_mipi_phy_attr() - phy_attr.phy_lan_num = K_DSI_4LAN - phy_attr.m = 295 - phy_attr.n = 15 - phy_attr.voc = 0x17 - phy_attr.hs_freq = 0x96 - kd_mpi_set_mipi_phy_attr(phy_attr) - - attr = k_vo_dsi_attr() - attr.lan_num = K_DSI_4LAN - attr.cmd_mode = K_VO_HS_MODE - attr.lp_div = 8 - struct_copy(hx8399, attr.resolution) - kd_mpi_dsi_set_attr(attr) - - hx8399_v2_init(screen_test_mode) - - kd_mpi_dsi_enable(enable) - -def dwc_dsi_init_with_test_pattern(): - enable = 1 - screen_test_mode = 0 - - phy_attr = k_vo_mipi_phy_attr() - phy_attr.phy_lan_num = K_DSI_4LAN - phy_attr.m = 295 - phy_attr.n = 15 - phy_attr.voc = 0x17 - phy_attr.hs_freq = 0x96 - kd_mpi_set_mipi_phy_attr(phy_attr) - - attr = k_vo_dsi_attr() - attr.lan_num = K_DSI_4LAN - attr.cmd_mode = K_VO_LP_MODE - attr.lp_div = 8 - struct_copy(hx8399, attr.resolution) - kd_mpi_dsi_set_attr(attr) - - hx8399_v2_init(screen_test_mode) - - kd_mpi_dsi_enable(enable) - - kd_mpi_dsi_set_test_pattern() - -def dwc_dsi_init(): - enable = 1 - screen_test_mode = 0 - - phy_attr = k_vo_mipi_phy_attr() - phy_attr.phy_lan_num = K_DSI_4LAN - phy_attr.m = 295 - phy_attr.n = 15 - phy_attr.voc = 0x17 - phy_attr.hs_freq = 0x96 - kd_mpi_set_mipi_phy_attr(phy_attr) - - attr = k_vo_dsi_attr() - attr.lan_num = K_DSI_4LAN - attr.cmd_mode = K_VO_LP_MODE - attr.lp_div = 8 - struct_copy(hx8399, attr.resolution) - kd_mpi_dsi_set_attr(attr) - - hx8399_v2_init(screen_test_mode) - - kd_mpi_dsi_enable(enable) - -def vo_background_init(): - attr = k_vo_pub_attr() - attr.bg_color = 0xffffff - attr.intf_sync = K_VO_OUT_1080P30 - attr.intf_type = K_VO_INTF_MIPI - attr.sync_info = struct_ptr(hx8399) - - kd_mpi_vo_init() - - kd_mpi_vo_set_dev_param(attr) - - kd_mpi_vo_enable() - -def test_case(index): - display_hardware_init() - sleep_ms(200) - - if (index == 0): - print("DISPLAY_DSI_LP_MODE_TEST ------------------ \n"); - dwc_dsi_lpmode_test() - elif (index == 1): - print("DISPLAY_DSI_HS_MODE_TEST ------------------ \n"); - 
dwc_dsi_hsmode_test() - elif (index == 2): - print("dwc_dsi_init_with_test_pattern ------------------ \n"); - dwc_dsi_init_with_test_pattern() - elif (index == 3): - print("DISPALY_VO_BACKGROUND_TEST ------------------ \n"); - dwc_dsi_init() - vo_background_init() diff --git a/share/qtcreator/examples/07-April-Tags/find_apriltags.py b/share/qtcreator/examples/07-April-Tags/find_apriltags.py old mode 100644 new mode 100755 index 633b73b2198f..507fbd83cacf --- a/share/qtcreator/examples/07-April-Tags/find_apriltags.py +++ b/share/qtcreator/examples/07-April-Tags/find_apriltags.py @@ -2,16 +2,14 @@ # # This example shows the power of the CanMV Cam to detect April Tags. -from media.camera import * +import time, math, os, gc, sys + +from media.sensor import * from media.display import * from media.media import * -import time, math, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = 320 +DETECT_HEIGHT = 240 # Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. @@ -46,103 +44,73 @@ def family_name(tag): if(tag.family() == image.ARTOOLKIT): return "ARTOOLKIT" -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # 
Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. + os.exitpoint() + + img = sensor.snapshot() + for tag in img.find_apriltags(families=tag_families): + img.draw_rectangle([v for v in tag.rect()], color=(255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0)) + print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) + print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - draw_img.clear() - for tag in img.find_apriltags(families=tag_families): - draw_img.draw_rectangle([v*SCALE for v in tag.rect()], color=(255, 0, 0)) - draw_img.draw_cross(tag.cx()*SCALE, tag.cy()*SCALE, color=(0, 255, 0)) - print_args = (family_name(tag), tag.id(), (180 * tag.rotation()) / math.pi) - print("Tag Family %s, Tag ID %d, rotation %f (degrees)" % print_args) - draw_img.copy_to(osd_img) - print(fps.fps()) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py b/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py old mode 100644 new mode 100755 index c614fd9216d0..cf42d97f7396 --- a/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py +++ b/share/qtcreator/examples/07-April-Tags/find_apriltags_3d_pose.py @@ -2,16 +2,11 @@ # # This example shows the power of the CanMV Cam to detect April Tags. 
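The pose-estimation variant passes camera intrinsics (f_x, f_y, c_x, c_y) to find_apriltags, and the values it computes further down are easy to sanity-check by hand. A small standalone sketch of that arithmetic, assuming (the patch does not say so explicitly) that 2.8 is the lens focal length in millimetres and 3.984 x 2.952 mm the sensor's active area:

# Sketch: how the intrinsics in this example are derived.
# LENS_FOCAL_MM and the sensor dimensions are assumptions about what the
# hard-coded 2.8 / 3.984 / 2.952 factors mean; the formulas and the
# 320 x 240 detection size are taken from the example itself.
LENS_FOCAL_MM = 2.8                       # assumed lens focal length (mm)
SENSOR_W_MM, SENSOR_H_MM = 3.984, 2.952   # assumed active sensor area (mm)
DETECT_WIDTH, DETECT_HEIGHT = 320, 240

f_x = (LENS_FOCAL_MM / SENSOR_W_MM) * DETECT_WIDTH    # ~224.9 px
f_y = (LENS_FOCAL_MM / SENSOR_H_MM) * DETECT_HEIGHT   # ~227.6 px
c_x = DETECT_WIDTH * 0.5                              # 160.0 px, principal point x
c_y = DETECT_HEIGHT * 0.5                             # 120.0 px, principal point y
print(f_x, f_y, c_x, c_y)

With the 320 x 240 detection size this example uses, that works out to roughly 225 and 228 pixels of focal length with the principal point at the image centre.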
-from media.camera import * -from media.display import * -from media.media import * import time, math, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +from media.sensor import * +from media.display import * +from media.media import * # Note! Unlike find_qrcodes the find_apriltags method does not need lens correction on the image to work. @@ -36,6 +31,9 @@ # c_x is the image x center position in pixels. # c_y is the image y center position in pixels. +DETECT_WIDTH = 320 +DETECT_HEIGHT = 240 + f_x = (2.8 / 3.984) * DETECT_WIDTH # find_apriltags defaults to this if not set f_y = (2.8 / 2.952) * DETECT_HEIGHT # find_apriltags defaults to this if not set c_x = DETECT_WIDTH * 0.5 # find_apriltags defaults to this if not set (the image.w * 0.5) @@ -44,105 +42,74 @@ def degrees(radians): return (180 * radians) / math.pi -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. 
+ os.exitpoint() + img = sensor.snapshot() + for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 + img.draw_rectangle([v for v in tag.rect()], color=(255, 0, 0)) + img.draw_cross(tag.cx(), tag.cy(), color=(0, 255, 0)) + print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), + degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation())) + + # Translation units are unknown. Rotation units are in degrees. + print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - draw_img.clear() - for tag in img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y): # defaults to TAG36H11 - draw_img.draw_rectangle([v*SCALE for v in tag.rect()], color=(255, 0, 0)) - draw_img.draw_cross(tag.cx()*SCALE, tag.cy()*SCALE, color=(0, 255, 0)) - print_args = (tag.x_translation(), tag.y_translation(), tag.z_translation(), - degrees(tag.x_rotation()), degrees(tag.y_rotation()), degrees(tag.z_rotation())) - # Translation units are unknown. Rotation units are in degrees. - print("Tx: %f, Ty %f, Tz %f, Rx %f, Ry %f, Rz %f" % print_args) - draw_img.copy_to(osd_img) - print(fps.fps()) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/08-Codes/find_barcodes.py b/share/qtcreator/examples/08-Codes/find_barcodes.py old mode 100644 new mode 100755 index 68f70ecda143..7ff51fae8d65 --- a/share/qtcreator/examples/08-Codes/find_barcodes.py +++ b/share/qtcreator/examples/08-Codes/find_barcodes.py @@ -1,66 +1,11 @@ # Barcode Example # # This example shows off how easy it is to detect bar codes. 
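The barcode script follows the same lifecycle as the tag examples above; every rewritten example in this patch is built around one try/except/finally skeleton. A condensed sketch of that shared flow, assuming the Sensor/Display/MediaManager calls behave exactly as in the added lines (the 640 x 480 size and the VIRT/IDE output are the ones this script uses below, not requirements):

import time, os, gc, sys

from media.sensor import *
from media.display import *
from media.media import *

sensor = None
try:
    sensor = Sensor(width=640, height=480)        # construct with an explicit frame size
    sensor.reset()
    sensor.set_framesize(width=640, height=480)   # channel 0 output size
    sensor.set_pixformat(Sensor.GRAYSCALE)        # grayscale is enough for code/tag detection

    Display.init(Display.VIRT, width=640, height=480, fps=100)  # stream frames to the IDE
    MediaManager.init()                           # allocate media buffers
    sensor.run()                                  # start streaming

    fps = time.clock()
    while True:
        fps.tick()
        os.exitpoint()                            # let the IDE stop button interrupt the loop
        img = sensor.snapshot()
        # ... per-example processing on img goes here ...
        Display.show_image(img)
        gc.collect()
        print(fps.fps())
except KeyboardInterrupt:
    print("user stop")
except BaseException as e:
    print(f"Exception '{e}'")
finally:
    if isinstance(sensor, Sensor):
        sensor.stop()                             # stop the sensor before tearing down display/buffers
    Display.deinit()
    os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
    time.sleep_ms(100)
    MediaManager.deinit()                         # release media buffers last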
+import time, math, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, math, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() def barcode_name(code): if(code.type() == image.EAN2): @@ -96,55 +41,74 @@ def barcode_name(code): if(code.type() == image.CODE128): return "CODE128" -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 
1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - codes = img.find_qrcodes() - draw_img.clear() - for code in codes: - draw_img.draw_rectangle([v*SCALE for v in code.rect()], color=(255, 0, 0)) - print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), fps.fps()) - print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args) - draw_img.copy_to(osd_img) - if not codes: - print("FPS %f" % fps.fps()) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + for code in img.find_barcodes(): + img.draw_rectangle([v for v in code.rect()], color=(255, 0, 0)) + print_args = (barcode_name(code), code.payload(), (180 * code.rotation()) / math.pi, code.quality(), fps.fps()) + print("Barcode %s, Payload \"%s\", rotation %f (degrees), quality %d, FPS %f" % print_args) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/08-Codes/find_datamatrices.py b/share/qtcreator/examples/08-Codes/find_datamatrices.py old mode 100644 new mode 100755 index 271130574cfb..bc6c2e562a6f --- a/share/qtcreator/examples/08-Codes/find_datamatrices.py +++ b/share/qtcreator/examples/08-Codes/find_datamatrices.py @@ -1,116 +1,80 @@ # Data Matrices Example # # This example shows off how easy it is to detect data matrices. 
+import time, math, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, math, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. 
+ os.exitpoint() + img = sensor.snapshot() + + for matrix in img.find_datamatrices(): + img.draw_rectangle([v for v in matrix.rect()], color=(255, 0, 0)) + print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, fps.fps()) + print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - matrices = img.find_datamatrices() - draw_img.clear() - for matrix in matrices: - draw_img.draw_rectangle([v*SCALE for v in matrix.rect()], color=(255, 0, 0)) - print_args = (matrix.rows(), matrix.columns(), matrix.payload(), (180 * matrix.rotation()) / math.pi, fps.fps()) - print("Matrix [%d:%d], Payload \"%s\", rotation %f (degrees), FPS %f" % print_args) - draw_img.copy_to(osd_img) - if not matrices: - print("FPS %f" % fps.fps()) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/08-Codes/find_qrcodes.py b/share/qtcreator/examples/08-Codes/find_qrcodes.py old mode 100644 new mode 100755 index ec0bd57593e1..fba1771e8dd7 --- a/share/qtcreator/examples/08-Codes/find_qrcodes.py +++ b/share/qtcreator/examples/08-Codes/find_qrcodes.py @@ -1,115 +1,81 @@ # QRCode Example # # This example shows the power of the CanMV Cam to detect QR Codes. 
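The QR example keeps the stock pipeline, but the note at the top of the April-Tag script (find_apriltags does not need lens correction, implying find_qrcodes can benefit from it) suggests one optional tweak. Inside the capture loop of the skeleton sketched above, a strongly distorting lens could be compensated before decoding; img.lens_corr() and its arguments are an assumption here, carried over from the OpenMV API, and the patch itself never calls it:

img = sensor.snapshot()
img.lens_corr(strength=1.8, zoom=1.0)   # hypothetical: undo barrel distortion first; tune values per lens
for code in img.find_qrcodes():
    print(code.payload())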
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. 
+ os.exitpoint() + img = sensor.snapshot() + + for code in img.find_qrcodes(): + rect = code.rect() + img.draw_rectangle([v for v in rect], color=(255, 0, 0), thickness = 5) + img.draw_string_advanced(rect[0], rect[1], 32, code.payload()) + print(code) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, alloc=image.ALLOC_HEAP, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - codes = img.find_qrcodes() - draw_img.clear() - for code in codes: - draw_img.draw_rectangle([v*SCALE for v in code.rect()], color=(255, 0, 0)) - print(code) - draw_img.copy_to(osd_img) - if not codes: - print("FPS %f" % fps.fps()) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py b/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py old mode 100644 new mode 100755 index c97e1286ecf3..f72657a150a2 --- a/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py +++ b/share/qtcreator/examples/09-Color-Tracking/automatic_grayscale_color_tracking.py @@ -1,145 +1,111 @@ # Automatic Grayscale Color Tracking Example # # This example shows off single color automatic grayscale color tracking using the CanMV Cam. 
+import time, os, gc, sys, math -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys, math -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # 
init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + frame_count = 0 + threshold = [128, 128] # Middle grayscale values. # Capture the color thresholds for whatever was in the center of the image. r = [(DETECT_WIDTH//2)-(50//2), (DETECT_HEIGHT//2)-(50//2), 50, 50] # 50x50 center of QVGA. - threshold = [128, 128] # Middle grayscale values. - frame_count = 0 - fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - if frame_count < 60: - if frame_count == 0: - print("Letting auto algorithms run. Don't put anything in front of the camera!") - print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") - print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") - draw_img.draw_rectangle([v*SCALE for v in r]) - frame_count = frame_count + 1 - elif frame_count < 120: - if frame_count == 60: - print("Learning thresholds...") - elif frame_count == 119: - print("Thresholds learned...") - print("Tracking colors...") - hist = img.get_histogram(roi=r) - lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! - hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! - # Average in percentile values. - threshold[0] = (threshold[0] + lo.value()) // 2 - threshold[1] = (threshold[1] + hi.value()) // 2 - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_rectangle([v*SCALE for v in r]) - frame_count = frame_count + 1 - del hist - else: - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - - draw_img.copy_to(osd_img) - del img - gc.collect() - if frame_count >= 120: - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + if frame_count < 60: + if frame_count == 0: + print("Letting auto algorithms run. Don't put anything in front of the camera!") + print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") + print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") + img.draw_rectangle([v for v in r]) + frame_count = frame_count + 1 + elif frame_count < 120: + if frame_count == 60: + print("Learning thresholds...") + elif frame_count == 119: + print("Thresholds learned...") + print("Tracking colors...") + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! 
+ hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. + threshold[0] = (threshold[0] + lo.value()) // 2 + threshold[1] = (threshold[1] + hi.value()) // 2 + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_rectangle([v for v in r]) + frame_count = frame_count + 1 + del hist + else: + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + + # draw result to screen + Display.show_image(img) + gc.collect() + + if frame_count >= 120: + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py b/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py old mode 100644 new mode 100755 index 85efd03081e3..baab27db9329 --- a/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py +++ b/share/qtcreator/examples/09-Color-Tracking/automatic_rgb565_color_tracking.py @@ -1,149 +1,115 @@ # Automatic RGB565 Color Tracking Example # # This example shows off single color automatic RGB565 color tracking using the CanMV Cam. +import time, os, gc, sys, math -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys, math -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, 
DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + frame_count = 0 + threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. # Capture the color thresholds for whatever was in the center of the image. r = [(DETECT_WIDTH//2)-(50//2), (DETECT_HEIGHT//2)-(50//2), 50, 50] # 50x50 center of QVGA. - threshold = [50, 50, 0, 0, 0, 0] # Middle L, A, B values. - frame_count = 0 - fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - if frame_count < 60: - if frame_count == 0: - print("Letting auto algorithms run. Don't put anything in front of the camera!") - print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") - print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") - draw_img.draw_rectangle([v*SCALE for v in r]) - frame_count = frame_count + 1 - elif frame_count < 120: - if frame_count == 60: - print("Learning thresholds...") - elif frame_count == 119: - print("Thresholds learned...") - print("Tracking colors...") - hist = img.get_histogram(roi=r) - lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! - hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! - # Average in percentile values. 
- threshold[0] = (threshold[0] + lo.l_value()) // 2 - threshold[1] = (threshold[1] + hi.l_value()) // 2 - threshold[2] = (threshold[2] + lo.a_value()) // 2 - threshold[3] = (threshold[3] + hi.a_value()) // 2 - threshold[4] = (threshold[4] + lo.b_value()) // 2 - threshold[5] = (threshold[5] + hi.b_value()) // 2 - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_rectangle([v*SCALE for v in r]) - frame_count = frame_count + 1 - del hist - else: - for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - - draw_img.copy_to(osd_img) - del img - gc.collect() - if frame_count >= 120: - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + if frame_count < 60: + if frame_count == 0: + print("Letting auto algorithms run. Don't put anything in front of the camera!") + print("Auto algorithms done. Hold the object you want to track in front of the camera in the box.") + print("MAKE SURE THE COLOR OF THE OBJECT YOU WANT TO TRACK IS FULLY ENCLOSED BY THE BOX!") + img.draw_rectangle([v for v in r]) + frame_count = frame_count + 1 + elif frame_count < 120: + if frame_count == 60: + print("Learning thresholds...") + elif frame_count == 119: + print("Thresholds learned...") + print("Tracking colors...") + hist = img.get_histogram(roi=r) + lo = hist.get_percentile(0.01) # Get the CDF of the histogram at the 1% range (ADJUST AS NECESSARY)! + hi = hist.get_percentile(0.99) # Get the CDF of the histogram at the 99% range (ADJUST AS NECESSARY)! + # Average in percentile values. 
+ threshold[0] = (threshold[0] + lo.l_value()) // 2 + threshold[1] = (threshold[1] + hi.l_value()) // 2 + threshold[2] = (threshold[2] + lo.a_value()) // 2 + threshold[3] = (threshold[3] + hi.a_value()) // 2 + threshold[4] = (threshold[4] + lo.b_value()) // 2 + threshold[5] = (threshold[5] + hi.b_value()) // 2 + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_rectangle([v for v in r]) + frame_count = frame_count + 1 + del hist + else: + for blob in img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10): + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + + # draw result to screen + Display.show_image(img) + gc.collect() + + if frame_count >= 120: + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py b/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py old mode 100644 new mode 100755 index beded0344e9f..70a9a9733e09 --- a/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py +++ b/share/qtcreator/examples/09-Color-Tracking/black_grayscale_line_following.py @@ -9,17 +9,14 @@ # For this script to work properly you should point the camera at a line at a # 45 or so degree angle. Please make sure that only the line is within the # camera's field of view. +import time, os, gc, sys, math -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys, math -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 # Tracks a black line. Use [(128, 255)] for a tracking a white line. GRAYSCALE_THRESHOLD = [(0, 64)] @@ -38,134 +35,103 @@ weight_sum = 0 for r in ROIS: weight_sum += r[4] # r[4] is the roi weight. 
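The line-following example below combines the largest blob found in each ROI into one weighted centroid and turns that centroid into a steering angle. A pure-Python sketch of just that math, with a hypothetical ROIS list in the (x, y, w, h, weight) layout the comments describe; the 80/60 constants in the example's comment correspond to half of a 160x120 frame, so this sketch parameterizes them from the detect size instead:

import math

# Illustrative ROI list in (x, y, w, h, weight) order; the real example defines its own ROIS.
ROIS = [
    (0, 400, 640, 80, 0.7),   # bottom of the image: weighted most heavily
    (0, 200, 640, 80, 0.3),
    (0,   0, 640, 80, 0.1),
]
WEIGHT_SUM = sum(r[4] for r in ROIS)

def turn_angle(centroids, width=640, height=480):
    # centroids: one blob cx per ROI, paired positionally with that ROI's weight
    centroid_sum = sum(cx * r[4] for cx, r in zip(centroids, ROIS))
    center_pos = centroid_sum / WEIGHT_SUM
    # angle of a triangle whose opposite side is the deviation from the image
    # centre and whose adjacent side is half the image height
    angle = -math.atan((center_pos - width / 2) / (height / 2))
    return math.degrees(angle)

print(turn_angle([320, 320, 320]))  # a centred line gives 0.0 degrees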
-def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + centroid_sum = 0 + for r in ROIS: + blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple. + + if blobs: + # Find the blob with the most pixels. + largest_blob = max(blobs, key=lambda b: b.pixels()) + + # Draw a rect around the blob. + img.draw_rectangle([v for v in largest_blob.rect()]) + img.draw_cross(largest_blob.cx(), largest_blob.cy()) + + centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. + + center_pos = (centroid_sum / weight_sum) # Determine center of line. + + # Convert the center_pos to a deflection angle. We're using a non-linear + # operation so that the response gets stronger the farther off the line we + # are. Non-linear operations are good to use on the output of algorithms + # like this to cause a response "trigger". + deflection_angle = 0 + + # The 80 is from half the X res, the 60 is from half the Y res. 
The + # equation below is just computing the angle of a triangle where the + # opposite side of the triangle is the deviation of the center position + # from the center and the adjacent side is half the Y res. This limits + # the angle output to around -45 to 45. (It's not quite -45 and 45). + deflection_angle = -math.atan((center_pos-80)/60) + + # Convert angle in radians to degrees. + deflection_angle = math.degrees(deflection_angle) + + # Now you have an angle telling you how much to turn the robot by which + # incorporates the part of the line nearest to the robot and parts of + # the line farther away from the robot for a better prediction. + print("Turn Angle: %f" % deflection_angle) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - centroid_sum = 0 - for r in ROIS: - blobs = img.find_blobs(GRAYSCALE_THRESHOLD, roi=r[0:4], merge=True) # r[0:4] is roi tuple. - - if blobs: - # Find the blob with the most pixels. - largest_blob = max(blobs, key=lambda b: b.pixels()) - - # Draw a rect around the blob. - draw_img.draw_rectangle([v*SCALE for v in largest_blob.rect()]) - draw_img.draw_cross(largest_blob.cx()*SCALE, largest_blob.cy()*SCALE) - - centroid_sum += largest_blob.cx() * r[4] # r[4] is the roi weight. - - center_pos = (centroid_sum / weight_sum) # Determine center of line. - - # Convert the center_pos to a deflection angle. We're using a non-linear - # operation so that the response gets stronger the farther off the line we - # are. Non-linear operations are good to use on the output of algorithms - # like this to cause a response "trigger". - deflection_angle = 0 - - # The 80 is from half the X res, the 60 is from half the Y res. The - # equation below is just computing the angle of a triangle where the - # opposite side of the triangle is the deviation of the center position - # from the center and the adjacent side is half the Y res. This limits - # the angle output to around -45 to 45. (It's not quite -45 and 45). - deflection_angle = -math.atan((center_pos-80)/60) - - # Convert angle in radians to degrees. 
- deflection_angle = math.degrees(deflection_angle) - - # Now you have an angle telling you how much to turn the robot by which - # incorporates the part of the line nearest to the robot and parts of - # the line farther away from the robot for a better prediction. - print("Turn Angle: %f" % deflection_angle) - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py b/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py old mode 100644 new mode 100755 index cea35e2e46f2..2ae214d18193 --- a/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py +++ b/share/qtcreator/examples/09-Color-Tracking/image_histogram_info.py @@ -5,102 +5,77 @@ # You can also pass get_histogram() an "roi=" to get just the histogram of that area. # get_histogram() allows you to quickly determine the color channel information of # any any area in the image. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - 
globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() -def capture_picture(): fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - print(img.get_histogram(bins=8)) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + print(img.get_histogram(bins=8)) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py b/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py old mode 100644 new mode 100755 index ba5ab393cd1d..76e8f74a24f6 --- a/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py +++ b/share/qtcreator/examples/09-Color-Tracking/image_statistics_info.py @@ -5,102 +5,79 @@ # You can also pass get_statistics() an "roi=" to get just the statistics of that area. # get_statistics() allows you to quickly determine the color channel information of # any any area in the image. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + + +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() -def capture_picture(): fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - print(img.get_statistics()) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt 
as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + print(img.get_statistics()) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py b/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py old mode 100644 new mode 100755 index 954970307b8b..dd7c50cb4f55 --- a/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py +++ b/share/qtcreator/examples/09-Color-Tracking/multi_color_code_tracking.py @@ -4,21 +4,18 @@ # # A color code is a blob composed of two or more colors. The example below will # only track colored objects which have two or more the colors below in them. +import time, os, gc, sys, math -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys, math -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... -thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) +thresholds = [(12, 100, -47, 14, -1, 58), # generic_red_thresholds -> index is 0 so code == (1 << 0) (30, 100, -64, -8, -32, 32), # generic_green_thresholds -> index is 1 so code == (1 << 1) (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds -> index is 2 so code == (1 << 2) # Codes are or'ed together when "merge=True" for "find_blobs". @@ -27,115 +24,84 @@ # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. 
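Each threshold's index becomes a bit in blob.code() when merge=True joins overlapping blobs, which is how the example below tells r/g, r/b, g/b and r/g/b codes apart. A small helper sketch that decodes that bitmask into labels; the label strings and helper name are illustrative only:

# index 0 -> red, 1 -> green, 2 -> blue, matching the thresholds list order
COLOR_NAMES = ["r", "g", "b"]

def code_to_label(code):
    # blob.code() ORs together (1 << index) for every threshold the blob matched
    return "/".join(name for i, name in enumerate(COLOR_NAMES) if code & (1 << i))

print(code_to_label(3))  # "r/g"
print(code_to_label(6))  # "g/b"
print(code_to_label(7))  # "r/g/b"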
-def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. 
+ os.exitpoint() + img = sensor.snapshot() + + for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): + if blob.code() == 3: # r/g code + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string_advanced(blob.x() + 2, blob.y() + 2, 32, "r/g") + if blob.code() == 5: # r/b code + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string_advanced(blob.x() + 2, blob.y() + 2, 32, "r/b") + if blob.code() == 6: # g/b code + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string_advanced(blob.x() + 2, blob.y() + 2, 32, "g/b") + if blob.code() == 7: # r/g/b code + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + img.draw_string_advanced(blob.x() + 2, blob.y() + 2, 32, "r/g/b") + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): - if blob.code() == 3: # r/g code - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/g") - if blob.code() == 5: # r/b code - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/b") - if blob.code() == 6: # g/b code - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "g/b") - if blob.code() == 7: # r/g/b code - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - draw_img.draw_string(blob.x()*SCALE + 2, blob.y()*SCALE + 2, "r/g/b") - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception 
as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py b/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py old mode 100644 new mode 100755 index 10838ee7b7e0..132150235584 --- a/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py +++ b/share/qtcreator/examples/09-Color-Tracking/single_color_code_tracking.py @@ -4,21 +4,18 @@ # # A color code is a blob composed of two or more colors. The example below will # only track colored objects which have both the colors below in them. +import time, os, gc, sys, math -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys, math -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... -thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds -> index is 0 so code == (1 << 0) +thresholds = [(12, 100, -47, 14, -1, 58), # generic_red_thresholds -> index is 0 so code == (1 << 0) (30, 100, -64, -8, -32, 32)] # generic_green_thresholds -> index is 1 so code == (1 << 1) # Codes are or'ed together when "merge=True" for "find_blobs" @@ -26,110 +23,79 @@ # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. "merge=True" must be set to merge overlapping color blobs for color codes. 
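The fixed LAB thresholds above usually need tuning for real lighting. The automatic tracking examples earlier in this diff learn them from the 1%/99% histogram percentiles of a reference patch; a hedged sketch of that idea, reusing only the accessors those examples already call and assuming an RGB565 img as in the examples above:

def learn_lab_threshold(img, roi):
    # roi is (x, y, w, h) around the colour you want to track
    hist = img.get_histogram(roi=roi)
    lo = hist.get_percentile(0.01)
    hi = hist.get_percentile(0.99)
    # (L min, L max, A min, A max, B min, B max) as expected by find_blobs
    return (lo.l_value(), hi.l_value(),
            lo.a_value(), hi.a_value(),
            lo.b_value(), hi.b_value())

# usage sketch:
# threshold = learn_lab_threshold(img, ((640 // 2) - 25, (480 // 2) - 25, 50, 50))
# blobs = img.find_blobs([threshold], pixels_threshold=100, area_threshold=100, merge=True, margin=10)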
-def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + + # check if should exit. + os.exitpoint() + img = sensor.snapshot() + + for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): + if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) + # These values depend on the blob not being circular - otherwise they will be shaky. + # if blob.elongation() > 0.5: + # img.draw_edges(blob.min_corners(), color=(255,0,0)) + # img.draw_line(blob.major_axis_line(), color=(0,255,0)) + # img.draw_line(blob.minor_axis_line(), color=(0,0,255)) + # These values are stable all the time. + img.draw_rectangle([v for v in blob.rect()]) + img.draw_cross(blob.cx(), blob.cy()) + # Note - the blob rotation is unique to 0-180 only. 
+ img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) + + # draw result to screen + Display.show_image(img) + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - for blob in img.find_blobs(thresholds, pixels_threshold=100, area_threshold=100, merge=True): - if blob.code() == 3: # r/g code == (1 << 1) | (1 << 0) - # These values depend on the blob not being circular - otherwise they will be shaky. - # if blob.elongation() > 0.5: - # img.draw_edges(blob.min_corners(), color=(255,0,0)) - # img.draw_line(blob.major_axis_line(), color=(0,255,0)) - # img.draw_line(blob.minor_axis_line(), color=(0,0,255)) - # These values are stable all the time. - draw_img.draw_rectangle([v*SCALE for v in blob.rect()]) - draw_img.draw_cross(blob.cx()*SCALE, blob.cy()*SCALE) - # Note - the blob rotation is unique to 0-180 only. - draw_img.draw_keypoints([(blob.cx()*SCALE, blob.cy()*SCALE, int(math.degrees(blob.rotation())))], size=20) - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/arrow_drawing.py b/share/qtcreator/examples/10-Drawing/arrow_drawing.py old mode 100644 new mode 100755 index bb63d445a52c..231dfee088a6 --- a/share/qtcreator/examples/10-Drawing/arrow_drawing.py +++ b/share/qtcreator/examples/10-Drawing/arrow_drawing.py @@ -1,72 +1,49 @@ # Arrow Drawing # # This example shows off drawing arrows on the CanMV Cam. 
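The drawing examples that follow all start by picking one of three display sinks with a trio of booleans, and only the chosen branch decides the canvas size. A condensed sketch of that selection, assuming the same Display targets the examples use; the init_display helper name is introduced here for illustration:

DISPLAY_IS_HDMI = True
DISPLAY_IS_LCD = False
DISPLAY_IS_IDE = False

def init_display():
    # returns the canvas size matching the display that was initialised
    if DISPLAY_IS_HDMI:
        Display.init(Display.LT9611, width=1920, height=1080, to_ide=True)
        return 1920, 1080
    elif DISPLAY_IS_LCD:
        Display.init(Display.ST7701, width=800, height=480, to_ide=True)
        return 800, 480
    elif DISPLAY_IS_IDE:
        Display.init(Display.VIRT, width=800, height=480, fps=100)
        return 800, 480
    raise ValueError("Should select a display.")

# width, height = init_display()
# MediaManager.init()
# img = image.Image(width, height, image.ARGB8888)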
+import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init media manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x0 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -79,27 +56,24 @@ def draw(): # If the first argument is a scaler then this method expects # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. 
img.draw_arrow(x0, y0, x1, y1, color = (r, g, b), size = 30, thickness = 2) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/circle_drawing.py b/share/qtcreator/examples/10-Drawing/circle_drawing.py old mode 100644 new mode 100755 index b495f7863164..72777c88be59 --- a/share/qtcreator/examples/10-Drawing/circle_drawing.py +++ b/share/qtcreator/examples/10-Drawing/circle_drawing.py @@ -1,72 +1,49 @@ # Circle Drawing # # This example shows off drawing circles on the CanMV Cam. +import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init 
media manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -78,27 +55,24 @@ def draw(): # If the first argument is a scaler then this method expects # to see x, y, and radius. Otherwise, it expects a (x,y,radius) tuple. img.draw_circle(x, y, radius, color = (r, g, b), thickness = 2, fill = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/cross_drawing.py b/share/qtcreator/examples/10-Drawing/cross_drawing.py old mode 100644 new mode 100755 index 7ed92f877cb0..0e5a8abbb1a3 --- a/share/qtcreator/examples/10-Drawing/cross_drawing.py +++ b/share/qtcreator/examples/10-Drawing/cross_drawing.py @@ -1,72 +1,50 @@ # Cross Drawing # # This example shows off drawing crosses on the CanMV Cam. 
+import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init media manager + MediaManager.init() -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -77,27 +55,25 @@ def draw(): # If the first argument is a scaler then this method expects # to see x and y. Otherwise, it expects a (x,y) tuple. 
img.draw_cross(x, y, color = (r, g, b), size = 10, thickness = 2) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) + +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/ellipse_drawing.py b/share/qtcreator/examples/10-Drawing/ellipse_drawing.py old mode 100644 new mode 100755 index 767e3a7b7bf1..f20815d8cfa7 --- a/share/qtcreator/examples/10-Drawing/ellipse_drawing.py +++ b/share/qtcreator/examples/10-Drawing/ellipse_drawing.py @@ -1,72 +1,49 @@ # Ellipse Drawing # # This example shows off drawing ellipses on the CanMV Cam. +import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init media 
manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -81,27 +58,24 @@ def draw(): # to see x, y, radius x, and radius y. # Otherwise, it expects a (x,y,radius_x,radius_y) tuple. img.draw_ellipse(x, y, radius_x, radius_y, rot, color = (r, g, b), thickness = 2, fill = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/flood_fill.py b/share/qtcreator/examples/10-Drawing/flood_fill.py old mode 100644 new mode 100755 index f452341ebbdc..9275b2d3959a --- a/share/qtcreator/examples/10-Drawing/flood_fill.py +++ b/share/qtcreator/examples/10-Drawing/flood_fill.py @@ -1,103 +1,78 @@ # Flood Fill # # This example shows off flood filling areas in the image. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # # set chn0 output size - # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # # set chn0 output format - # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # # create meida source device - # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # # create meida sink device - # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # # create meida link - # media.create_link(meida_source, meida_sink) - # # set display plane with video channel - # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +sensor = None -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() -def draw(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + while True: fps.tick() + # check if should exit. 
os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + img = sensor.snapshot() x = img.width() // 2 y = img.height() // 2 img.flood_fill(x, y, seed_threshold=0.05, floating_thresholds=0.05, color=(255, 0, 0), invert=False, clear_background=False) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) gc.collect() + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) -if __name__ == "__main__": - main() + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/image_drawing.py b/share/qtcreator/examples/10-Drawing/image_drawing.py old mode 100644 new mode 100755 index bd114b08be3e..c2608a6dd878 --- a/share/qtcreator/examples/10-Drawing/image_drawing.py +++ b/share/qtcreator/examples/10-Drawing/image_drawing.py @@ -1,110 +1,82 @@ # Draw Image Example # # This example shows off how to draw images in the frame buffer. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # # set chn0 output size - # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # # set chn0 output format - # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # # create meida source device - # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # # create meida sink device - # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # # create meida link - # media.create_link(meida_source, meida_sink) - # # set display plane with video channel - # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) +DETECT_WIDTH = 640 +DETECT_HEIGHT = 
480 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + + while True: + fps.tick() + # check if should exit. + os.exitpoint() + + img = sensor.snapshot() + + small_img = img.mean_pooled(4, 4) # Makes a copy. + x = (img.width()//2)-(small_img.width()//2) + y = (img.height()//2)-(small_img.height()//2) + # Draws an image in the frame buffer.Pass an optional + # mask image to control what pixels are drawn. + img.draw_image(small_img, x, y, x_scale=1, y_scale=1) + + # draw result to screen + Display.show_image(img) + del small_img + gc.collect() + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() # deinit display - display.deinit() + Display.deinit() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) + # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - small_img = img.mean_pooled(4, 4) # Makes a copy. - x = (img.width()//2)-(small_img.width()//2) - y = (img.height()//2)-(small_img.height()//2) - # Draws an image in the frame buffer.Pass an optional - # mask image to control what pixels are drawn. 
- img.draw_image(small_img, x, y, x_scale=1, y_scale=1) - img.copy_to(osd_img) - del img, small_img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py b/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py old mode 100644 new mode 100755 index 775f9464042e..65ace962b07b --- a/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py +++ b/share/qtcreator/examples/10-Drawing/image_drawing_advanced.py @@ -1,14 +1,14 @@ # Draw Image Testing script with bounce # # Exercise draw image with many different values for testing +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 BOUNCE = True RESCALE = True @@ -36,66 +36,46 @@ min_rescale = 0.2 # Boundary to bounce within -xmin = -DISPLAY_WIDTH / SMALL_IMAGE_SCALE - 8 -ymin = -DISPLAY_HEIGHT / SMALL_IMAGE_SCALE - 8 -xmax = DISPLAY_WIDTH + 8 -ymax = DISPLAY_HEIGHT + 8 - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # # set chn0 output size - # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # # set chn0 output format - # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # # create meida source device - # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # # create meida sink device - # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # # create meida link - # media.create_link(meida_source, meida_sink) - # # set display plane with video channel - # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - 
media.buffer_deinit() - -def draw(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) +xmin = -DETECT_WIDTH / SMALL_IMAGE_SCALE - 8 +ymin = -DETECT_HEIGHT / SMALL_IMAGE_SCALE - 8 +xmax = DETECT_WIDTH + 8 +ymax = DETECT_HEIGHT + 8 + +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + value_mixer = value_mixer_init x = x_init y = y_init @@ -103,81 +83,77 @@ def draw(): yd = yd_init rescale = rescale_init rd = rd_init - fps = time.clock() + while True: fps.tick() + # check if should exit. + os.exitpoint() + status = "" value_mixer = value_mixer + 1 - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE) - status = 'rgb565 ' - if CYCLE_FORMATS: - image_format = (value_mixer >> 8) & 3 - # To test combining different formats - if (image_format==1): small_img = small_img.to_bitmap(); status = 'bitmap ' - if (image_format==2): small_img = small_img.to_grayscale(); status = 'grayscale ' - if (image_format==3): small_img = small_img.to_rgb565(); status = 'rgb565 ' - - # update small image location - if BOUNCE: - x = x + xd - if (xxmax): - xd = -xd - - y = y + yd - if (yymax): - yd = -yd - - # Update small image scale - if RESCALE: - rescale = rescale + rd - if (rescalemax_rescale): - rd = -rd - - # Find the center of the image - scaled_width = int(small_img.width() * abs(rescale)) - scaled_height= int(small_img.height() * abs(rescale)) - - apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1) - if apply_mask: - img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(), x_scale=rescale, y_scale=rescale, alpha=240) - status += 'alpha:240 ' - status += '+mask ' - else: - img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128) - status += 'alpha:128 ' - - img.draw_string(8, 0, status, mono_space = False) - img.copy_to(osd_img) - del img, small_img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except Exception as e: - sys.print_exception(e) - finally: 
- if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + + img = sensor.snapshot() + + small_img = img.mean_pooled(SMALL_IMAGE_SCALE, SMALL_IMAGE_SCALE) + status = 'rgb565 ' + if CYCLE_FORMATS: + image_format = (value_mixer >> 8) & 3 + # To test combining different formats + if (image_format==1): small_img = small_img.to_bitmap(); status = 'bitmap ' + if (image_format==2): small_img = small_img.to_grayscale(); status = 'grayscale ' + if (image_format==3): small_img = small_img.to_rgb565(); status = 'rgb565 ' + + # update small image location + if BOUNCE: + x = x + xd + if (xxmax): + xd = -xd + + y = y + yd + if (yymax): + yd = -yd + + # Update small image scale + if RESCALE: + rescale = rescale + rd + if (rescalemax_rescale): + rd = -rd + + # Find the center of the image + scaled_width = int(small_img.width() * abs(rescale)) + scaled_height= int(small_img.height() * abs(rescale)) + + apply_mask = CYCLE_MASK and ((value_mixer >> 9) & 1) + if apply_mask: + img.draw_image(small_img, int(x), int(y), mask=small_img.to_bitmap(), x_scale=rescale, y_scale=rescale, alpha=240) + status += 'alpha:240 ' + status += '+mask ' + else: + img.draw_image(small_img, int(x), int(y), x_scale=rescale, y_scale=rescale, alpha=128) + status += 'alpha:128 ' + + img.draw_string_advanced(8, 0, 32, status) + + # draw result to screen + Display.show_image(img) + + del small_img + gc.collect() + + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py b/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py old mode 100644 new mode 100755 index c301d85b74c5..aadb61f4f5b3 --- a/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py +++ b/share/qtcreator/examples/10-Drawing/image_drawing_alpha_blending_test.py @@ -4,14 +4,14 @@ # method which can perform nearest neighbor, bilinear, bicubic, and # area scaling along with color channel extraction, alpha blending, # color palette application, and alpha palette application. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DETECT_WIDTH = 640 +DETECT_HEIGHT = 480 small_img = image.Image(4,4,image.RGB565) small_img.set_pixel(0, 0, (0, 0, 127)) @@ -38,118 +38,90 @@ alpha_value_init = 0 alpha_step_init = 2 -x_bounce_init = DISPLAY_WIDTH//2 +x_bounce_init = DETECT_WIDTH//2 x_bounce_toggle_init = 1 -y_bounce_init = DISPLAY_HEIGHT//2 +y_bounce_init = DETECT_HEIGHT//2 y_bounce_toggle_init = 1 -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # # set chn0 output size - # camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # # set chn0 output format - # camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # # create meida source device - # globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # # create meida sink device - # globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # # create meida link - # media.create_link(meida_source, meida_sink) - # # set display plane with video channel - # display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - # media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) +sensor = None + +try: + # construct a Sensor object with default configure + sensor = Sensor(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size + sensor.set_framesize(width = DETECT_WIDTH, height = DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.RGB565) + + # use hdmi as display output, set to VGA + # Display.init(Display.LT9611, width = 640, height = 480, to_ide = True) + + # use hdmi as display output, set to 1080P + # Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + + # 
use lcd as display output + # Display.init(Display.ST7701, to_ide = True) + + # use IDE as output + Display.init(Display.VIRT, width = DETECT_WIDTH, height = DETECT_HEIGHT, fps = 100) + + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + + fps = time.clock() + alpha_value = alpha_value_init alpha_step = alpha_step_init x_bounce = x_bounce_init x_bounce_toggle = x_bounce_toggle_init y_bounce = y_bounce_init y_bounce_toggle = y_bounce_toggle_init - fps = time.clock() + while True: fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - img.draw_image(big_img, x_bounce, y_bounce, rgb_channel=-1, alpha=alpha_value//alpha_div) - - x_bounce += x_bounce_toggle - if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle - - y_bounce += y_bounce_toggle - if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle - - alpha_value += alpha_step - if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step - - img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() + # check if should exit. + os.exitpoint() + + img = sensor.snapshot() + + img.draw_image(big_img, x_bounce, y_bounce, rgb_channel=-1, alpha=alpha_value//alpha_div) + + x_bounce += x_bounce_toggle + if abs(x_bounce-(img.width()//2)) >= (img.width()//2): x_bounce_toggle = -x_bounce_toggle + + y_bounce += y_bounce_toggle + if abs(y_bounce-(img.height()//2)) >= (img.height()//2): y_bounce_toggle = -y_bounce_toggle + + alpha_value += alpha_step + if not alpha_value or alpha_value//alpha_div == 256: alpha_step = -alpha_step + + # draw result to screen + Display.show_image(img) + + gc.collect() + print(fps.fps()) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # sensor stop run + if isinstance(sensor, Sensor): + sensor.stop() + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/keypoints_drawing.py b/share/qtcreator/examples/10-Drawing/keypoints_drawing.py old mode 100644 new mode 100755 index 09baf624629b..e783f11e952b --- a/share/qtcreator/examples/10-Drawing/keypoints_drawing.py +++ b/share/qtcreator/examples/10-Drawing/keypoints_drawing.py @@ -2,72 +2,49 @@ # # This example shows off drawing keypoints on the CanMV Cam. Usually you call draw_keypoints() # on a keypoints object but you can also call it on a list of 3-value tuples... 
+import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init media manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -78,27 +55,24 @@ def draw(): b = (urandom.getrandbits(30) % 127) + 128 # This method draws a keypoints object or a list of (x, y, rot) tuples... 
img.draw_keypoints([(x, y, rot)], color = (r, g, b), size = 20, thickness = 2, fill = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/line_drawing.py b/share/qtcreator/examples/10-Drawing/line_drawing.py old mode 100644 new mode 100755 index 0612dcf227a1..67428bc91c47 --- a/share/qtcreator/examples/10-Drawing/line_drawing.py +++ b/share/qtcreator/examples/10-Drawing/line_drawing.py @@ -1,72 +1,49 @@ # Line Drawing # # This example shows off drawing lines on the CanMV Cam. +import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init 
media manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x0 = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -79,27 +56,24 @@ def draw(): # If the first argument is a scaler then this method expects # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple. img.draw_line(x0, y0, x1, y1, color = (r, g, b), thickness = 2) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) + + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/rectangle_drawing.py b/share/qtcreator/examples/10-Drawing/rectangle_drawing.py old mode 100644 new mode 100755 index e9685114d2ad..d9d7ce878701 --- a/share/qtcreator/examples/10-Drawing/rectangle_drawing.py +++ b/share/qtcreator/examples/10-Drawing/rectangle_drawing.py @@ -2,71 +2,49 @@ # # This example shows off drawing rectangles on the CanMV Cam. 
-from media.camera import * +import time, os, gc, sys, urandom + from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") + # init media manager + MediaManager.init() -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() for i in range(10): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) @@ -79,27 +57,24 @@ def draw(): # If the first argument is a scaler then this method expects # to see x, y, w, and h. Otherwise, it expects a (x,y,w,h) tuple. 
img.draw_rectangle(x, y, w, h, color = (r, g, b), thickness = 2, fill = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # draw result to screen + Display.show_image(img) + + print(fps.fps()) -if __name__ == "__main__": - main() + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() + + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/10-Drawing/text_drawing.py b/share/qtcreator/examples/10-Drawing/text_drawing.py old mode 100644 new mode 100755 index d09d37bf13b6..1f2de173420c --- a/share/qtcreator/examples/10-Drawing/text_drawing.py +++ b/share/qtcreator/examples/10-Drawing/text_drawing.py @@ -1,106 +1,80 @@ # Text Drawing # # This example shows off drawing text on the CanMV Cam. +import time, os, gc, sys, urandom -from media.camera import * from media.display import * from media.media import * -import time, os, gc, sys, urandom -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 +DISPLAY_IS_HDMI = True +DISPLAY_IS_LCD = False +DISPLAY_IS_IDE = False -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) +try: + # default size + width = 640 + height = 480 + if DISPLAY_IS_HDMI: + # use hdmi as display output, set to 1080P + Display.init(Display.LT9611, width = 1920, height = 1080, to_ide = True) + width = 1920 + height = 1080 + elif DISPLAY_IS_LCD: + # use lcd as display output + Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + width = 800 + height = 480 + elif DISPLAY_IS_IDE: + # use IDE as output + Display.init(Display.VIRT, width = 800, height = 480, fps = 100) + width = 800 + height = 480 + else: + raise ValueError("Shoule select a display.") -def camera_deinit(): 
- # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + # init media manager + MediaManager.init() -def draw(): - # create image for drawing - img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() + # create image for drawing + img = image.Image(width, height, image.ARGB8888) + while True: fps.tick() + + # check if should exit. + os.exitpoint() + img.clear() - for i in range(10): + for i in range(3): x = (urandom.getrandbits(30) % (2*img.width())) - (img.width()//2) y = (urandom.getrandbits(30) % (2*img.height())) - (img.height()//2) r = (urandom.getrandbits(30) % 127) + 128 g = (urandom.getrandbits(30) % 127) + 128 b = (urandom.getrandbits(30) % 127) + 128 + size = (urandom.getrandbits(30) % 64) + 32 # If the first argument is a scaler then this method expects # to see x, y, and text. Otherwise, it expects a (x,y,text) tuple. # Character and string rotation can be done at 0, 90, 180, 270, and etc. degrees. - img.draw_string(x, y, "Hello World!", color = (r, g, b), scale = 2, mono_space = False, - char_rotation = 0, char_hmirror = False, char_vflip = False, - string_rotation = 0, string_hmirror = False, string_vflip = False) - img.copy_to(osd_img) - time.sleep(1) - os.exitpoint() + img.draw_string_advanced(x,y,size, "Hello World!,你好世界!!!", color = (r, g, b),) + + # draw result to screen + Display.show_image(img) + + print(fps.fps()) -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("draw") - draw() - except KeyboardInterrupt as e: - print("user stop: ", e) - except BaseException as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() + # max 100 fps + time.sleep_ms(10) +except KeyboardInterrupt as e: + print(f"user stop") +except BaseException as e: + print(f"Exception '{e}'") +finally: + # deinit display + Display.deinit() -if __name__ == "__main__": - main() + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + + # release media buffer + MediaManager.deinit() diff --git a/share/qtcreator/examples/11-Feature-Detection/edges.py b/share/qtcreator/examples/11-Feature-Detection/edges.py old mode 100644 new mode 100755 index 817ec3b22f58..e81ceaed106f --- a/share/qtcreator/examples/11-Feature-Detection/edges.py +++ b/share/qtcreator/examples/11-Feature-Detection/edges.py @@ -1,94 +1,78 @@ # Edge detection with Canny: # # This example demonstrates the Canny edge detector. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - 
camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + global sensor + dect_img = sensor.snapshot() # Use Canny edge detector - img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) + dect_img.find_edges(image.EDGE_CANNY, threshold=(50, 80)) # Faster simpler edge detection - #img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255)) - img.copy_to(osd_img) - del img + #dect_img.find_edges(image.EDGE_SIMPLE, threshold=(100, 255)) + + # draw result to screen + Display.show_image(dect_img) + + del dect_img gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/find_blobs.py b/share/qtcreator/examples/11-Feature-Detection/find_blobs.py new file mode 100755 index 000000000000..7bd5a51fb537 --- /dev/null +++ b/share/qtcreator/examples/11-Feature-Detection/find_blobs.py @@ -0,0 +1,99 @@ +# Find Blobs Example +# +# This example shows off how to find blobs in the image. +import time, os, gc, sys + +from media.sensor import * +from media.display import * +from media.media import * + +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None + +def camera_init(): + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + + # set chn0 output size + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # set chn0 output format + sensor.set_pixformat(Sensor.RGB565) + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() + +def camera_deinit(): + global sensor + # sensor stop run + sensor.stop() + # deinit display + Display.deinit() + # sleep + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(100) + # release media buffer + MediaManager.deinit() + +def capture_picture(): + + fps = time.clock() + while True: + fps.tick() + try: + os.exitpoint() + global sensor + img = sensor.snapshot() + + # select color + thresholds = [[0, 80, 40, 80, 10, 80]] # red + # thresholds = [[0, 80, -120, -10, 0, 30]] # green + # thresholds = [[0, 80, 30, 100, -120, -60]] # blue + # find all blobs,and draw rectangles + blobs=img.find_blobs(thresholds ,pixels_threshold= 500) + for blob in blobs: + img.draw_rectangle(blob[0], blob[1], blob[2], blob[3], color = (255, 255, 0)) + + # draw result to screen + Display.show_image(img) + img = None + + gc.collect() + print(fps.fps()) + except KeyboardInterrupt as e: + print("user stop: ", e) + break + except BaseException as e: + print(f"Exception {e}") + break + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + camera_is_init = False + try: + print("camera init") + camera_init() + camera_is_init = True + print("camera capture") + capture_picture() + except Exception as e: + print(f"Exception {e}") + finally: + if camera_is_init: + print("camera deinit") + camera_deinit() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/11-Feature-Detection/find_circles.py 
b/share/qtcreator/examples/11-Feature-Detection/find_circles.py deleted file mode 100644 index fd4f06c8239e..000000000000 --- a/share/qtcreator/examples/11-Feature-Detection/find_circles.py +++ /dev/null @@ -1,131 +0,0 @@ -# Find Circles Example -# -# This example shows off how to find circles in the image using the Hough -# Transform. https://en.wikipedia.org/wiki/Circle_Hough_Transform -# -# Note that the find_circles() method will only find circles which are completely -# inside of the image. Circles which go outside of the image/roi are ignored... - -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - # Circle objects have four values: x, y, r (radius), and magnitude. 
The - # magnitude is the strength of the detection of the circle. Higher is - # better... - - # `threshold` controls how many circles are found. Increase its value - # to decrease the number of circles detected... - - # `x_margin`, `y_margin`, and `r_margin` control the merging of similar - # circles in the x, y, and r (radius) directions. - - # r_min, r_max, and r_step control what radiuses of circles are tested. - # Shrinking the number of tested circle radiuses yields a big performance boost. - - for c in img.find_circles(threshold = 2000, x_margin = 10, y_margin = 10, r_margin = 10, - r_min = 2, r_max = 100, r_step = 2): - draw_img.draw_circle(c.x()*SCALE, c.y()*SCALE, c.r()*SCALE, color = (255, 0, 0)) - print(c) - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/11-Feature-Detection/find_lines.py b/share/qtcreator/examples/11-Feature-Detection/find_lines.py old mode 100644 new mode 100755 index c02d401eb539..9748cb122763 --- a/share/qtcreator/examples/11-Feature-Detection/find_lines.py +++ b/share/qtcreator/examples/11-Feature-Detection/find_lines.py @@ -8,17 +8,14 @@ # Please read about it above for more information on what `theta` and `rho` are. # find_lines() finds infinite length lines. Use find_line_segments() to find non-infinite lines. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 # All line objects have a `theta()` method to get their rotation angle in degrees. # You can filter lines based on their rotation angle. @@ -32,73 +29,53 @@ # About negative rho values: # # A [theta+0:-rho] tuple is the same as [theta+180:+rho]. 
+sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() + global sensor + img = sensor.snapshot() + # `threshold` controls how many lines in the image are found. Only lines with # edge difference magnitude sums greater than `threshold` are detected... 
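A hedged sketch of how the per-frame body of this example consumes find_lines(); the sensor and Display are assumed to be running as set up by camera_init() above, and the 0 to 179 degree window is illustrative (it accepts every line, narrow it to filter by angle).

min_degree = 0
max_degree = 179

img = sensor.snapshot()
# only lines whose edge-magnitude sum exceeds `threshold` are reported
for l in img.find_lines(threshold=1000, theta_margin=25, rho_margin=25):
    if min_degree <= l.theta() <= max_degree:
        img.draw_line(l.line(), color=(255, 0, 0))
        print(l)
Display.show_image(img)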
@@ -113,17 +90,20 @@ def capture_picture(): for l in img.find_lines(threshold = 1000, theta_margin = 25, rho_margin = 25): if (min_degree <= l.theta()) and (l.theta() <= max_degree): - draw_img.draw_line([v*SCALE for v in l.line()], color = (255, 0, 0)) + img.draw_line([v for v in l.line()], color = (255, 0, 0)) print(l) - draw_img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + img = None + gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -136,7 +116,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/find_rects.py b/share/qtcreator/examples/11-Feature-Detection/find_rects.py old mode 100644 new mode 100755 index 889f1af7c98f..e687b08d1b0b --- a/share/qtcreator/examples/11-Feature-Detection/find_rects.py +++ b/share/qtcreator/examples/11-Feature-Detection/find_rects.py @@ -6,101 +6,82 @@ # Transform based methods. For example, it can still detect rectangles even when lens # distortion causes those rectangles to look bent. Rounded rectangles are no problem! # (But, given this the code will also detect small radius circles too)... +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * 
DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() + global sensor + img = sensor.snapshot() + # `threshold` below should be set to a high enough value to filter out noise # rectangles detected in the image which have low edge magnitudes. Rectangles # have larger edge magnitudes the larger and more contrasty they are... for r in img.find_rects(threshold = 10000): - draw_img.draw_rectangle([v*SCALE for v in r.rect()], color = (255, 0, 0)) - for p in r.corners(): draw_img.draw_circle(p[0]*SCALE, p[1]*SCALE, 5*SCALE, color = (0, 255, 0)) + img.draw_rectangle([v for v in r.rect()], color = (255, 0, 0)) + for p in r.corners(): img.draw_circle(p[0], p[1], 5, color = (0, 255, 0)) print(r) - draw_img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + img = None + gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -113,7 +94,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/hog.py b/share/qtcreator/examples/11-Feature-Detection/hog.py old mode 100644 new mode 100755 index f32c070ede37..8c7fa40e10ef --- a/share/qtcreator/examples/11-Feature-Detection/hog.py +++ b/share/qtcreator/examples/11-Feature-Detection/hog.py @@ -4,83 +4,67 @@ # # Note: Due to JPEG artifacts, the HoG visualization looks blurry. To see the # image without JPEG artifacts, uncomment the lines that save the image to uSD. 
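Under the new API the per-frame body of this example reduces to a single call on a grayscale frame. A minimal sketch of one iteration, assuming a grayscale Sensor and Display output are already running as in the other rewritten examples:

img = sensor.snapshot()   # grayscale frame from the running sensor
img.find_hog()            # draws the HoG visualization in place
Display.show_image(img)
del img
gc.collect()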
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - 
camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + + global sensor + img = sensor.snapshot() img.find_hog() - img.copy_to(osd_img) + # draw result to screen + Display.show_image(img) + del img gc.collect() print(fps.fps()) @@ -88,7 +72,7 @@ def capture_picture(): print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/keypoints.py b/share/qtcreator/examples/11-Feature-Detection/keypoints.py deleted file mode 100644 index 64c013ffb2f4..000000000000 --- a/share/qtcreator/examples/11-Feature-Detection/keypoints.py +++ /dev/null @@ -1,145 +0,0 @@ -# Object tracking with keypoints example. -# Show the camera an object and then run the script. A set of keypoints will be extracted -# once and then tracked in the following frames. If you want a new set of keypoints re-run -# the script. NOTE: see the docs for arguments to tune find_keypoints and match_keypoints. - -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def camera_drop(frame): - for i in range(frame): - os.exitpoint() - img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - 
camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, img) - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - kpts1 = None - # NOTE: uncomment to load a keypoints descriptor from file - #kpts1 = image.load_descriptor("/desc.orb") - camera_drop(60) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - draw_img.clear() - if kpts1 == None: - # NOTE: By default find_keypoints returns multi-scale keypoints extracted from an image pyramid. - kpts1 = img.find_keypoints(max_keypoints=150, threshold=10, scale_factor=1.2) - if kpts1: - if SCALE == 1: - draw_img.draw_keypoints(kpts1) - draw_img.copy_to(osd_img) - time.sleep(2) - fps.reset() - else: - # NOTE: When extracting keypoints to match the first descriptor, we use normalized=True to extract - # keypoints from the first scale only, which will match one of the scales in the first descriptor. - kpts2 = img.find_keypoints(max_keypoints=150, threshold=10, normalized=True) - if kpts2: - match = image.match_descriptor(kpts1, kpts2, threshold=85) - if (match.count()>10): - # If we have at least n "good matches" - # Draw bounding rectangle and cross. - draw_img.draw_rectangle([v*SCALE for v in match.rect()]) - draw_img.draw_cross(match.cx()*SCALE, match.cy()*SCALE, size=10) - - print(kpts2, "matched:%d dt:%d"%(match.count(), match.theta())) - # NOTE: uncomment if you want to draw the keypoints - #img.draw_keypoints(kpts2, size=KEYPOINTS_SIZE, matched=True) - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/11-Feature-Detection/lbp.py b/share/qtcreator/examples/11-Feature-Detection/lbp.py old mode 100644 new mode 100755 index 6d7e95c26caf..6bc3cdbce4c8 --- a/share/qtcreator/examples/11-Feature-Detection/lbp.py +++ b/share/qtcreator/examples/11-Feature-Detection/lbp.py @@ -6,82 +6,67 @@ # WARNING: LBP supports needs to be reworked! As of right now this feature needs # a lot of work to be made into somethin useful. This script will reamin to show # that the functionality exists, but, in its current state is inadequate. 
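Before any descriptors are compared, the example below localises a face with a Haar cascade. A minimal sketch of that detection step under the new API, with the cascade name and thresholds taken from the example and the usual sensor/Display setup assumed:

# load the built-in frontal-face Haar cascade (more stages is slower but more accurate)
face_cascade = image.HaarCascade("frontalface", stages=25)

img = sensor.snapshot()
objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25)
if objects:
    face = objects[0]     # (x, y, w, h) of the first detection
    img.draw_rectangle(face, color=(255, 255, 255))
Display.show_image(img)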
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def camera_drop(frame): for i in range(frame): os.exitpoint() - img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, img) + global sensor + sensor.snapshot() def capture_picture(): # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, 
poolid=buffer.pool_id) - osd_img.clear() - osd_img.draw_string(0, 0, "Please wait...") - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + draw_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.ARGB8888) + draw_img.draw_string_advanced(0, 0, 32, "Please wait...") + draw_img.draw_string_advanced(0, 32, 32, "请稍后。。。") + # draw result to screen + Display.show_image(draw_img) # Load Haar Cascade # By default this will use all stages, lower satges is faster but less accurate. face_cascade = image.HaarCascade("frontalface", stages=25) @@ -96,10 +81,11 @@ def capture_picture(): fps.tick() try: os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) draw_img.clear() + + global sensor + img = sensor.snapshot() + objects = img.find_features(face_cascade, threshold=0.5, scale_factor=1.25) if objects: face = objects[0] @@ -108,21 +94,27 @@ def capture_picture(): d0 = d1 else: dist = image.match_descriptor(d0, d1) - draw_img.draw_string(0, 10, "Match %d%%"%(dist)) + draw_img.draw_string_advanced(0, 32, 32, "Match %d%%"%(dist)) print("Match %d%%"%(dist)) draw_img.draw_rectangle([v*SCALE for v in face]) # Draw FPS - draw_img.draw_string(0, 0, "FPS:%.2f"%(fps.fps())) - draw_img.copy_to(osd_img) + draw_img.draw_string_advanced(0, 0, 32, "FPS:%.2f"%(fps.fps())) + + # draw result to screen + Display.show_image(draw_img) + del img gc.collect() except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break + del draw_img + gc.collect() + def main(): os.exitpoint(os.EXITPOINT_ENABLE) @@ -134,7 +126,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py b/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py old mode 100644 new mode 100755 index 8f8856a01d28..aa554d912258 --- a/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py +++ b/share/qtcreator/examples/11-Feature-Detection/linear_regression_fast.py @@ -9,86 +9,68 @@ # This is called the fast linear regression because we use the least-squares # method to fit the line. However, this method is NOT GOOD FOR ANY images that # have a lot (or really any) outlier points which corrupt the line fit... +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE + +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 THRESHOLD = (0, 100) # Grayscale threshold for dark things... BINARY_VISIBLE = True # Does binary first so you can see what the linear regression # is being run on... might lower FPS though. 
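A condensed sketch of the per-frame regression step used below, assuming a grayscale sensor is running and THRESHOLD and BINARY_VISIBLE are defined as above:

img = sensor.snapshot()
# optionally binarize first so you can see what the regression is fit on
img = img.binary([THRESHOLD]) if BINARY_VISIBLE else img
# least-squares line fit over the thresholded pixels
line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD])
if line:
    img.draw_line(line.line(), color=127)
print("mag = %s" % (str(line.magnitude()) if line else "N/A"))
Display.show_image(img)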
+sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - osd_img.draw_string(0, 0, "Please wait...") - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) + + global sensor + img = sensor.snapshot() img = img.binary([THRESHOLD]) if BINARY_VISIBLE else img # Returns a line object similar to line objects returned by find_lines() and # find_line_segments(). 
You have x1(), y1(), x2(), y2(), length(), @@ -100,14 +82,17 @@ def capture_picture(): line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD]) if (line): img.draw_line(line.line(), color = 127) print("FPS %f, mag = %s" % (fps.fps(), str(line.magnitude()) if (line) else "N/A")) - img.copy_to(osd_img) + + # draw result to screen + Display.show_image(img) + del img gc.collect() except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -120,7 +105,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py b/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py deleted file mode 100644 index 7b97425edcc1..000000000000 --- a/share/qtcreator/examples/11-Feature-Detection/linear_regression_robust.py +++ /dev/null @@ -1,132 +0,0 @@ -# Robust Linear Regression Example -# -# This example shows off how to use the get_regression() method on your CanMV Cam -# to get the linear regression of a ROI. Using this method you can easily build -# a robot which can track lines which all point in the same general direction -# but are not actually connected. Use find_blobs() on lines that are nicely -# connected for better filtering options and control. -# -# We're using the robust=True argument for get_regression() in this script which -# computes the linear regression using a much more robust algorithm... but potentially -# much slower. The robust algorithm runs in O(N^2) time on the image. So, YOU NEED -# TO LIMIT THE NUMBER OF PIXELS the robust algorithm works on or it can actually -# take seconds for the algorithm to give you a result... THRESHOLD VERY CAREFULLY! - -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -THRESHOLD = (0, 100) # Grayscale threshold for dark things... -BINARY_VISIBLE = True # Does binary first so you can see what the linear regression - # is being run on... might lower FPS though. 
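The robust variant demonstrated by this deleted script differs from the fast one only in the robust=True flag, at the cost the comment above describes; a one-line sketch of the difference, reusing the same THRESHOLD and BINARY_VISIBLE settings:

# outlier-tolerant fit; runs in O(N^2) on the thresholded pixels, so threshold carefully
line = img.get_regression([(255, 255) if BINARY_VISIBLE else THRESHOLD], robust=True)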
- -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - osd_img.draw_string(0, 0, "Please wait...") - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - img = img.binary([THRESHOLD]) if BINARY_VISIBLE else img - # Returns a line object similar to line objects returned by find_lines() and - # find_line_segments(). You have x1(), y1(), x2(), y2(), length(), - # theta() (rotation in degrees), rho(), and magnitude(). - # - # magnitude() represents how well the linear regression worked. It goes from - # (0, INF] where 0 is returned for a circle. The more linear the - # scene is the higher the magnitude. 
- line = img.get_regression([(255,255) if BINARY_VISIBLE else THRESHOLD], robust = True) - if (line): img.draw_line(line.line(), color = 127) - print("FPS %f, mag = %s" % (fps.fps(), str(line.magnitude()) if (line) else "N/A")) - img.copy_to(osd_img) - del img - gc.collect() - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/11-Feature-Detection/template_matching.py b/share/qtcreator/examples/11-Feature-Detection/template_matching.py deleted file mode 100644 index 23e09713410a..000000000000 --- a/share/qtcreator/examples/11-Feature-Detection/template_matching.py +++ /dev/null @@ -1,134 +0,0 @@ -# Template Matching Example - Normalized Cross Correlation (NCC) -# -# This example shows off how to use the NCC feature of your CanMV Cam to match -# image patches to parts of an image... expect for extremely controlled enviorments -# NCC is not all to useful. -# -# WARNING: NCC supports needs to be reworked! As of right now this feature needs -# a lot of work to be made into somethin useful. This script will reamin to show -# that the functionality exists, but, in its current state is inadequate. - -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys -from image import SEARCH_EX, SEARCH_DS - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - 
display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for drawing - draw_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - osd_img.draw_string(0, 0, "Please wait...") - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - # Load template. - # Template should be a small (eg. 32x32 pixels) grayscale image. - # template = image.Image("/sd/template.bmp") - # template.to_grayscale() - template = image.Image(32, 32, image.GRAYSCALE) - template.draw_circle(16,16,16,color=(255,255,255),fill=True) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - yuv420_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = image.Image(yuv420_img.width(), yuv420_img.height(), image.GRAYSCALE, data=yuv420_img) - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, yuv420_img) - draw_img.clear() - # find_template(template, threshold, [roi, step, search]) - # ROI: The region of interest tuple (x, y, w, h). - # Step: The loop step used (y+=step, x+=step) use a bigger step to make it faster. - # Search is either image.SEARCH_EX for exhaustive search or image.SEARCH_DS for diamond search - # - # Note1: ROI has to be smaller than the image and bigger than the template. - # Note2: In diamond search, step and ROI are both ignored. - r = img.find_template(template, 0.50, step=4, search=SEARCH_EX) #, roi=(10, 0, 60, 60)) - if r: - draw_img.draw_rectangle([v*SCALE for v in r],color=(255,0,0)) - draw_img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py b/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py old mode 100644 new mode 100755 index 0444835aa7d6..906c898b112f --- a/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py +++ b/share/qtcreator/examples/12-Image-Filters/adaptive_histogram_equalization.py @@ -5,81 +5,64 @@ # into regions and then equalizes the histogram in those regions to improve # the image contrast versus a global histogram equalization. Additionally, # you may specify a clip limit to prevent the contrast from going wild. 
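In the rewritten example the whole per-frame body is a single histeq() call. A hedged sketch, assuming an RGB565 sensor and Display are running and using the clip_limit value from the example:

img = sensor.snapshot()
# tile-based adaptive equalization; a small clip_limit (> 1) limits contrast noise,
# while larger values approach unclipped adaptive equalization
img.histeq(adaptive=True, clip_limit=3)
Display.show_image(img)
img = None
gc.collect()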
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = 
sensor.snapshot() + # A clip_limit of < 0 gives you normal adaptive histogram equalization # which may result in huge amounts of contrast noise... @@ -87,15 +70,18 @@ def capture_picture(): # than 1 like below. The higher you go the closer you get back to # standard adaptive histogram equalization with huge contrast swings. img.histeq(adaptive=True, clip_limit=3) - img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -108,7 +94,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/blur_filter.py b/share/qtcreator/examples/12-Image-Filters/blur_filter.py old mode 100644 new mode 100755 index 1dadebc45a43..92fbc420d88e --- a/share/qtcreator/examples/12-Image-Filters/blur_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/blur_filter.py @@ -1,92 +1,79 @@ # Blur Filter Example # # This example shows off using the guassian filter to blur images. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + 
sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = sensor.snapshot() + # Run the kernel on every pixel of the image. img.gaussian(1) - img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +86,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py b/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py deleted file mode 100644 index e1c44c69d469..000000000000 --- a/share/qtcreator/examples/12-Image-Filters/cartoon_filter.py +++ /dev/null @@ -1,119 +0,0 @@ -# Cartoon Filter -# -# This example shows off a simple cartoon filter on images. The cartoon -# filter works by joining similar pixel areas of an image and replacing -# the pixels in those areas with the area mean. 
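For reference, the deleted cartoon_filter.py drove the region-merging described in the header above with a single call per frame, bounded by the seed_threshold and floating_thresholds parameters that appear further down in the removed body. A minimal per-frame sketch against the new Sensor/Display API used elsewhere in this diff, assuming the same camera_init() pattern has already run; whether cartoon() is still present in the current firmware is not confirmed by this deletion, so treat the call itself as an assumption:

# Hypothetical sketch only: cartoon() availability on current firmware is assumed, not verified by this diff.
img = sensor.snapshot()                      # grab an RGB565 frame from the running sensor
img.cartoon(seed_threshold=0.05,             # limits how far a flat region may grow
            floating_thresholds=0.05)        # limits pixel-to-pixel difference while growing
Display.show_image(img)                      # push the flattened, cartoon-like frame to the IDE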
- -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - # seed_threshold controls the maximum area growth of a colored - # region. Making this larger will merge more pixels. - - # floating_threshold controls the maximum pixel-to-pixel difference - # when growing a region. Settings this very high will quickly combine - # all pixels in the image. You should keep this small. - - # cartoon() will grow regions while both thresholds are statisfied... 
- - img.cartoon(seed_threshold=0.05, floating_thresholds=0.05) - img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py b/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py deleted file mode 100644 index 62feb0048a48..000000000000 --- a/share/qtcreator/examples/12-Image-Filters/color_bilateral_filter.py +++ /dev/null @@ -1,121 +0,0 @@ -# Color Bilteral Filter Example -# -# This example shows off using the bilateral filter on color images. - -from media.camera import * -from media.display import * -from media.media import * -import time, os, gc, sys - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, 
DISPLAY_CHN_OSD0) - fps = time.clock() - while True: - fps.tick() - try: - os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - # color_sigma controls how close color wise pixels have to be to each other to be - # blured togheter. A smaller value means they have to be closer. - # A larger value is less strict. - - # space_sigma controls how close space wise pixels have to be to each other to be - # blured togheter. A smaller value means they have to be closer. - # A larger value is less strict. - - # Run the kernel on every pixel of the image. - img.bilateral(3, color_sigma=0.1, space_sigma=1) - - # Note that the bilateral filter can introduce image defects if you set - # color_sigma/space_sigma to aggresively. Increase the sigma values until - # the defects go away if you see them. - img.copy_to(osd_img) - del img - gc.collect() - print(fps.fps()) - except KeyboardInterrupt as e: - print("user stop: ", e) - break - except BaseException as e: - sys.print_exception(e) - break - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE) - camera_is_init = False - try: - print("camera init") - camera_init() - camera_is_init = True - print("camera capture") - capture_picture() - except Exception as e: - sys.print_exception(e) - finally: - if camera_is_init: - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py b/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py old mode 100644 new mode 100755 index 16f25b294520..cf286bcd4e3e --- a/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/color_binary_filter.py @@ -2,87 +2,71 @@ # # This script shows off the binary image filter. You may pass binary any # number of thresholds to segment the image by. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 # Use the Tools -> Machine Vision -> Threshold Edtor to pick better thresholds. 
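A quick illustration of how the LAB threshold tuples defined immediately below (red_threshold, green_threshold, blue_threshold) are consumed: each tuple is (L_min, L_max, A_min, A_max, B_min, B_max), and binary() accepts a list of any number of them, turning pixels inside any listed range white and everything else black. A minimal sketch, assuming the converted camera_init() from this file has already run:

# Sketch: segment a frame with one or several LAB ranges at once.
red_threshold  = (0, 100,    0, 127,    0, 127)   # (L_min, L_max, A_min, A_max, B_min, B_max)
blue_threshold = (0, 100, -128, 127, -128,   0)

img = sensor.snapshot()
img.binary([red_threshold, blue_threshold])   # pixels matching either range become white, the rest black
Display.show_image(img)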
red_threshold = (0,100, 0,127, 0,127) # L A B green_threshold = (0,100, -128,0, 0,127) # L A B blue_threshold = (0,100, -128,127, -128,0) # L A B +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + frame_count = 0 fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = sensor.snapshot() + # Test red threshold if frame_count < 100: img.binary([red_threshold]) @@ -104,15 +88,16 @@ def capture_picture(): else: frame_count = 0 frame_count = frame_count + 1 - 
img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -125,7 +110,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/color_light_removal.py b/share/qtcreator/examples/12-Image-Filters/color_light_removal.py old mode 100644 new mode 100755 index c6319e794381..617b17f49f9f --- a/share/qtcreator/examples/12-Image-Filters/color_light_removal.py +++ b/share/qtcreator/examples/12-Image-Filters/color_light_removal.py @@ -6,93 +6,78 @@ # Removing bright lights from the image allows you to now use # histeq() on the image without outliers from oversaturated # parts of the image breaking the algorithm... +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 thresholds = (90, 100, -128, 127, -128, 127) +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() 
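The point of zeroing the over-bright pixels (thresholds = (90, 100, -128, 127, -128, 127) selects high-L values) is that a later histeq() is no longer skewed by saturated highlights, as the header comment explains. A minimal sketch of that two-step idea, assuming the camera_init() above has already run; note that chaining histeq() after the removal is an illustration implied by the header, not something this particular example does itself:

# Sketch: black out saturated highlights, then equalize the remaining tonal range.
img = sensor.snapshot()
img.binary([thresholds], invert=False, zero=True)   # zero=True zeroes matching (very bright) pixels instead of binarizing
img.histeq()                                        # contrast stretch now ignores the removed outliers
Display.show_image(img)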
def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() + img.binary([thresholds], invert=False, zero=True) - img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + img = None + gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -105,7 +90,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/edge_filter.py b/share/qtcreator/examples/12-Image-Filters/edge_filter.py old mode 100644 new mode 100755 index 84355a86d603..fbfbcd259a8e --- a/share/qtcreator/examples/12-Image-Filters/edge_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/edge_filter.py @@ -1,92 +1,78 @@ # Edge Filter Example # # This example shows off using the laplacian filter to detect edges. 
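The per-frame work in this edge filter example reduces to one call: size 1 selects a 3x3 Laplacian kernel, and sharpen=True folds the edge response back into the original so edges are accentuated rather than rendered on a black background (that sharpen behaviour is taken from the upstream OpenMV semantics, not stated in this diff). A minimal sketch, assuming the converted camera_init() below has already run:

# Sketch: Laplacian edge filter on a live frame.
img = sensor.snapshot()
img.laplacian(1, sharpen=True)   # 3x3 kernel; with sharpen=True the edges sharpen the source instead of replacing it
Display.show_image(img)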
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = 
sensor.snapshot() + # Run the kernel on every pixel of the image. img.laplacian(1,sharpen=True) - img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + img = None + gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py b/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py old mode 100644 new mode 100755 index b38e868ca376..1b7fb10c9ca2 --- a/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py +++ b/share/qtcreator/examples/12-Image-Filters/erode_and_dilate.py @@ -3,85 +3,66 @@ # This example shows off the erode and dilate functions which you can run on # a binary image to remove noise. This example was originally a test but its # useful for showing off how these functions work. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 grayscale_thres = (170, 255) rgb565_thres = (70, 100, -128, 127, -128, 127) +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + 
Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + frame_count = 0 fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # Test red threshold if frame_count < 100: img.binary([rgb565_thres]) @@ -105,15 +86,16 @@ def capture_picture(): else: frame_count = 0 frame_count = frame_count + 1 - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -126,7 +108,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/gamma_correction.py b/share/qtcreator/examples/12-Image-Filters/gamma_correction.py old mode 100644 new mode 100755 index 646abd687b33..e0bf8717724c --- a/share/qtcreator/examples/12-Image-Filters/gamma_correction.py +++ b/share/qtcreator/examples/12-Image-Filters/gamma_correction.py @@ -2,93 +2,74 @@ # # This example shows off gamma correction to make the image brighter. The gamma # correction method can also fix contrast and brightness too. 
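The whole gamma-correction effect is a single call on each frame; with gamma = 0.5 the image is brightened (as the header says), while contrast = 1.0 and brightness = 0.0 leave those components untouched. A minimal per-frame sketch, assuming the converted camera_init() below has already run:

# Sketch: gamma/contrast/brightness correction applied per colour channel.
img = sensor.snapshot()
img.gamma_corr(gamma=0.5, contrast=1.0, brightness=0.0)   # 0.5 brightens here; larger gamma values darken instead
Display.show_image(img)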
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() # Gamma, contrast, and brightness correction are applied to each color channel. The # values are scaled to the range per color channel per image type... img.gamma_corr(gamma = 0.5, contrast = 1.0, brightness = 0.0) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +82,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py b/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py old mode 100644 new mode 100755 index 57a82ed8c66a..f170dcce8e25 --- a/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/grayscale_bilateral_filter.py @@ -1,81 +1,63 @@ # Grayscale Bilteral Filter Example # # This example shows off using the bilateral filter on grayscale images. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + 
Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = sensor.snapshot() # color_sigma controls how close color wise pixels have to be to each other to be # blured togheter. A smaller value means they have to be closer. # A larger value is less strict. @@ -91,15 +73,17 @@ def capture_picture(): # color_sigma/space_sigma to aggresively. Increase the sigma values until # the defects go away if you see them. - img.copy_to(osd_img) + # draw result to screen + Display.show_image(img) del img + gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -112,7 +96,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py b/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py old mode 100644 new mode 100755 index 08d8e02211ed..da5335b45ab3 --- a/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/grayscale_binary_filter.py @@ -2,85 +2,65 @@ # # This script shows off the binary image filter. You may pass binary any # number of thresholds to segment the image by. 
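For grayscale images each threshold is simply a (min, max) tuple, such as the low_threshold and high_threshold pairs defined in the hunk below. A minimal sketch, assuming the converted camera_init() below (GRAYSCALE pixformat) has already run; the invert flag is assumed to behave as in upstream OpenMV, selecting pixels outside the given range:

# Sketch: two-sided grayscale thresholding.
low_threshold  = (0, 50)      # very dark pixels
high_threshold = (205, 255)   # very bright pixels

img = sensor.snapshot()
img.binary([low_threshold, high_threshold])    # keep only the extremes as white
# img.binary([low_threshold], invert=True)     # assumed behaviour: invert selects everything outside the range
Display.show_image(img)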
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 low_threshold = (0, 50) high_threshold = (205, 255) +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) frame_count = 0 fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - 
camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # Test low threshold if frame_count < 100: img.binary([low_threshold]) @@ -96,7 +76,9 @@ def capture_picture(): else: frame_count = 0 frame_count = frame_count + 1 - img.copy_to(osd_img) + # draw result to screen + Display.show_image(img) + del img gc.collect() print(fps.fps()) @@ -104,7 +86,7 @@ def capture_picture(): print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -117,7 +99,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py b/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py old mode 100644 new mode 100755 index b03181aed39f..5a16e9943232 --- a/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py +++ b/share/qtcreator/examples/12-Image-Filters/grayscale_light_removal.py @@ -6,85 +6,67 @@ # Removing bright lights from the image allows you to now use # histeq() on the image without outliers from oversaturated # parts of the image breaking the algorithm... +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 thresholds = (220, 255) +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - 
camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_grayscale() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() img.binary([thresholds], invert=False, zero=True) - img.copy_to(osd_img) + # draw result to screen + Display.show_image(img) del img gc.collect() print(fps.fps()) @@ -92,7 +74,7 @@ def capture_picture(): print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -105,7 +87,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py b/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py old mode 100644 new mode 100755 index 1165539c7591..b0cb66d98a02 --- a/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py +++ b/share/qtcreator/examples/12-Image-Filters/histogram_equalization.py @@ -2,91 +2,74 @@ # # This example shows off how to use histogram equalization to improve # the contrast in the image. 
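histeq() remaps the intensity histogram so the full dynamic range is used; this example applies the plain global form, while the adaptive example earlier in this diff passes adaptive=True with a clip_limit (a CLAHE-style variant). A minimal per-frame sketch, assuming the converted camera_init() below has already run:

# Sketch: global vs. adaptive histogram equalization.
img = sensor.snapshot()
img.histeq()                                 # global equalization, as in this example
# img.histeq(adaptive=True, clip_limit=3)    # adaptive variant used by the other example in this diff
Display.show_image(img)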
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() + img.histeq() - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +82,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/kernel_filters.py b/share/qtcreator/examples/12-Image-Filters/kernel_filters.py old mode 100644 new mode 100755 index 3c8294fd0088..395df4723ea5 --- a/share/qtcreator/examples/12-Image-Filters/kernel_filters.py +++ b/share/qtcreator/examples/12-Image-Filters/kernel_filters.py @@ -1,17 +1,14 @@ # Kernel Filtering Example # # This example shows off how to use a generic kernel filter. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 kernel_size = 1 # 3x3==1, 5x5==2, 7x7==3, etc. @@ -19,80 +16,65 @@ -1, 1, 1, 0, 1, 2] +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.GRAYSCALE) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): 
-    # stop stream for camera device0
-    camera.stop_stream(CAM_DEV_ID_0)
+    global sensor
+
+    # sensor stop run
+    sensor.stop()
     # deinit display
-    display.deinit()
+    Display.deinit()
+    # sleep
     os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
     time.sleep_ms(100)
     # release media buffer
-    media.release_buffer(globals()["buffer"])
-    # destroy media link
-    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
-    # deinit media buffer
-    media.buffer_deinit()
+    MediaManager.deinit()
 
 def capture_picture():
-    # create image for osd
-    buffer = globals()["buffer"]
-    osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.GRAYSCALE, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
-    osd_img.clear()
-    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
     fps = time.clock()
     while True:
         fps.tick()
         try:
             os.exitpoint()
-            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
-            img = rgb888_img.to_grayscale()
-            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            global sensor
+            img = sensor.snapshot()
             # Run the kernel on every pixel of the image.
             img.morph(kernel_size, kernel)
-            img.copy_to(osd_img)
-            del img
+            # draw result to screen
+            Display.show_image(img)
+            img = None
             gc.collect()
             print(fps.fps())
         except KeyboardInterrupt as e:
             print("user stop: ", e)
             break
         except BaseException as e:
-            sys.print_exception(e)
+            print(f"Exception {e}")
             break
 
 def main():
@@ -105,7 +87,7 @@ def main():
         print("camera capture")
         capture_picture()
     except Exception as e:
-        sys.print_exception(e)
+        print(f"Exception {e}")
     finally:
         if camera_is_init:
             print("camera deinit")
diff --git a/share/qtcreator/examples/12-Image-Filters/lens_correction.py b/share/qtcreator/examples/12-Image-Filters/lens_correction.py
old mode 100644
new mode 100755
index 7fc05609477b..68932f318c97
--- a/share/qtcreator/examples/12-Image-Filters/lens_correction.py
+++ b/share/qtcreator/examples/12-Image-Filters/lens_correction.py
@@ -4,91 +4,74 @@
 # distortion in an image. You need to do this for qrcode / barcode / data matrix
 # detection. Increase the strength below until lines are straight in the view.
 # Zoom in (higher) or out (lower) until you see enough of the image.
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() + img.lens_corr(strength = 1.8, zoom = 1.0) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +84,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/linear_polar.py b/share/qtcreator/examples/12-Image-Filters/linear_polar.py old mode 100644 new mode 100755 index a8de929ebb56..d75a96f50a9f --- a/share/qtcreator/examples/12-Image-Filters/linear_polar.py +++ b/share/qtcreator/examples/12-Image-Filters/linear_polar.py @@ -4,91 +4,75 @@ # transformation. Linear polar images are useful in that rotations # become translations in the X direction and linear changes # in scale become linear translations in the Y direction. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + 
sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() + img.linpolar(reverse=False) - img.copy_to(osd_img) - del img + + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/log_polar.py b/share/qtcreator/examples/12-Image-Filters/log_polar.py old mode 100644 new mode 100755 index 52a185837da7..5bcbced599ab --- a/share/qtcreator/examples/12-Image-Filters/log_polar.py +++ b/share/qtcreator/examples/12-Image-Filters/log_polar.py @@ -4,91 +4,75 @@ # transformation. Log polar images are useful in that rotations # become translations in the X direction and exponential changes # in scale (x2, x4, etc.) become linear translations in the Y direction. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() + img.logpolar(reverse=False) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -101,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py old mode 100644 new mode 100755 index c5375cdd5916..378c39e1748c --- a/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/mean_adaptive_threshold_filter.py @@ -3,94 +3,76 @@ # This example shows off mean filtering with adaptive thresholding. # When mean(threshold=True) the mean() method adaptive thresholds the image # by comparing the mean of the pixels around a pixel, minus an offset, with that pixel. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = 
DETECT_HEIGHT,fps=100,to_ide = True)
+    # init media manager
+    MediaManager.init()
+    # sensor start run
+    sensor.run()
 
 def camera_deinit():
-    # stop stream for camera device0
-    camera.stop_stream(CAM_DEV_ID_0)
+    global sensor
+
+    # sensor stop run
+    sensor.stop()
     # deinit display
-    display.deinit()
+    Display.deinit()
+    # sleep
     os.exitpoint(os.EXITPOINT_ENABLE_SLEEP)
     time.sleep_ms(100)
     # release media buffer
-    media.release_buffer(globals()["buffer"])
-    # destroy media link
-    media.destroy_link(globals()["meida_source"], globals()["meida_sink"])
-    # deinit media buffer
-    media.buffer_deinit()
+    MediaManager.deinit()
 
 def capture_picture():
-    # create image for osd
-    buffer = globals()["buffer"]
-    osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id)
-    osd_img.clear()
-    display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0)
     fps = time.clock()
     while True:
         fps.tick()
         try:
             os.exitpoint()
-            rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1)
-            img = rgb888_img.to_rgb565()
-            camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img)
+            global sensor
+            img = sensor.snapshot()
             # The first argument is the kernel size. N corresponds to a ((N*2)+1)^2
             # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You
             # shouldn't ever need to use a value bigger than 2.
             img.mean(1, threshold=True, offset=5, invert=True)
-            img.copy_to(osd_img)
-            del img
+            # draw result to screen
+            Display.show_image(img)
+            img = None
             gc.collect()
             print(fps.fps())
         except KeyboardInterrupt as e:
             print("user stop: ", e)
             break
         except BaseException as e:
-            sys.print_exception(e)
+            print(f"Exception {e}")
             break
 
 def main():
@@ -103,7 +85,7 @@ def main():
         print("camera capture")
         capture_picture()
     except Exception as e:
-        sys.print_exception(e)
+        print(f"Exception {e}")
     finally:
         if camera_is_init:
             print("camera deinit")
diff --git a/share/qtcreator/examples/12-Image-Filters/mean_filter.py b/share/qtcreator/examples/12-Image-Filters/mean_filter.py
old mode 100644
new mode 100755
index fe320130e75d..01ae1ac01fa5
--- a/share/qtcreator/examples/12-Image-Filters/mean_filter.py
+++ b/share/qtcreator/examples/12-Image-Filters/mean_filter.py
@@ -3,94 +3,77 @@
 # This example shows off mean filtering. Mean filtering is your standard average
 # filter in a NxN neighborhood. Mean filtering removes noise in the image by
 # blurring everything. But, it's the fastest kernel filter operation.
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() + # The only argument is the kernel size. N coresponds to a ((N*2)+1)^2 # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You # shouldn't ever need to use a value bigger than 2. img.mean(1) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -103,7 +86,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py old mode 100644 new mode 100755 index a5860f22a731..d2ce7305d2a0 --- a/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/median_adaptive_threshold_filter.py @@ -3,96 +3,78 @@ # This example shows off median filtering with adaptive thresholding. # When median(threshold=True) the median() method adaptive thresholds the image # by comparing the median of the pixels around a pixel, minus an offset, with that pixel. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream 
for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # The first argument to the median filter is the kernel size, it can be # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second # argument "percentile" is the percentile number to choose from the NxN # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 # would be the upper quartile. img.median(1, percentile=0.5, threshold=True, offset=5, invert=True) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -105,7 +87,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/median_filter.py b/share/qtcreator/examples/12-Image-Filters/median_filter.py old mode 100644 new mode 100755 index 6fa5ff9a84e7..ca2bb2825298 --- a/share/qtcreator/examples/12-Image-Filters/median_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/median_filter.py @@ -3,96 +3,78 @@ # This example shows off median filtering. Median filtering replaces every pixel # with the median value of it's NxN neighborhood. Median filtering is good for # removing noise in the image while preserving edges. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() # The first argument to the median filter is the kernel size, it can be # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. The second # argument "percentile" is the percentile number to choose from the NxN # neighborhood. 0.5 is the median, 0.25 is the lower quartile, and 0.75 # would be the upper quartile. img.median(1, percentile=0.5) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -105,7 +87,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py old mode 100644 new mode 100755 index a4a4dbde461d..73b6943af563 --- a/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/midpoint_adaptive_threshold_filter.py @@ -3,81 +3,62 @@ # This example shows off midpoint filtering with adaptive thresholding. # When midpoint(threshold=True) the midpoint() method adaptive thresholds the image # by comparing the midpoint of the pixels around a pixel, minus an offset, with that pixel. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer 
init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # The first argument is the kernel size. N coresponds to a ((N*2)+1)^2 # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You # shouldn't ever need to use a value bigger than 2. The "bias" argument @@ -85,15 +66,16 @@ def capture_picture(): # 0.0 == min filter, and 1.0 == max filter. Note that the min filter # makes images darker while the max filter makes images lighter. img.midpoint(1, bias=0.5, threshold=True, offset=5, invert=True) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -106,7 +88,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py b/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py old mode 100644 new mode 100755 index cc7c426c8b7c..4415bd3981b6 --- a/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/midpoint_filter.py @@ -2,81 +2,62 @@ # # This example shows off midpoint filtering. Midpoint filtering replaces each # pixel by the average of the min and max pixel values for a NxN neighborhood. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() # The first argument is the kernel size. N coresponds to a ((N*2)+1)^2 # kernel size. E.g. 1 == 3x3 kernel, 2 == 5x5 kernel, etc. Note: You # shouldn't ever need to use a value bigger than 2. The "bias" argument @@ -84,15 +65,16 @@ def capture_picture(): # 0.0 == min filter, and 1.0 == max filter. Note that the min filter # makes images darker while the max filter makes images lighter. img.midpoint(1, bias=0.5) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -105,7 +87,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py b/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py old mode 100644 new mode 100755 index 8b17cae93e82..db57110cf973 --- a/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/mode_adaptive_threshold_filter.py @@ -4,93 +4,75 @@ # When mode(threshold=True) the mode() method adaptive thresholds the image # by comparing the mode of the pixels around a pixel, minus an offset, with that pixel. # Avoid using the mode filter on RGB565 images. It will cause artifacts on image edges... +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, 
CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # The only argument to the median filter is the kernel size, it can be # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. img.mode(1, threshold=True, offset=5, invert=True) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -103,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/mode_filter.py b/share/qtcreator/examples/12-Image-Filters/mode_filter.py old mode 100644 new mode 100755 index fa63b142a74f..258280ca46d7 --- a/share/qtcreator/examples/12-Image-Filters/mode_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/mode_filter.py @@ -4,93 +4,75 @@ # operation which replaces each pixel with the mode of the NxN neighborhood # of pixels around it. Avoid using the mode filter on RGB565 images. It will # cause artifacts on image edges... 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() # The only argument to the median filter is the kernel size, it can be # either 0, 1, or 2 for a 1x1, 3x3, or 5x5 kernel respectively. img.mode(1) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -103,7 +85,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/negative.py b/share/qtcreator/examples/12-Image-Filters/negative.py old mode 100644 new mode 100755 index dde77acc7d65..5d213ffacd6c --- a/share/qtcreator/examples/12-Image-Filters/negative.py +++ b/share/qtcreator/examples/12-Image-Filters/negative.py @@ -2,91 +2,73 @@ # # This example shows off negating the image. This is not a particularly # useful method but it can come in handy once in a while. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + 
MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() img.negate() - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +81,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py b/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py old mode 100644 new mode 100755 index 574b5a7fc483..37ab593ef414 --- a/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py +++ b/share/qtcreator/examples/12-Image-Filters/perspective_and_rotation_correction.py @@ -3,17 +3,14 @@ # This example shows off how to use the rotation_corr() to both correct for # perspective distortion and then to rotate the new corrected image in 3D # space aftwards to handle movement. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 # The image will be warped such that the following points become the new: # @@ -27,13 +24,13 @@ # on the image by clicking and dragging on the frame buffer and # recording the values shown in the histogram widget. -w = DETECT_WIDTH -h = DETECT_HEIGHT +width= DETECT_WIDTH +height = DETECT_HEIGHT TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! - (w-1, 0), # (x, y) CHANGE ME! - (w-1, h-1), # (x, y) CHANGE ME! - (0, h-1)] # (x, y) CHANGE ME! + (width-1, 0), # (x, y) CHANGE ME! + (width-1, height-1), # (x, y) CHANGE ME! + (0, height-1)] # (x, y) CHANGE ME! # Degrees per frame to rotation by... X_ROTATION_DEGREE_RATE = 5 @@ -51,61 +48,46 @@ # results in the more perspective distortion and sometimes # the image in 3D intersecting the scene window. 
+sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) x_rotation_counter = 0 y_rotation_counter = 0 z_rotation_counter = 0 @@ -114,9 +96,8 @@ def capture_picture(): fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() img.rotation_corr(x_rotation = x_rotation_counter, y_rotation = y_rotation_counter, z_rotation = z_rotation_counter, @@ -128,15 +109,16 @@ def capture_picture(): x_rotation_counter += X_ROTATION_DEGREE_RATE y_rotation_counter += Y_ROTATION_DEGREE_RATE z_rotation_counter 
+= Z_ROTATION_DEGREE_RATE - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -149,7 +131,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/perspective_correction.py b/share/qtcreator/examples/12-Image-Filters/perspective_correction.py old mode 100644 new mode 100755 index da36f3821bcc..a9fc545a8c9b --- a/share/qtcreator/examples/12-Image-Filters/perspective_correction.py +++ b/share/qtcreator/examples/12-Image-Filters/perspective_correction.py @@ -2,17 +2,14 @@ # # This example shows off how to use the rotation_corr() to fix perspective # issues related to how your CanMV Cam is mounted. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 # The image will be warped such that the following points become the new: # @@ -26,87 +23,73 @@ # on the image by clicking and dragging on the frame buffer and # recording the values shown in the histogram widget. -w = DETECT_WIDTH -h = DETECT_HEIGHT +width= DETECT_WIDTH +height = DETECT_HEIGHT TARGET_POINTS = [(0, 0), # (x, y) CHANGE ME! - (w-1, 0), # (x, y) CHANGE ME! - (w-1, h-1), # (x, y) CHANGE ME! - (0, h-1)] # (x, y) CHANGE ME! + (width-1, 0), # (x, y) CHANGE ME! + (width-1, height-1), # (x, y) CHANGE ME! + (0, height-1)] # (x, y) CHANGE ME! 
+ +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() + img.rotation_corr(corners = TARGET_POINTS) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -119,7 +102,7 @@ 
def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/rotation_correction.py b/share/qtcreator/examples/12-Image-Filters/rotation_correction.py old mode 100644 new mode 100755 index 8789df09176e..87d685151622 --- a/share/qtcreator/examples/12-Image-Filters/rotation_correction.py +++ b/share/qtcreator/examples/12-Image-Filters/rotation_correction.py @@ -2,17 +2,14 @@ # # This example shows off how to use the rotation_corr() to play with the scene # window your CanMV Cam sees. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 # Degrees per frame to rotation by... X_ROTATION_DEGREE_RATE = 5 @@ -30,61 +27,46 @@ # results in the more perspective distortion and sometimes # the image in 3D intersecting the scene window. +sensor = None + def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep 
os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) x_rotation_counter = 0 y_rotation_counter = 0 z_rotation_counter = 0 @@ -93,9 +75,8 @@ def capture_picture(): fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() img.rotation_corr(x_rotation = x_rotation_counter, y_rotation = y_rotation_counter, z_rotation = z_rotation_counter, @@ -107,15 +88,16 @@ def capture_picture(): x_rotation_counter += X_ROTATION_DEGREE_RATE y_rotation_counter += Y_ROTATION_DEGREE_RATE z_rotation_counter += Z_ROTATION_DEGREE_RATE - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -128,7 +110,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py b/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py old mode 100644 new mode 100755 index 7fca5b46075d..234454ce87b3 --- a/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/sharpen_filter.py @@ -1,92 +1,74 @@ # Sharpen Filter Example # # This example shows off using the laplacian filter to sharpen images. 
+import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(320, 16) +DETECT_HEIGHT = 240 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = 
sensor.snapshot() # Run the kernel on every pixel of the image. img.laplacian(1,sharpen=True) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +81,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py b/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py old mode 100644 new mode 100755 index e43e89e3e685..12774242b177 --- a/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py +++ b/share/qtcreator/examples/12-Image-Filters/unsharp_filter.py @@ -1,92 +1,74 @@ # Unsharp Filter Example # # This example shows off using the guassian filter to unsharp mask filter images. +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop 
stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) fps = time.clock() while True: fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + global sensor + img = sensor.snapshot() # Run the kernel on every pixel of the image. img.gaussian(1,unsharp=True) - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -99,7 +81,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py b/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py old mode 100644 new mode 100755 index 1d4371906b46..b2bfa0c62f9e --- a/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py +++ b/share/qtcreator/examples/12-Image-Filters/vflip_hmirror_transpose.py @@ -7,73 +7,55 @@ # vflip=True, hmirror=False, transpose=True -> 90 degree rotation # vflip=True, hmirror=True, transpose=False -> 180 degree rotation # vflip=False, hmirror=True, transpose=True -> 270 degree rotation +import time, os, gc, sys -from media.camera import * +from media.sensor import * from media.display import * from media.media import * -import time, os, gc, sys -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 -SCALE = 4 -DETECT_WIDTH = DISPLAY_WIDTH // SCALE -DETECT_HEIGHT = DISPLAY_HEIGHT // SCALE +DETECT_WIDTH = ALIGN_UP(640, 16) +DETECT_HEIGHT = 480 + +sensor = None def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) + global sensor + + # construct a Sensor object with default configure + sensor = Sensor(width=DETECT_WIDTH,height=DETECT_HEIGHT) + # sensor reset + sensor.reset() + # set hmirror + # sensor.set_hmirror(False) + # sensor vflip + # sensor.set_vflip(False) + # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) + sensor.set_framesize(width=DETECT_WIDTH,height=DETECT_HEIGHT) # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - 
# create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output nv12 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DETECT_WIDTH, DETECT_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # request media buffer for osd image - globals()["buffer"] = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) + sensor.set_pixformat(Sensor.RGB565) + + # use IDE as display output + Display.init(Display.VIRT, width= DETECT_WIDTH, height = DETECT_HEIGHT,fps=100,to_ide = True) + # init media manager + MediaManager.init() + # sensor start run + sensor.run() def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) + global sensor + + # sensor stop run + sensor.stop() # deinit display - display.deinit() + Display.deinit() + # sleep os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) time.sleep_ms(100) # release media buffer - media.release_buffer(globals()["buffer"]) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def capture_picture(): - # create image for osd - buffer = globals()["buffer"] - osd_img = image.Image(DETECT_WIDTH, DETECT_HEIGHT, image.RGB565, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) mills = time.ticks_ms() counter = 0 fps = time.clock() @@ -81,9 +63,10 @@ def capture_picture(): fps.tick() try: os.exitpoint() - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - img = rgb888_img.to_rgb565() - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) + + global sensor + img = sensor.snapshot() + img.replace(vflip=(counter//2)%2, hmirror=(counter//4)%2, transpose=False) @@ -91,15 +74,16 @@ def capture_picture(): if (time.ticks_ms() > (mills + 1000)): mills = time.ticks_ms() counter += 1 - img.copy_to(osd_img) - del img + # draw result to screen + Display.show_image(img) + img = None gc.collect() print(fps.fps()) except KeyboardInterrupt as e: print("user stop: ", e) break except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") break def main(): @@ -112,7 +96,7 @@ def main(): print("camera capture") capture_picture() except Exception as e: - sys.print_exception(e) + print(f"Exception {e}") finally: if camera_is_init: print("camera deinit") diff --git a/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py b/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py deleted file mode 100644 index 7a7a513bd257..000000000000 --- a/share/qtcreator/examples/13-Snapshot/emboss_snapshot.py +++ /dev/null @@ -1,77 +0,0 @@ -# Emboss Snapshot Example -# -# Note: You will need an SD card to run this example. -# -# You can use your CanMV Cam to save modified image files. 
- -from media.camera import * -from media.display import * -from media.media import * -import time, os - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output rgb888 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - time.sleep(1) - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - try: - img = rgb888_img.to_rgb565() - img.morph(1, [+2, +1, +0, - +1, +1, -1, - +0, -1, -2]) # Emboss the image. - img.save("/sdcard/snapshot_emboss.jpg") - print("save image ok") - except Exception as e: - print("save image fail: ", e) - # release image for dev and chn - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - print("camera init") - camera_init() - print("camera capture") - capture_picture() - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/13-Snapshot/snapshot.py b/share/qtcreator/examples/13-Snapshot/snapshot.py deleted file mode 100644 index 9656f2646931..000000000000 --- a/share/qtcreator/examples/13-Snapshot/snapshot.py +++ /dev/null @@ -1,73 +0,0 @@ -# Snapshot Example -# -# Note: You will need an SD card to run this example. -# -# You can use your CanMV Cam to save image files. 
- -from media.camera import * -from media.display import * -from media.media import * -import time, os - -DISPLAY_WIDTH = ALIGN_UP(1920, 16) -DISPLAY_HEIGHT = 1080 - -def camera_init(): - # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # init default sensor - camera.sensor_init(CAM_DEV_ID_0, CAM_DEFAULT_SENSOR) - # set chn0 output size - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_0, DISPLAY_WIDTH, DISPLAY_HEIGHT) - # set chn0 output format - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_0, PIXEL_FORMAT_YUV_SEMIPLANAR_420) - # create meida source device - globals()["meida_source"] = media_device(CAMERA_MOD_ID, CAM_DEV_ID_0, CAM_CHN_ID_0) - # create meida sink device - globals()["meida_sink"] = media_device(DISPLAY_MOD_ID, DISPLAY_DEV_ID, DISPLAY_CHN_VIDEO1) - # create meida link - media.create_link(meida_source, meida_sink) - # set display plane with video channel - display.set_plane(0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT, PIXEL_FORMAT_YVU_PLANAR_420, DISPLAY_MIRROR_NONE, DISPLAY_CHN_VIDEO1) - # set chn1 output rgb888 - camera.set_outsize(CAM_DEV_ID_0, CAM_CHN_ID_1, DISPLAY_WIDTH, DISPLAY_HEIGHT) - camera.set_outfmt(CAM_DEV_ID_0, CAM_CHN_ID_1, PIXEL_FORMAT_RGB_888) - # media buffer init - media.buffer_init() - # start stream for camera device0 - camera.start_stream(CAM_DEV_ID_0) - -def camera_deinit(): - # stop stream for camera device0 - camera.stop_stream(CAM_DEV_ID_0) - # deinit display - display.deinit() - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) - # destroy media link - media.destroy_link(globals()["meida_source"], globals()["meida_sink"]) - # deinit media buffer - media.buffer_deinit() - -def capture_picture(): - time.sleep(1) - rgb888_img = camera.capture_image(CAM_DEV_ID_0, CAM_CHN_ID_1) - try: - rgb888_img.to_jpeg().save("/sdcard/snapshot.jpg") - print("save image ok") - except Exception as e: - print("save image fail: ", e) - # release image for dev and chn - camera.release_image(CAM_DEV_ID_0, CAM_CHN_ID_1, rgb888_img) - -def main(): - os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - print("camera init") - camera_init() - print("camera capture") - capture_picture() - print("camera deinit") - camera_deinit() - -if __name__ == "__main__": - main() diff --git a/share/qtcreator/examples/14-Socket/http_client.py b/share/qtcreator/examples/14-Socket/http_client.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/http_server.py b/share/qtcreator/examples/14-Socket/http_server.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/network_lan.py b/share/qtcreator/examples/14-Socket/network_lan.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/network_wlan.py b/share/qtcreator/examples/14-Socket/network_wlan.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/tcp_client.py b/share/qtcreator/examples/14-Socket/tcp_client.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/tcp_server.py b/share/qtcreator/examples/14-Socket/tcp_server.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/udp_clinet.py b/share/qtcreator/examples/14-Socket/udp_clinet.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/14-Socket/udp_server.py b/share/qtcreator/examples/14-Socket/udp_server.py old mode 100644 new mode 100755 diff --git a/share/qtcreator/examples/15-LVGL/lvgl_demo.py b/share/qtcreator/examples/15-LVGL/lvgl_demo.py old mode 100644 new mode 100755 index 
2bef58bac476..0d18ba552a46 --- a/share/qtcreator/examples/15-LVGL/lvgl_demo.py +++ b/share/qtcreator/examples/15-LVGL/lvgl_demo.py @@ -8,56 +8,44 @@ def display_init(): # use hdmi for display - display.init(LT9611_1920X1080_30FPS) - # config vb for osd layer - config = k_vb_config() - config.max_pool_cnt = 1 - config.comm_pool[0].blk_size = 4*DISPLAY_WIDTH*DISPLAY_HEIGHT - config.comm_pool[0].blk_cnt = 1 - config.comm_pool[0].mode = VB_REMAP_MODE_NOCACHE - # meida buffer config - media.buffer_config(config) - # media buffer init - media.buffer_init() - # request media buffer for osd image - buffer = media.request_buffer(4 * DISPLAY_WIDTH * DISPLAY_HEIGHT) - # create image for osd - globals()["buffer"] = buffer - osd_img = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888, alloc=image.ALLOC_VB, phyaddr=buffer.phys_addr, virtaddr=buffer.virt_addr, poolid=buffer.pool_id) - globals()["osd_img"] = osd_img - osd_img.clear() - display.show_image(osd_img, 0, 0, DISPLAY_CHN_OSD0) + Display.init(Display.LT9611, to_ide = False) + # init media manager + MediaManager.init() def display_deinit(): - # deinit display - display.deinit() os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) - time.sleep_ms(100) + time.sleep_ms(50) + # deinit display + Display.deinit() # release media buffer - media.release_buffer(globals()["buffer"]) - # deinit media buffer - media.buffer_deinit() + MediaManager.deinit() def disp_drv_flush_cb(disp_drv, area, color): + global disp_img1, disp_img2 + if disp_drv.flush_is_last() == True: - osd_img = globals()["osd_img"] - osd_img.copy_from(color.__dereference__(osd_img.size())) + if disp_img1.virtaddr() == uctypes.addressof(color.__dereference__()): + Display.show_image(disp_img1) + else: + Display.show_image(disp_img2) disp_drv.flush_ready() def lvgl_init(): + global disp_img1, disp_img2 + lv.init() disp_drv = lv.disp_create(DISPLAY_WIDTH, DISPLAY_HEIGHT) disp_drv.set_flush_cb(disp_drv_flush_cb) - buf1 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - buf2 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) - disp_drv.set_draw_buffers(buf1.bytearray(), buf2.bytearray(), buf1.size(), lv.DISP_RENDER_MODE.DIRECT) - globals()["buf1"] = buf1 - globals()["buf2"] = buf2 + disp_img1 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_img2 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_drv.set_draw_buffers(disp_img1.bytearray(), disp_img2.bytearray(), disp_img1.size(), lv.DISP_RENDER_MODE.DIRECT) def lvgl_deinit(): + global disp_img1, disp_img2 + lv.deinit() - del globals()["buf1"] - del globals()["buf2"] + del disp_img1 + del disp_img2 def user_gui_init(): res_path = "/sdcard/app/tests/lvgl/data/" @@ -111,14 +99,14 @@ def user_gui_init(): def main(): os.exitpoint(os.EXITPOINT_ENABLE) - display_init() - lvgl_init() try: + display_init() + lvgl_init() user_gui_init() while True: time.sleep_ms(lv.task_handler()) except BaseException as e: - sys.print_exception(e) + print(f"Exception {e}") lvgl_deinit() display_deinit() gc.collect() diff --git a/share/qtcreator/examples/15-LVGL/lvgl_touch_demo.py b/share/qtcreator/examples/15-LVGL/lvgl_touch_demo.py new file mode 100755 index 000000000000..b9c519c163a4 --- /dev/null +++ b/share/qtcreator/examples/15-LVGL/lvgl_touch_demo.py @@ -0,0 +1,157 @@ +from media.display import * +from media.media import * +import time, os, sys, gc +import lvgl as lv +from machine import TOUCH + +DISPLAY_WIDTH = ALIGN_UP(800, 16) +DISPLAY_HEIGHT = 480 + +def display_init(): + # use hdmi for display + 
Display.init(Display.ST7701, width = 800, height = 480, to_ide = True) + # init media manager + MediaManager.init() + +def display_deinit(): + os.exitpoint(os.EXITPOINT_ENABLE_SLEEP) + time.sleep_ms(50) + # deinit display + Display.deinit() + # release media buffer + MediaManager.deinit() + +def disp_drv_flush_cb(disp_drv, area, color): + global disp_img1, disp_img2 + + if disp_drv.flush_is_last() == True: + if disp_img1.virtaddr() == uctypes.addressof(color.__dereference__()): + Display.show_image(disp_img1) + print(f"disp disp_img1 {disp_img1}") + else: + Display.show_image(disp_img2) + print(f"disp disp_img2 {disp_img2}") + time.sleep(0.01) + + disp_drv.flush_ready() + +class touch_screen(): + def __init__(self): + self.x = 0 + self.y = 0 + self.state = lv.INDEV_STATE.RELEASED + + self.indev_drv = lv.indev_create() + self.indev_drv.set_type(lv.INDEV_TYPE.POINTER) + self.indev_drv.set_read_cb(self.callback) + self.touch = TOUCH(0) + + def callback(self, driver, data): + x, y, state = self.x, self.y, lv.INDEV_STATE.RELEASED + tp = self.touch.read(1) + if len(tp): + x, y, event = tp[0].x, tp[0].y, tp[0].event + if event == 2 or event == 3: + state = lv.INDEV_STATE.PRESSED + data.point = lv.point_t({'x': y, 'y': DISPLAY_HEIGHT - x}) + data.state = state + +def lvgl_init(): + global disp_img1, disp_img2 + + lv.init() + disp_drv = lv.disp_create(DISPLAY_WIDTH, DISPLAY_HEIGHT) + disp_drv.set_flush_cb(disp_drv_flush_cb) + disp_img1 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_img2 = image.Image(DISPLAY_WIDTH, DISPLAY_HEIGHT, image.ARGB8888) + disp_drv.set_draw_buffers(disp_img1.bytearray(), disp_img2.bytearray(), disp_img1.size(), lv.DISP_RENDER_MODE.DIRECT) + tp = touch_screen() + +def lvgl_deinit(): + global disp_img1, disp_img2 + + lv.deinit() + del disp_img1 + del disp_img2 + +def btn_clicked_event(event): + btn = lv.btn.__cast__(event.get_target()) + label = lv.label.__cast__(btn.get_user_data()) + if "on" == label.get_text(): + label.set_text("off") + else: + label.set_text("on") + +def user_gui_init(): + res_path = "/sdcard/app/tests/lvgl/data/" + + font_montserrat_16 = lv.font_load("A:" + res_path + "font/montserrat-16.fnt") + ltr_label = lv.label(lv.scr_act()) + ltr_label.set_text("In modern terminology, a microcontroller is similar to a system on a chip (SoC).") + ltr_label.set_style_text_font(font_montserrat_16,0) + ltr_label.set_width(310) + ltr_label.align(lv.ALIGN.TOP_MID, 0, 0) + + font_simsun_16_cjk = lv.font_load("A:" + res_path + "font/lv_font_simsun_16_cjk.fnt") + cz_label = lv.label(lv.scr_act()) + cz_label.set_style_text_font(font_simsun_16_cjk, 0) + cz_label.set_text("嵌入式系统(Embedded System),\n是一种嵌入机械或电气系统内部、具有专一功能和实时计算性能的计算机系统。") + cz_label.set_width(310) + cz_label.align(lv.ALIGN.BOTTOM_MID, 0, 0) + + anim_imgs = [None]*4 + with open(res_path + 'img/animimg001.png','rb') as f: + anim001_data = f.read() + + anim_imgs[0] = lv.img_dsc_t({ + 'data_size': len(anim001_data), + 'data': anim001_data + }) + anim_imgs[-1] = anim_imgs[0] + + with open(res_path + 'img/animimg002.png','rb') as f: + anim002_data = f.read() + + anim_imgs[1] = lv.img_dsc_t({ + 'data_size': len(anim002_data), + 'data': anim002_data + }) + + with open(res_path + 'img/animimg003.png','rb') as f: + anim003_data = f.read() + + anim_imgs[2] = lv.img_dsc_t({ + 'data_size': len(anim003_data), + 'data': anim003_data + }) + + animimg0 = lv.animimg(lv.scr_act()) + animimg0.center() + animimg0.set_src(anim_imgs, 4) + animimg0.set_duration(2000) + 
animimg0.set_repeat_count(lv.ANIM_REPEAT_INFINITE) + animimg0.start() + + btn = lv.btn(lv.scr_act()) + btn.align(lv.ALIGN.CENTER, 0, lv.pct(25)) + label = lv.label(btn) + label.set_text('on') + btn.set_user_data(label) + btn.add_event(btn_clicked_event, lv.EVENT.CLICKED, None) + +def main(): + os.exitpoint(os.EXITPOINT_ENABLE) + try: + display_init() + lvgl_init() + user_gui_init() + while True: + time.sleep_ms(lv.task_handler()) + except BaseException as e: + sys.print_exception(e) + lvgl_deinit() + display_deinit() + gc.collect() + +if __name__ == "__main__": + main() diff --git a/share/qtcreator/examples/15-Utils/db/readme.txt b/share/qtcreator/examples/15-Utils/db/readme.txt deleted file mode 100644 index a8f1f8dd323b..000000000000 --- a/share/qtcreator/examples/15-Utils/db/readme.txt +++ /dev/null @@ -1 +0,0 @@ -用于存放人脸数据库 diff --git a/share/qtcreator/examples/15-Utils/db_img/id_1.jpg b/share/qtcreator/examples/15-Utils/db_img/id_1.jpg deleted file mode 100644 index 0cb764acf1ad..000000000000 Binary files a/share/qtcreator/examples/15-Utils/db_img/id_1.jpg and /dev/null differ diff --git a/share/qtcreator/examples/15-Utils/db_img/id_2.png b/share/qtcreator/examples/15-Utils/db_img/id_2.png deleted file mode 100644 index 95372a281bd3..000000000000 Binary files a/share/qtcreator/examples/15-Utils/db_img/id_2.png and /dev/null differ diff --git a/share/qtcreator/examples/15-Utils/dict.txt b/share/qtcreator/examples/15-Utils/dict.txt deleted file mode 100755 index f43275c9b265..000000000000 --- a/share/qtcreator/examples/15-Utils/dict.txt +++ /dev/null @@ -1,6549 +0,0 @@ -光 -环 -主 -营 -: -热 -干 -面 -原 -汤 -创 -亿 -8 -1 -0 -香 -花 -桥 -社 -区 -( -街 -道 -) -藍 -色 -经 -典 -承 -接 -国 -内 -外 -针 -梭 -织 -订 -单 -欢 -迎 -来 -料 -加 -工 -P -H -O -E -N -I -X -T -A -电 -话 -3 -6 -4 -9 -5 -B -7 -申 -滨 -路 -爱 -拓 -升 -密 -斯 -全 -屋 -售 -票 -对 -讲 -机 -元 -茶 -刷 -、 -纸 -巾 -无 -限 -极 -安 -得 -装 -饰 -九 -龙 -休 -闲 -足 -浴 -中 -心 -凭 -身 -份 -证 -领 -手 -信 -用 -能 -当 -现 -金 -人 -民 -财 -产 -保 -险 -股 -有 -公 -司 -美 -食 -餐 -厅 -厨 -卫 -韵 -达 -灯 -箱 -裙 -楼 -助 -教 -聚 -时 -地 -线 -2 -- -同 -乐 -坊 -l -n -t -h -e -a -m -o -f -v -秀 -沃 -尔 -玛 -帮 -万 -家 -企 -包 -宁 -波 -埃 -柯 -铜 -阀 -门 -联 -系 -车 -养 -护 -站 -方 -時 -空 -購 -物 -小 -牛 -肉 -萬 -事 -興 -奢 -缇 -郭 -氏 -生 -堂 -, -支 -付 -宝 -和 -微 -都 -可 -集 -团 -纺 -品 -销 -棉 -被 -您 -! -碧 -水 -缘 -座 -椅 -定 -制 -. 
-脚 -垫 -临 -富 -园 -烟 -酒 -业 -幼 -儿 -拼 -音 -寒 -暑 -假 -贺 -佳 -节 -福 -官 -学 -育 -世 -红 -璞 -男 -鞋 -始 -于 -C -点 -粥 -粉 -饭 -类 -满 -送 -板 -栗 -入 -太 -仓 -市 -优 -特 -利 -贸 -易 -麻 -木 -前 -列 -腺 -一 -果 -蜜 -婆 -嘉 -际 -大 -店 -洋 -河 -架 -丰 -鑫 -壁 -软 -背 -神 -童 -文 -具 -梦 -之 -星 -英 -语 -铁 -客 -代 -博 -技 -笑 -甲 -山 -豆 -剪 -发 -想 -成 -行 -旺 -明 -吉 -逸 -夫 -程 -館 -酸 -辣 -盲 -按 -摩 -疗 -祝 -健 -康 -泰 -兴 -场 -监 -督 -管 -理 -局 -城 -分 -间 -室 -所 -域 -冠 -京 -丽 -华 -便 -活 -动 -价 -。 -详 -情 -苑 -村 -南 -烩 -天 -连 -锁 -宏 -汇 -厦 -印 -象 -S -馆 -饮 -何 -叶 -馍 -锦 -标 -志 -上 -海 -黄 -浦 -化 -政 -执 -法 -广 -东 -老 -年 -共 -务 -研 -究 -武 -汉 -档 -案 -L -本 -油 -不 -使 -银 -卡 -德 -壹 -作 -多 -润 -滑 -U -V -W -尚 -约 -超 -越 -展 -港 -体 -彩 -液 -气 -折 -哈 -五 -暖 -哥 -烫 -甬 -涛 -建 -筑 -刚 -网 -纱 -窗 -帘 -风 -眠 -艺 -术 -由 -吧 -肤 -• -形 -设 -计 -羊 -火 -锅 -洛 -塔 -做 -喜 -雪 -诚 -正 -办 -夏 -傅 -西 -服 -双 -梅 -婚 -摄 -项 -租 -房 -沟 -炎 -灰 -指 -址 -二 -层 -速 -宾 -早 -唐 -精 -修 -洗 -衣 -冰 -柜 -琶 -洲 -派 -出 -R -d -u -澳 -投 -资 -号 -居 -介 -昊 -好 -下 -停 -高 -回 -铝 -G -Y -Z -窖 -轩 -苏 -· -御 -钵 -晾 -遇 -见 -祥 -如 -意 -洪 -府 -导 -才 -厂 -直 -沙 -泥 -配 -件 -党 -平 -李 -向 -轮 -周 -口 -颜 -就 -景 -韩 -霞 -医 -礼 -州 -白 -云 -古 -奥 -格 -汽 -新 -北 -烤 -y -长 -辉 -旅 -游 -左 -转 -米 -廣 -告 -焖 -鸡 -镇 -各 -F -s -i -牌 -策 -划 -推 -拉 -开 -锈 -钢 -防 -胎 -祭 -女 -招 -珍 -津 -粮 -维 -通 -子 -权 -交 -咨 -询 -位 -已 -谢 -晚 -末 -百 -友 -低 -至 -傲 -帝 -纪 -图 -徽 -纳 -住 -材 -庄 -b -p -伊 -甸 -劳 -遣 -艾 -灸 -幸 -狐 -狸 -桂 -林 -科 -野 -批 -零 -囍 -鸭 -飞 -雲 -書 -畫 -恭 -头 -袖 -布 -庭 -智 -慧 -D -选 -铺 -烈 -王 -芳 -药 -习 -青 -打 -蜡 -呢 -商 -为 -快 -丁 -舞 -跆 -淀 -委 -备 -煲 -质 -量 -盖 -鲜 -盒 -部 -疆 -辰 -络 -会 -淋 -淮 -膳 -芝 -士 -绒 -衫 -杏 -槐 -院 -胖 -烧 -饼 -条 -寓 -侬 -瞰 -敏 -久 -把 -散 -+ -观 -翠 -阁 -型 -请 -陶 -专 -磊 -喇 -叭 -马 -瓦 -罐 -煨 -寳 -貿 -豪 -吊 -顶 -義 -藝 -術 -顺 -睫 -半 -永 -姐 -擀 -罡 -杨 -待 -阿 -腔 -颌 -晨 -副 -鱼 -移 -川 -床 -铣 -块 -乳 -K -呈 -禾 -扭 -伤 -关 -膜 -骨 -坏 -死 -力 -“ -评 -” -余 -记 -猪 -孕 -婴 -陈 -唯 -旧 -书 -M -羽 -调 -解 -员 -汾 -竹 -味 -雕 -垃 -圾 -走 -进 -益 -寿 -千 -里 -蒲 -庙 -榨 -落 -附 -众 -宫 -廷 -桃 -酥 -昌 -欣 -抗 -裂 -渗 -四 -季 -麦 -硕 -脑 -潮 -汕 -杂 -咸 -容 -纤 -俱 -拆 -迁 -处 -货 -阳 -自 -田 -J -静 -瑞 -日 -贷 -款 -估 -过 -户 -后 -刻 -名 -聘 -师 -费 -课 -夹 -® -况 -源 -税 -征 -收 -馈 -乡 -湖 -井 -靖 -江 -数 -卖 -刀 -郎 -三 -两 -七 -酉 -库 -土 -求 -到 -期 -湘 -妈 -妆 -喷 -画 -卤 -菜 -姚 -表 -驾 -校 -杭 -颐 -秋 -职 -苍 -梧 -球 -遍 -看 -淘 -默 -片 -写 -真 -绘 -亚 -克 -字 -善 -溢 -歡 -衡 -: -胡 -春 -榮 -济 -秸 -坎 -器 -械 -敬 -亭 -律 -皮 -担 -筋 -凉 -灌 -肠 -锐 -隐 -眼 -镜 -造 -免 -放 -盘 -邢 -台 -先 -的 -个 -性 -辅 -构 -消 -残 -r -恩 -饺 -宸 -宇 -露 -样 -炉 -深 -巷 -● -插 -者 -珠 -退 -菱 -鼻 -秘 -传 -统 -酵 -绿 -含 -添 -剂 -蚕 -丝 -套 -松 -虹 -踏 -农 -k -变 -劉 -响 -娟 -呵 -托 -雅 -迪 -合 -受 -十 -远 -立 -盗 -鹦 -鹉 -首 -排 -序 -境 -c -购 -守 -腰 -妹 -流 -鲢 -吃 -g -减 -兰 -额 -存 -预 -置 -悦 -签 -涮 -脆 -栋 -县 -— -影 -视 -听 -诊 -乾 -坤 -盛 -然 -宴 -石 -橱 -梯 -搓 -拖 -鹰 -实 -兄 -弟 -渔 -带 -徐 -頡 -范 -睿 -缆 -押 -简 -胜 -贝 -佛 -玩 -Ω -饯 -炒 -糖 -警 -月 -培 -基 -八 -义 -控 -稀 -闵 -陇 -第 -荣 -棋 -邹 -泉 -池 -甘 -贵 -钦 -巨 -总 -碗 -拿 -雄 -伟 -属 -结 -亮 -姿 -梵 -运 -息 -玺 -竞 -鲲 -鹏 -与 -冈 -瓷 -塑 -照 -重 -庆 -弗 -尼 -留 -铪 -弥 -饵 -镦 -巧 -兹 -宗 -馄 -饨 -码 -融 -釜 -佰 -業 -扑 -换 -屏 -蛋 -糕 -Λ -蔬 -瓜 -墙 -藏 -夜 -盟 -; -翻 -腐 -贴 -砖 -毛 -峰 -г -寄 -私 -边 -煌 -馨 -岭 -朝 -菲 -目 -避 -風 -塘 -式 -劵 -钟 -威 -胶 -z -底 -' -/ -次 -芙 -灵 -刺 -柿 -媒 -妇 -汗 -激 -示 -霍 -强 -题 -复 -哦 -, -霸 -j -欧 -俪 -鸿 -運 -围 -削 -榻 -蛟 -帐 -篷 -振 -反 -郑 -仕 -恒 -闸 -肯 -玻 -璃 -俗 -互 -相 -攀 -比 -滤 -军 -队 -离 -确 -凤 -纯 -枕 -只 -蓝 -筒 -整 -种 -螺 -母 -训 -凯 -省 -伪 -协 -清 -蛙 -频 -央 -奶 -冷 -择 -我 -们 -没 -错 -验 -雷 -杰 -辛 -啤 -谊 -己 -蝎 -组 -腾 -仔 -尾 -巴 -严 -君 -宜 -再 -鲁 -迅 -帽 -颖 -别 -致 -责 -任 -准 -罕 -启 -温 -需 -登 -杜 -良 -其 -湾 -测 -起 -丹 -斓 -感 -未 -鹅 -魅 -族 -妻 -肺 -角 -汪 -豫 -砂 -柳 -磚 -血 -病 -航 -枫 -叉 -浇 -焗 -怡 -稻 -槎 -宽 -串 -综 -治 -报 -勿 -吸 -蒸 -扶 -扁 -沪 -草 -勇 -琪 -丶 -伞 -紫 -虾 -嘿 -冬 -薪 -咖 -啡 -诉 -拌 -炖 -趣 -班 -伸 -缩 -q -酬 -朗 -蔡 -莲 -卷 -圣 -痛 -在 -违 -章 -犬 -姜 -驻 -群 -净 -效 -你 -麒 -麟 -郡 -常 -酷 -享 -补 -》 -今 -蜘 -蛛 -梳 -鼎 -耒 -玥 -莎 -坛 -墅 -暴 -笼 -缔 -敖 -肥 -寰 -浩 -粒 -也 -芦 -敢 -追 -卓 -供 -耳 -焊 -氩 -弧 -赖 -召 -捷 -钣 -虎 -崇 -寺 -H -Z -L -酱 -页 -赁 -稞 -宠 -孚 -寸 -炭 -念 -锋 -雀 -巢 -思 -冻 -羲 -输 -歌 -毂 -改 -曲 -她 -彭 -荒 -坪 -愿 -帆 -洁 -止 -辆 -参 -颈 -鹿 -漆 -森 -骏 -晓 -铭 -这 -是 -裤 -知 
-度 -泵 -谷 -旗 -舰 -漕 -够 -更 -衔 -岁 -还 -犹 -? -刘 -遥 -蹈 -長 -规 -菊 -递 -陕 -萍 -齐 -翡 -赌 -要 -找 -版 -盐 -禁 -充 -豊 -着 -最 -奇 -讯 -读 -鸣 -昕 -诺 -莉 -雨 -熟 -w -荟 -瓶 -缝 -纫 -检 -损 -张 -少 -樱 -箭 -钻 -此 -若 -船 -芯 -& -兵 -割 -摸 -叔 -幅 -震 -讠 -根 -饸 -伴 -唱 -戏 -载 -披 -萨 -蟹 -茜 -均 -翼 -账 -慎 -谐 -兼 -帛 -诱 -惑 -媚 -匕 -貢 -棒 -沂 -禅 -蚊 -趟 -等 -弄 -摆 -兽 -宵 -幻 -】 -突 -破 -扣 -畅 -潢 -瘾 -盆 -算 -隆 -虫 -睛 -鹤 -曼 -尖 -埔 -将 -授 -菘 -驰 -牙 -练 -壶 -岗 -午 -应 -磁 -汝 -財 -進 -俊 -鸟 -绣 -颗 -醋 -贤 -叮 -咚 -粤 -蜂 -播 -铮 -燕 -树 -嘴 -惠 -完 -勤 -钥 -匙 -继 -续 -裕 -Q -幢 -x -奋 -柏 -查 -洱 -途 -录 -轿 -榕 -圆 -磐 -认 -刮 -痧 -斤 -压 -潜 -宿 -咕 -噜 -喵 -覆 -浆 -骜 -仰 -历 -盈 -栏 -镶 -馒 -皇 -铃 -仪 -像 -碟 -诗 -倩 -牢 -國 -步 -缴 -以 -言 -黛 -援 -甜 -麗 -柠 -檬 -→ -买 -宋 -每 -六 -挖 -屯 -掘 -圈 -忆 -蒙 -扯 -汁 -触 -及 -巫 -率 -注 -册 -湯 -厉 -! -赛 -遗 -审 -荨 -祖 -恶 -魔 -瑜 -伽 -他 -链 -尊 -磨 -冀 -孩 -聪 -崧 -澜 -弯 -轨 -劲 -廊 -奠 -晶 -槽 -纹 -镀 -锌 -郸 -疹 -荘 -驿 -耀 -郏 -丨 -桑 -剔 -茴 -朋 -返 -炫 -箐 -嵩 -探 -独 -抛 -猫 -裁 -澄 -嵌 -齿 -蘭 -麺 -臻 -晋 -賀 -值 -罗 -捐 -赠 -靜 -學 -妍 -妙 -桶 -車 -卉 -邮 -婷 -倪 -泾 -钱 -擎 -彪 -痕 -咪 -邀 -伦 -非 -拳 -舍 -绍 -挂 -靠 -丫 -娃 -试 -潭 -苹 -闪 -琴 -声 -睡 -醇 -¥ -冒 -戒 -廉 -棚 -娱 -考 -级 -故 -è -蔚 -泡 -顾 -琳 -杠 -舒 -适 -绕 -荷 -悠 -肝 -胆 -胃 -横 -依 -慕 -势 -袭 -媳 -界 -弘 -眉 -泸 -贰 -傻 -旭 -茂 -茅 -染 -固 -靓 -增 -扫 -燃 -灶 -毒 -烙 -髪 -俏 -紅 -開 -炸 -寻 -% -椹 -酿 -核 -蓉 -绅 -因 -急 -啊 -祯 -宣 -施 -紧 -抵 -邯 -翔 -另 -滋 -瀚 -借 -氢 -沫 -槟 -榔 -览 -玲 -厘 -丸 -亨 -沥 -混 -凝 -肩 -胸 -那 -莊 -梨 -幕 -葛 -黑 -莱 -凡 -剑 -荆 -旋 -从 -臨 -門 -肌 -献 -赏 -许 -丢 -失 -概 -负 -漫 -鮮 -铸 -苗 -乘 -玫 -铂 -嗨 -席 -毅 -岛 -匠 -邦 -農 -肴 -湃 -瑰 -怀 -3 -菠 -萝 -蒂 -泛 -昆 -邻 -右 -勁 -畸 -刑 -辩 -而 -议 -喝 -榞 -莞 -断 -霖 -辽 -乌 -怕 -滩 -奕 -橡 -隔 -圳 -咬 -芬 -馅 -涡 -封 -釉 -飘 -マ -シ -サ -ジ -泊 -扎 -甩 -斬 -访 -稳 -恋 -當 -佩 -黎 -奈 -烘 -棕 -券 -椒 -醛 -引 -裱 -旦 -盱 -眙 -旁 -穗 -赔 -尧 -赵 -难 -绳 -陪 -锭 -卢 -冲 -绝 -揽 -〇 -脸 -拔 -孙 -爆 -饹 -寶 -楚 -岳 -氪 -篇 -捞 -斋 -栓 -端 -扒 -钜 -侨 -桌 -几 -詩 -帕 -絲 -爽 -茗 -编 -發 -救 -孤 -困 -玉 -杯 -涌 -提 -袋 -汛 -署 -褔 -匾 -奖 -煮 -晟 -觅 -罚 -狗 -龍 -氧 -資 -忠 -乖 -馋 -让 -瑩 -繡 -障 -泳 -椎 -蓄 -泽 -兑 -蝶 -击 -描 -吴 -茸 -窝 -柔 -種 -仁 -圃 -笔 -仙 -顿 -舟 -宰 -给 -杆 -亲 -遮 -毯 -陽 -丘 -除 -害 -骑 -韶 -坚 -功 -显 -说 -演 -坐 -産 -冯 -弹 -韦 -橘 -晒 -澡 -斗 -尝 -取 -橙 -摇 -蕊 -殊 -魏 -樊 -模 -束 -卜 -宛 -素 -墓 -积 -透 -鲈 -孟 -枪 -荔 -舌 -坦 -状 -篓 -袜 -虑 -患 -纠 -纷 -崽 -笋 -蓬 -跌 -渐 -翟 -籽 -碍 -疼 -腿 -脊 -轴 -嬰 -翅 -瑾 -丄 -搬 -跃 -伐 -宅 -仟 -岩 -葱 -蘸 -睐 -战 -孝 -( -) -寨 -檀 -楠 -煎 -贫 -饲 -陵 -普 -熙 -宙 -翰 -钅 -袁 -郊 -昶 -捆 -擦 -圪 -硫 -脲 -桐 -矫 -秦 -硅 -藻 -态 -誉 -猛 -腩 -渝 -拾 -挥 -侠 -痔 -瘘 -挡 -堡 -烽 -贾 -華 -采 -予 -辊 -沌 -坝 -堆 -梁 -牡 -熨 -耕 -鹌 -鹑 -豹 -履 -植 -觉 -鲤 -醉 -菇 -筝 -蜻 -蜓 -莫 -闯 -涯 -乃 -剧 -墨 -革 -雾 -掌 -煤 -肾 -扦 -藕 -命 -齋 -漏 -芭 -荧 -創 -偉 -順 -納 -湿 -鸥 -即 -弦 -驶 -疾 -纂 -闺 -察 -枞 -浪 -碳 -盾 -姻 -锥 -滏 -禹 -畵 -闽 -缓 -邝 -桦 -又 -渡 -瘦 -啦 -逍 -爪 -壽 -+ -娌 -繁 -纟 -柴 -翁 -垂 -钓 -促 -沐 -龄 -短 -溶 -淼 -去 -熏 -漾 -咀 -嚼 -壳 -騰 -肚 -了 -敲 -膏 -艇 -卿 -绞 -冚 -肿 -胀 -楷 -瀛 -嫂 -诞 -湛 -灾 -募 -… -漂 -奔 -葡 -萄 -搏 -伍 -曹 -慈 -; -牧 -淞 -熊 -穿 -孔 -沧 -绸 -丧 -葬 -孛 -赢 -聊 -段 -貴 -堵 -滁 -沈 -馥 -冮 -婦 -羅 -废 -荤 -倍 -耐 -姓 -瀘 -痘 -鱿 -仿 -差 -降 -峡 -斜 -慢 -恢 -切 -番 -茄 -薇 -脉 -驭 -尿 -耗 -朱 -疯 -狂 -储 -虔 -砍 -旨 -珊 -萊 -堰 -牵 -阖 -曾 -涎 -蠡 -捕 -莺 -凰 -据 -咏 -悍 -= -悟 -夷 -跟 -妊 -枣 -什 -么 -拍 -稽 -炮 -粘 -脱 -樂 -谨 -溪 -董 -氟 -芒 -爵 -吞 -抄 -扬 -识 -Ⓡ -恺 -倾 -妮 -貂 -阪 -赣 -炙 -★ -撕 -焙 -猬 -岸 -腱 -尃 -斑 -頭 -举 -近 -揭 -甫 -必 -橄 -榄 -薯 -叠 -毓 -兆 -⊥ -芊 -朵 -锨 -淳 -糯 -抓 -钧 -闭 -异 -佑 -篮 -丑 -怪 -玖 -腹 -鼠 -赐 -隍 -鳝 -倡 -惊 -阜 -枇 -杷 -㸃 -鸽 -鲫 -沼 -睦 -芜 -绽 -狮 -滬 -瘀 -疚 -秤 -缺 -襄 -鳳 -藥 -凌 -抚 -丞 -栈 -硬 -谭 -亍 -巡 -判 -蒋 -岚 -映 -初 -敌 -曙 -逢 -肘 -筷 -濠 -伯 -葉 -鏡 -菌 -蘇 -尤 -谱 -乔 -貝 -祛 -h -殡 -暨 -殿 -腊 -厕 -迈 -趋 -淇 -桔 -尺 -媄 -奓 -娄 -祺 -希 -望 -叁 -袍 -缸 -挑 -辭 -舊 -歲 -飲 -姣 -艳 -俄 -宦 -塾 -茱 -【 -戴 -玄 -践 -邱 -斌 -候 -弍 -虚 -醒 -镂 -碎 -锤 -# -妥 -《 -鉴 -辑 -骝 -約 -烛 -冶 -乎 -钝 -陂 -愛 -吹 -穆 -辐 -谦 -疮 -粽 -E -V -R -暗 -隹 -亻 -筏 -~ -弱 -索 -娜 -拇 -筛 -杀 -陆 -淡 -兜 -往 -藤 -萃 -榜 -贡 -飾 -經 -綸 -钰 -贞 -颛 -症 -嘻 -褥 -帅 -奉 -盔 -澈 -锯 -灡 -泓 -哪 -彬 -癌 -峩 -芽 -锡 -絮 -鄂 -『 -泗 -砭 -』 -迷 -遁 -羿 -臣 -搭 -饿 -莆 -瀑 -| -笨 -略 -驳 -禧 -簧 -猴 -優 -币 -碱 -熹 -粑 -铰 -辫 -卧 -杉 -危 -豐 -鞭 -記 -兿 -聖 -似 -乙 -胚 -茭 -吻 -碚 -舜 -赫 -否 -鳯 -答 -疑 -磅 -刁 -框 -亏 -柱 
-浮 -归 -撑 -迦 -尘 -銀 -渎 -葵 -偿 -潘 -垣 -终 -忘 -颂 -∧ -И -炔 -氮 -祸 -黔 -侧 -疏 -浚 -嚞 -糊 -句 -扌 -勘 -争 -咅 -圗 -尽 -涂 -胗 -幺 -疤 -嘀 -嗒 -滚 -痣 -逗 -節 -髙 -随 -懋 -畜 -敦 -令 -坑 -栽 -蝴 -跳 -伏 -裹 -懿 -璜 -烨 -匹 -蚝 -偏 -禽 -史 -努 -细 -昇 -晴 -‘ -貌 -缤 -珂 -蕾 -閏 -鞍 -肖 -钉 -島 -の -珑 -璇 -庵 -厝 -戈 -粱 -倒 -嶺 -妞 -赤 -父 -姨 -飙 -狼 -轻 -號 -枢 -纽 -幽 -° -掏 -誠 -闻 -猜 -G -I -A -俭 -皆 -匆 -忙 -贯 -彻 -葒 -蕃 -晏 -柘 -纶 -∶ -喱 -缅 -累 -專 -ㆍ -氣 -跑 -曜 -占 -姆 -蔓 -惦 -倫 -愉 -垦 -洽 -娇 -滘 -泷 -郅 -焕 -顔 -槿 -澧 -箔 -浙 -朕 -衰 -俺 -逆 -é -捧 -奎 -焦 -稷 -铅 -矿 -忄 -韓 -燎 -濤 -钊 -蕙 -携 -榆 -沉 -鳶 -潍 -蹄 -皖 -啵 -铄 -夕 -汥 -乓 -炬 -棵 -衤 -菏 -饶 -’ -问 -賓 -喻 -閣 -薬 -攻 -兔 -熬 -钎 -呱 -谌 -吒 -乱 -邪 -煞 -耄 -梓 -摊 -幂 -豌 -「 -」 -樟 -脖 -苦 -荞 -喽 -攸 -熘 -竺 -蔻 -获 -咻 -薰 -駿 -挝 -镯 -坠 -尹 -萧 -陡 -坡 -輕 -" -窃 -戚 -撒 -煜 -蹦 -颁 -皙 -椰 -愚 -很 -憨 -馬 -壕 -译 -昂 -延 -俞 -茵 -棱 -谈 -桢 -蛳 -炝 -钭 -唇 -點 -審 -喊 -樓 -榭 -琉 -呷 -哺 -③ -巩 -乒 -婕 -蒜 -厚 -媛 -滙 -哲 -沿 -▏ -渭 -硼 -阴 -持 -東 -决 -筹 -并 -隽 -忧 -邑 -骄 -诵 -夬 -沁 -蜀 -卦 -礻 -懒 -浅 -阅 -卯 -炕 -藜 -汐 -莜 -碣 -雞 -艮 -洞 -← -逹 -郝 -乍 -鲨 -湟 -迮 -竿 -葫 -誌 -劝 -浑 -儒 -彦 -燚 -喏 -酪 -極 -抢 -般 -禮 -墩 -珀 -簡 -廖 -稚 -芃 -纵 -灿 -網 -電 -枝 -粟 -吗 -妃 -麵 -催 -著 -仃 -揚 -汀 -绵 -剛 -堅 -▪ -赞 -佬 -该 -萱 -阻 -颊 -羔 -淑 -呼 -铕 -坞 -綢 -盼 -漢 -勐 -晰 -孬 -楊 -徒 -崔 -` -豚 -脯 -酝 -溜 -厢 -沽 -龟 -励 -鳄 -涉 -邺 -笙 -谋 -唢 -呐 -伙 -磷 -溝 -栖 -秃 -肛 -裟 -菩 -绎 -阎 -庚 -彝 -佐 -拨 -勒 -個 -靴 -蜕 -喆 -吕 -狱 -辜 -且 -嫁 -裳 -逊 -丛 -棍 -抽 -叫 -烹 -饪 -键 -粗 -吾 -滇 -喉 -ä -嘎 -芸 -仲 -瓮 -允 -跨 -犀 -煦 -凿 -寬 -刃 -肢 -陳 -猎 -來 -骓 -债 -師 -範 -涤 -锣 -侯 -皂 -棠 -萌 -哒 -摘 -匝 -浓 -骞 -樸 -碑 -耘 -勋 -疣 -叻 -潼 -弓 -须 -趙 -欠 -| -瞳 -堤 -瘤 -輪 -際 -團 -刹 -射 -祎 -驴 -佧 -崎 -礦 -遂 -骆 -驼 -污 -仆 -[ -] -@ -莓 -潞 -腕 -泪 -拐 -菁 -呆 -陟 -诠 -佗 -函 -箕 -浒 -翘 -亘 -酌 -郫 -麓 -鄉 -場 -緣 -璐 -浜 -內 -奂 -揸 -愧 -诸 -届 -凳 -扇 -灏 -佤 -達 -臭 -慶 -嫚 -蚁 -谛 -ɔ -妯 -薛 -娘 -捏 -旱 -蟠 -昔 -課 -挺 -扳 -桩 -籁 -駕 -匯 -亞 -笠 -荠 -郦 -隧 -吓 -禄 -称 -箫 -鋪 -孫 -彼 -韭 -赋 -丙 -昭 -舶 -璟 -憾 -掉 -渣 -煊 -奴 -则 -卅 -秒 -挞 -铖 -颍 -栅 -括 -撸 -鲸 -¥ -驹 -镁 -钛 -覃 -邓 -帜 -鄺 -寝 -涨 -鲍 -郁 -薄 -寇 -咳 -> -琥 -靛 -規 -劃 -會 -顽 -癣 -飯 -垛 -或 -悔 -楽 -徳 -爬 -浏 -燈 -晗 -媽 -馿 -晕 -繪 -圖 -標 -焱 -躁 -麋 -论 -刊 -灞 -傣 -榴 -龚 -罩 -醫 -É -疫 -绥 -拦 -卸 -鎮 -垒 -硚 -連 -较 -窕 -蕉 -勝 -蔷 -裝 -虞 -胳 -膊 -痒 -涵 -粪 -裘 -\ -瀾 -醚 -覺 -烂 -贩 -Ⅱ -亩 -抿 -逐 -參 -З -毡 -肽 -敷 -吖 -滴 -例 -鈴 -抖 -閑 -隋 -骗 -鲮 -玑 -撼 -郴 -[ -撤 -從 -園 -搂 -壤 -遒 -坋 -纬 -讼 -皓 -渊 -虽 -踪 -盤 -嘣 -峨 -嵋 -瑶 -荐 -殖 -鞘 -覌 -蓓 -拢 -涩 -呦 -腌 -苪 -墟 -埠 -喬 -姬 -畐 -砀 -𣇉 -昀 -铵 -夺 -衬 -棺 -窦 -腻 -洒 -枉 -間 -嬉 -舫 -檔 -耍 -鯨 -粼 -珞 -娣 -俩 -圭 -杼 -鸳 -鸯 -現 -鳞 -拥 -絡 -復 -恂 -顏 -軒 -茨 -厄 -雍 -倚 -雁 -苔 -藓 -& -柚 -亢 -逃 -眸 -馐 -矾 -鹭 -爸 -缎 -耶 -偷 -镗 -氛 -醪 -糟 -溉 -剐 -錦 -偶 -憩 -镐 -錧 -庞 -賢 -戶 -鹃 -兮 -钩 -邨 -覚 -屈 -娅 -俵 -敟 -瘊 -阶 -遵 -曦 -妤 -茉 -滞 -珏 -啰 -径 -勃 -篱 -扩 -姑 -厮 -職 -㐂 -垚 -癫 -痫 -茯 -苓 -K -N -膝 -鷹 -熔 -窑 -镖 -螃 -泌 -础 -錢 -锂 -侣 -桉 -霜 -魂 -凉 -役 -塗 -凬 -糙 -粹 -纲 -滕 -濮 -闿 -毕 -昵 -鄭 -哚 -椿 -馫 -蚨 -莹 -泼 -ü -仞 -肇 -砼 -枱 -屌 -跖 -佘 -抱 -見 -芷 -砚 -岢 -晖 -糁 -濟 -翌 -瘢 -疙 -瘩 -噢 -拜 -箩 -蚂 -菀 -芹 -劈 -矸 -痨 -嫩 -鼓 -鲱 -怎 -虏 -靡 -皱 -释 -鴻 -滿 -糍 -㐱 -烊 -霆 -骐 -桼 -沾 -苟 -徕 -碾 -邵 -崛 -潤 -揪 -佶 -雎 -臊 -瞬 -岐 -棟 -锻 -❋ -惩 -淄 -# -蛤 -瑙 -逅 -凹 -凸 -茬 -咽 -彤 -劇 -瓣 -侈 -惜 -咔 -讨 -孖 -氨 -酯 -賴 -漳 -嘟 -竖 -礴 -芮 -雯 -词 -塞 -柒 -趾 -趴 -锘 -銘 -朴 -歸 -弋 -〉 -琼 -蕴 -符 -谣 -肃 -谥 -荥 -氵 -脂 -崃 -㙱 -挣 -瑭 -绶 -漯 -鬼 -骋 -姗 -崖 -壬 -祠 -織 -唔 -堑 -肪 -‧ -炊 -笃 -產 -苝 -埌 -竭 -析 -琅 -穴 -棘 -铎 -戀 -亦 -栾 -睢 -邸 -珈 -朽 -刨 -褂 -啃 -操 -丈 -機 -構 -魚 -汶 -閃 -膽 -陋 -哮 -喘 -帼 -澤 -綫 -氰 -胺 -些 -呀 -紗 -飬 -勺 -荻 -叙 -嘢 -霉 -│ -胞 -熠 -踩 -臂 -犯 -罪 -婁 -態 -陌 -窄 -伈 -壮 -杖 -跪 -帥 -衢 -燦 -燘 -仅 -扮 -闷 -悸 -铱 -循 -剃 -哎 -茹 -闫 -① -蘑 -钙 -⑪ -爍 -_ -應 -粵 -挽 -䝉 -尓 -枸 -杞 -握 -濑 -鏢 -卑 -蛇 -沔 -撬 -碶 -簸 -耦 -颠 -② -醴 -遠 -谅 -F -T -窨 -哨 -拱 -笛 -硒 -糜 -㎡ -瞿 -喀 -寕 -夀 -唛 -哆 -雙 -訓 -孵 -挎 -闰 -谚 -嗽 -戋 -烜 -茫 -護 -膚 -迹 -莘 -既 -獭 -鎖 -2 -輝 -講 -複 -荫 -a -r -黃 -琦 -廰 -懮 -務 -幛 -哇 -杳 -辖 -褪 -栎 -/ -挫 -述 -炼 -懂 -误 -* -歇 -籍 -抑 -舱 -捡 -矩 -毫 -诈 -株 -嘛 -抹 -渠 -它 -- -拟 -挤 -穷 -廿 -羞 -谁 -截 -钳 -搞 -填 -但 -焰 -炳 -匀 -蜗 -饱 -酶 -奏 -± -吨 -× -某 -悬 -暇 -礁 -辈 -毁 -契 -亡 -悉 -稍 -绑 -骤 -尴 -尬 -澎 -韧 -烷 -~ -堪 -詹 -搜 -妾 -祁 -1 -惧 -酮 -蚌 -恨 -谜 -绩 -叹 -侦 -$ -朔 -阵 -惯 
-烁 -绪 -堇 -燥 -灭 -忽 -彰 -哟 -歧 -败 -烦 -恼 -逾 -. -肆 -虐 -枚 -距 -喔 -翎 -伶 -措 -却 -帖 -竟 -拒 -浸 -褐 -圩 -勾 -埋 -驱 -吐 -阡 -柑 -骚 -氯 -磺 -仍 -啥 -匪 -臀 -蛮 -咋 -剥 -孜 -硝 -钮 -潇 -砸 -無 -遭 -暂 -痴 -梗 -挪 -赴 -胁 -惨 -衍 -霾 -℃ -扰 -溴 -酰 -轧 -弃 -瑕 -苞 -踢 -迫 -妖 -畏 -² -恤 -缕 -厌 -逻 -枯 -稣 -? -迩 -挚 -擅 -删 -摔 -岔 -唤 -庐 -宪 -隙 -忍 -勉 -陀 -摧 -矮 -耽 -剩 -榛 -蚀 -峻 -※ -烯 -囊 -侵 -愈 -雇 -亳 -泄 -欲 -浣 -讽 -噶 -瑚 -瑟 -羚 -赶 -拯 -阔 -淆 -雌 -坂 -恰 -哭 -慌 -碌 -酚 -祈 -琢 -慰 -骂 -羯 -悲 -裸 -筱 -替 -欺 -碰 -桓 -躺 -稿 -螨 -矛 -孢 -恐 -怖 -镊 -敛 -惟 -甚 -C -拽 -俯 -叛 -诬 -陷 -鸦 -< -爷 -黏 -噪 -仑 -璨 -仗 -辟 -闹 -褚 -咯 -贼 -捉 -唑 -锰 -钠 -秉 -苣 -秩 -聿 -罢 -僧 -嫌 -☆ -钒 -乏 -阮 -痿 -拘 -肋 -漠 -婿 -癸 -髓 -璧 -渍 -喂 -镍 -隶 -疲 -炜 -旬 -哑 -耿 -斥 -膀 -轰 -惭 -恳 -瘪 -哝 -嗓 -泣 -忸 -怩 -劫 -捂 -嚎 -悄 -蹲 -黯 -咧 -贻 -搅 -瞧 -柬 -蠢 -垮 -怒 -睬 -扛 -颤 -眯 -蟆 -吼 -窘 -吆 -滔 -凶 -狠 -愤 -佣 -聂 -跻 -迟 -脾 -凑 -绉 -抬 -吵 -瞌 -耸 -牺 -牲 -瞪 -膨 -惹 -揉 -懦 -犟 -憋 -绰 -惋 -咙 -蔼 -躲 -狡 -黠 -淌 -辕 -辙 -搁 -呃 -吩 -咐 -嚷 -庸 -咱 -噩 -斩 -哧 -歹 -昏 -绊 -歪 -妨 -吟 -啼 -慷 -慨 -忿 -眨 -漓 -轶 -扔 -贪 -譬 -谎 -捶 -哼 -窍 -啪 -绢 -冤 -屁 -屉 -惕 -擂 -坟 -屡 -辨 -蔑 -脏 -嗯 -擒 -谍 -馁 -愁 -忌 -狭 -怔 -晃 -蜷 -辞 -庇 -擞 -兢 -屑 -拎 -蠕 -睁 -沸 -衷 -矢 -吁 -豁 -曝 -怅 -踮 -坯 -敞 -婉 -狈 -憎 -哐 -跺 -踹 -婶 -羡 -耻 -挨 -搔 -佼 -唠 -鄙 -哀 -梆 -犒 -舅 -妄 -佯 -嘘 -柄 -舆 -荡 -惴 -夸 -瞒 -霹 -雳 -嗬 -瞻 -挠 -缠 -馊 -踱 -愣 -堕 -讳 -漉 -潦 -拭 -歉 -趁 -翩 -僵 -摞 -侄 -怵 -魁 -簿 -怨 -叽 -怜 -瞥 -嘲 -揍 -抡 -唬 -赦 -咄 -逼 -侮 -蹑 -霎 -眶 -诀 -篆 -傍 -瘫 -魄 -捅 -叨 -噼 -祟 -哄 -捣 -仇 -嗷 -娑 -啸 -弛 -捺 -恪 -殷 -缭 -锲 -爹 -拄 -炯 -辱 -讶 -撇 -诡 -睽 -掀 -沮 -岂 -昨 -宕 -趔 -颓 -斟 -蜇 -掖 -诧 -箍 -羹 -诌 -唉 -嚓 -懈 -岑 -趄 -奚 -沓 -懑 -沛 -○ -贬 -忐 -忑 -聋 -蚤 -誓 -怂 -恿 -砰 -拙 -冥 -谓 -襟 -掺 -楞 -咒 -鞠 -诩 -裆 -蹚 -髦 -劣 -匿 -抒 -垢 -嗦 -扉 -苛 -贿 -赂 -搡 -蹭 -鲛 -攥 -舵 -塌 -胫 -琐 -癞 -晦 -崩 -夭 -搽 -咛 -嗖 -褒 -悚 -诲 -怦 -懊 -踊 -揶 -揄 -躬 -懵 -渴 -悯 -喳 -垠 -撞 -吱 -叼 -熄 -吝 -骷 -髅 -趿 -俘 -僻 -猾 -赚 -曰 -抠 -阐 -倦 -楔 -迂 -逛 -铆 -凛 -奸 -逝 -寞 -嘤 -昧 -哗 -癖 -坷 -寂 -姊 -汲 -惮 -呗 -皈 -霄 -= -鲶 -褀 -芋 -張 -臺 -< -> -鳌 -汴 -j -昼 -剁 -侃 -體 -硂 -啄 -_ -薏 -砌 -\ -羌 -鹞 -碁 -樽 -畔 -疱 -" -辋 -毽 -↑ -尕 -稠 -獨 -玮 -桁 -莅 -卵 -捌 -鱬 -忻 -醍 -铧 -№ -扞 -涧 -牟 -锚 -浔 -傢 -俬 -] -垧 -涇 -糠 -麸 -掂 -蔗 -笺 -隅 -瘁 -巍 -氙 -葆 -霏 -@ -叢 -秆 -鶏 -缪 -峪 -斐 -缙 -甄 -钯 -` -胭 -眩 -統 -鄱 -盅 -嵊 -铲 -菓 -❤ -呕 -泻 -龅 -圧 -洺 -呛 -π -翊 -餘 -翥 -芎 -铬 -沱 -婵 -裔 -橦 -暢 -煸 -唻 -腋 -莽 -镭 -穂 -樵 -蕲 -乞 -暄 -剖 -狄 -蓟 -捍 -鼾 -珙 -拷 -屠 -茁 -堃 -裴 -皋 -炽 -屹 -頔 -剎 -迺 -廬 -兒 -膠 -脐 -颢 -畈 -竣 -卒 -缦 -蟾 -艰 -夢 -崋 -萘 -撮 -倌 -晞 -荪 -痹 -炤 -娓 -傳 -琨 -筠 -蕳 -髮 -樹 -溃 -疡 -瑪 -峯 -恕 -勑 -洼 -④ -蘘 -鰻 -裡 -夙 -昱 -谕 -钞 -赊 -琵 -坨 -哓 -寅 -邂 -锟 -鷄 -環 -縣 -痰 -矶 -飛 -昽 -痤 -皲 -霓 -馕 -娲 -冉 -赉 -匍 -瓢 -廠 -岙 -親 -稼 -勅 -锄 -^ -為 -戎 -麯 -绮 -* -铠 -設 -浠 -榈 -瞄 -芥 -钿 -泺 -氽 -鴿 -涞 -諾 -麾 -蔺 -祙 -仨 -芪 -囧 -蝇 -褊 -溧 -姥 -嚏 -嗅 -嗡 -崂 -癜 -喨 -扙 -‰ -颅 -恍 -惚 -侍 -↓ -汊 -岽 -涕 -峙 -甏 -溸 -枭 -億 -掩 -蒄 -涫 -拴 -扪 -爺 -補 -歆 -嶪 -遐 -囱 -灼 -札 -邳 -袄 -〈 -岱 -籣 -鐵 -锴 -豉 -鍋 -鋒 -赎 -砋 -垓 -頤 -蓮 -崴 -} -炀 -{ -様 -趵 -飚 -耙 -粢 -泩 -皿 -於 -哔 -匡 -枋 -柛 -溯 -砥 -砺 -係 -犇 -灣 -靳 -禺 -饦 -區 -铛 -鄯 -滟 -鲩 -鈺 -黍 -跷 -楂 -潔 -埗 -靈 -徜 -徉 -嘬 -戊 -铞 -鳅 -秭 -í -猗 -聲 -旌 -酩 -馏 -憬 -實 -鹊 -總 -衖 -矗 -蝉 -囡 -疥 -缨 -ʌ -聞 -钾 -廟 -沅 -娠 -骁 -鍊 -葳 -耵 -沣 -鄢 -笈 -圜 -胪 -啾 -瑁 -蒡 -廓 -夯 -臼 -湓 -煋 -丼 -珺 -瘙 -仺 -矽 -動 -Ƨ -汆 -宓 -弈 -翱 -³ -処 -専 -賣 -跤 -镒 -娽 -蔽 -雏 -琛 -巅 -虱 -龢 -滢 -踺 -壓 -阙 -и -绨 -铍 -斛 -龛 -缐 -飨 -忱 -韬 -嵘 -類 -聯 -嗝 -′ -町 -梏 -蛊 -瑛 -伢 -囗 -淅 -協 -爾 -讴 -菡 -桖 -嗲 -囿 -湄 -诛 -憶 -荃 -棛 -鵬 -屿 -夌 -俨 -弊 -亟 -娶 -缥 -缈 -― -舐 -忏 -瞭 -觑 -岌 -尸 -乩 -砝 -傀 -儡 -汰 -妓 -淫 -轼 -阗 -曳 -岿 -莒 -僚 -掷 -绠 -浊 -眢 -矣 -殃 -哉 -晤 -婢 -鼋 -鳏 -曩 -湫 -偃 -袱 -嶷 -惫 -帚 -悼 -倘 -腑 -撺 -﹒ -涸 -祀 -遏 -猿 -辄 -俟 -绛 -绡 -叩 -呻 -迄 -寐 -逵 -涅 -蒿 -滥 -艘 -镣 -铐 -焉 -惆 -窥 -} -妗 -诘 -簪 -氆 -氇 -兀 -匈 -唾 -渺 -冕 -舷 -僮 -笥 -怼 -伺 -溥 -杵 -捽 -叱 -贱 -袤 -■ -寡 -慑 -馔 -匮 -寥 -捭 -颇 -噤 -媪 -垄 -檄 -顷 -暮 -藉 -莠 -髻 -疵 -窜 -啖 -漱 -溟 -孰 -惬 -旎 -骘 -恙 -瘥 -氓 -黝 -豕 -痢 -鸢 -嫔 -韪 -讷 -磕 -狲 -睹 -︰ -辗 -跬 -瘸 -欷 -抨 -掠 -撩 -睒 -蟒 -涣 -骇 -嫉 -妒 -啬 -驯 -颔 -◆ -舁 -娥 -椭 -诣 -膦 -帷 -摹 -嫦 -狎 -龋 -訾 -涝 -槛 -蛹 -滦 -盏 -胧 -妪 -龁 -凄 -盎 -劾 -觇 -{ -渲 -髯 -衾 -孑 -婺 -萦 -谒 -惰 -桎 -婊 -鳃 -褫 -怙 -à -迥 -鼍 -赀 -─ -甾 -裨 -遽 -瘟 -娡 -陛 -囚 -哩 -浐 -扈 -慵 -桅 -兖 -酊 -舄 -蹴 -佟 -缢 
-啜 -吏 -哌 -赡 -掣 -萎 -悴 -屦 -扃 -缚 -棹 -奘 -闱 -唆 -逶 -迤 -绷 -苡 -纨 -郜 -辇 -蒗 -阑 -簇 -怠 -雉 -嘱 -悖 -娩 -殆 -腥 -咫 -阱 -驮 -焚 -瞅 -佻 -聆 -藩 -嗪 -捋 -袴 -泯 -揣 -拂 -袈 -衅 -瞑 -愕 -彀 -蹊 -榇 -滓 -敝 -颦 -咎 -斧 -笞 -脓 -驺 -麂 -腮 -濒 -迭 -翦 -辔 -汹 -隘 -伉 -臧 -癀 -觊 -觎 -琏 -霁 -丕 -龈 -赍 -谴 -谏 -苎 -睾 -祚 -庥 -瘠 -耎 -缜 -秽 -沦 -咤 -⑾ -钏 -抟 -砧 -戍 -衙 -攫 -藿 -愦 -盂 -鸩 -眷 -喧 -瘗 -瞽 -幔 -恃 -苯 -袒 -劬 -踞 -匣 -俚 -眈 -碉 -啻 -〔 -〕 -撰 -戌 -豳 -銮 -亵 -吮 -谬 -镑 -綦 -箸 -褶 -痪 -缉 -嗤 -濡 -膛 -睚 -腆 -糗 -髭 -尉 -锹 -骸 -赟 -殉 -攒 -饷 -茎 -铿 -歼 -挟 -鲠 -峭 -靶 -笄 -碘 -瞩 -涔 -鳖 -抉 -貉 -睇 -嫖 -娼 -衲 -⑦ -讹 -禀 -倭 -徙 -叟 -趺 -毙 -伫 -鸾 -朦 -濯 -怏 -蹙 -玳 -偕 -粳 -驸 -旷 -卞 -爇 -猝 -溺 -喙 -瞠 -□ -昙 -檐 -窠 -蟋 -禳 -逡 -攘 -诫 -穹 -磋 -奄 -踝 -骅 -噬 -彊 -榷 -⑵ -夤 -筐 -璀 -忤 -赧 -篝 -豺 -徘 -徊 -晌 -孺 -丐 -戟 -飕 -蒽 -褓 -遶 -嗥 -纭 -溲 -褴 -庶 -辍 -篡 -剿 -畀 -逮 -酆 -猖 -闾 -犁 -纣 -镌 -怯 -墉 -酣 -溅 -胱 -酋 -铀 -觥 -舛 -唏 -鏖 -肮 -婪 -遹 -呶 -嫡 -倔 -幌 -殂 -戮 -侑 -憧 -赘 -赃 -筵 -呜 -饥 -锷 -鬓 -诮 -诋 -瞎 -祜 -毋 -廛 -迕 -恝 -峥 -颚 -缗 -遴 -蓖 -唁 -恬 -桀 -骠 -獐 -踵 -霭 -剽 -洟 -姝 -雹 -锢 -霣 -溷 -髫 -寤 -惶 -歔 -吭 -俸 -哂 -濂 -厥 -皎 -骡 -喹 -篙 -扼 -咆 -敕 -伎 -嶂 -盯 -狩 -殴 -镪 -蛆 -镳 -骛 -坌 -邴 -谙 -飒 -琮 -Р -П -О -旖 -俑 -饕 -⑤ -糅 -撵 -牯 -蹿 -砣 -婧 -姮 -甥 -紊 -踌 -躇 -谲 -掐 -璋 -谀 -噱 -缄 -嗜 -贮 -嗔 -蚡 -髋 -迸 -侏 -箦 -椽 -蹰 -醮 -萤 -邈 -橐 -栉 -猕 -珰 -恻 -臾 -祷 -兕 -奁 -赈 -蚓 -骼 -澹 -伛 -偻 -俎 -傩 -纾 -鬣 -烬 -钗 -揖 -⑧ -怿 -暧 -鲧 -瞟 -袅 -β -呓 -赇 -蜈 -拮 -谑 -樯 -囤 -氤 -氲 -阕 -宥 -喋 -卮 -娈 -嘶 -圹 -嬖 -诏 -酞 -罄 -恹 -淹 -锏 -蜚 -矜 -蚣 -邃 -鸠 -疸 -掼 -栩 -洮 -耜 -毗 -頫 -畴 -痞 -躯 -悒 -孽 -梃 -绯 -嘈 -诿 -骰 -鬟 -崭 -铙 -斡 -袂 -彷 -渤 -疋 -痉 -挛 -眦 -芈 -啕 -纰 -刍 -忒 -祗 -膺 -畲 -獬 -豸 -Ⅲ -﹐ -庖 -啧 -壑 -襁 -痈 -恸 -诟 -楹 -吠 -痊 -荼 -樾 -苌 -讪 -蹬 -贳 -娴 -潸 -搪 -倨 -纥 -醺 -忡 -拣 -蟭 -鑙 -蜿 -蜒 -酡 -罹 -谩 -◎ -溍 -锗 -麽 -遨 -亥 -泮 -樗 -捎 -伧 -牦 -憔 -幡 -煽 -郪 -眺 -俐 -冗 -漪 -蚩 -痂 -椟 -漩 -嗟 -诽 -谤 -枷 -饽 -棣 -卬 -幞 -帔 -镬 -牍 -诳 -詈 -阆 -掇 -荏 -觌 -逑 -稔 -歙 -缮 -盹 -儋 -厍 -睨 -畦 -酗 -栀 -逞 -徇 -蚯 -摒 -炷 -鹜 -鹂 -谶 -绚 -臬 -罔 -枥 -瓯 -甑 -亓 -庠 -唿 -拗 -谗 -窟 -噎 -岫 -凋 -叵 -牒 -簋 -蛰 -噔 -拚 -鸮 -岖 -蹂 -躏 -徨 -掳 -涪 -屎 -绾 -箧 -拈 -茕 -殒 -黟 -薜 -噉 -嫣 -戛 -涟 -冁 -邋 -遢 -菽 -悭 -囔 -彘 -徼 -⑨ -锺 -顼 -掬 -廪 -捻 -俾 -愆 -窒 -釂 -後 -嚣 -腓 -哽 -铉 -靥 -颧 -疟 -雒 -搀 -汞 -阂 -杲 -唧 -佞 -娆 -妩 -浃 -叡 -⑩ -蹇 -祐 -缀 -谄 -梢 -臆 -胰 -蠹 -胤 -━ -狙 -谳 -俛 -翕 -瓠 -盥 -咂 -衿 -鐘 -惇 -な -て -诅 -畿 -枳 -跛 -泫 -孳 -巉 -飓 -迨 -垩 -焘 -– -恚 -箴 -疽 -讣 -窈 -妳 -噫 -魍 -魉 -爨 -╱ -诙 -狰 -狞 -踣 -汜 -尻 -缁 -⑥ -犷 -闼 -珩 -觞 -鸵 -蝠 -擢 -拧 -蓼 -晁 -瘴 -槊 -邕 -粜 -縯 -豭 -媵 -佚 -衩 -阊 -坳 -湍 -⑴ -铨 -俅 -嚬 -粕 -罘 -畑 -瞀 -瓻 -蹶 -搴 -祧 -冢 -秧 -缰 -邬 -诃 -聩 -糌 -骈 -佈 -┌ -┐ -蜥 -蜴 -痍 -∈ -墀 -渥 -缧 -孥 -咿 -勖 -恫 -刽 -嗣 -郓 -惘 -羁 -蝗 -枵 -绦 -弩 -踰 -馓 -蓦 -黜 -苷 -胯 -遑 -侩 -铤 -惺 -桨 -诨 -砾 -磬 -龊 -骊 -喃 -鬲 -渖 -锉 -坍 -鲇 -苒 -腼 -狻 -猊 -廨 -诒 -蛾 -爰 -裾 -舔 -桧 -裀 -悻 -讫 -奭 -戳 -膘 -倬 -殚 -峤 -颉 -戾 -葺 -薮 -涓 -卺 -饴 -椁 -榧 -镵 -怆 -怛 -翳 -踉 -挈 -迢 -踧 -羸 -胄 -戗 -價 -浼 -喟 -菟 -驷 -俳 -簏 -僦 -桠 -Ⅰ -咦 -^ -掰 -彗 -蝼 -匳 -胥 -⑿ -弑 -冏 -愠 -陨 -罂 -倜 -傥 -搠 -镛 -傈 -僳 -嫜 -鬻 -噻 -鸬 -鹚 -腉 -摁 -欻 -牖 -鹄 -√ -腭 -缵 -肄 -唳 -÷ -钨 -◇ -鸨 -撝 -挹 -黾 -倏 -绐 -麝 -蝙 -睑 -儆 -牝 -猷 -≤ -裰 -啁 -甭 -聒 -蹋 -蓐 -耆 -闳 -骥 -渚 -锽 -逋 -贽 -跋 -獠 -虬 -铢 -嘹 -α -羟 -匐 -肱 -椀 -焯 -鳍 -潺 -殛 -妁 -傕 -蛀 -巿 -偎 -芫 -狝 -楮 -淤 -絷 -珅 -肓 -犊 -镕 -魇 -∩ -氦 -贲 -脔 -窭 -谟 -愫 -媲 -珥 -旃 -磔 -嶙 -峋 -陬 -喑 -琖 -徭 -峦 -摈 -猱 -蕨 -婀 -Ⅳ -舂 -夥 -藐 -驽 -骀 -帏 -谪 -弁 -襆 -镰 -⒀ -窬 -棂 -鞫 -诰 -皴 -玷 -跣 -恣 -绫 -钤 -怍 -篑 -腈 -涿 -姒 -冽 -埒 -巳 -獗 -啮 -阈 -绔 -媾 -簌 -钺 -侥 -砒 -劙 -峇 -阏 -榫 -旒 -偌 -罴 -钼 -坭 -纮 -劓 -刖 -缞 -绖 -苫 -苻 -猢 -脍 -徵 -燠 -冑 -帧 -茧 -罥 -幄 -踔 -愬 -瓒 -辘 -猥 -槃 -荀 -酹 -∵ -锱 -﹑ -囫 -囵 -戢 -愍 -縠 -屣 -忝 -≠ -揆 -崚 -犴 -蹒 -佃 -楫 -獾 -嗾 -窿 -苇 -薨 -绌 -荚 -蕤 -逦 -锵 -耋 -佝 -桡 -晡 -钲 -槌 -檠 -鬃 -讥 -訇 -搢 -泠 -歘 -泅 -暾 -孪 -淬 -妲 -殓 -愎 -祇 -厩 -剜 -蛔 -俦 -迓 -藁 -凇 -跄 -萸 -嗄 -哙 -舀 -珐 -刓 -赝 -噙 -缱 -绻 -遘 -鞚 -媸 -戆 -嵯 -骶 -圯 -仡 -鳜 -赅 -愀 -殇 -膈 -辎 -馀 -滂 -▲ -乂 -襦 -葩 -壅 -砷 -巽 -瀹 -蹀 -躞 -嗫 -嚅 -筮 -瘳 -楗 -诤 -嗳 -皤 -柩 -剌 -忖 -殪 -髑 -钹 -嗌 -蹩 -缒 -囹 -圄 -讦 -钡 -蜍 -臛 -喁 -偈 -氡 -阍 -殁 -淙 -枰 -棰 -轲 -楣 -陲 -蛎 -悌 -岬 -邰 -臃 -搒 -讙 -⒈ -粲 -邛 -粝 -欤 -髀 -豨 -凫 -苋 -榼 -飧 -姹 -阃 -墠 -榱 -畎 -忪 -衽 -腴 -耨 -扢 -氅 -谧 -搐 -罅 -絜 -顒 -诂 -悞 -殄 -≥ -堙 -噭 -橇 -眛 -缳 -釐 -泞 -菅 -汭 -爲 -骧 -湮 -捱 -暹 -噗 -镞 -斫 -仫 
-娉 -铳 -碓 -夔 -嫱 -跸 -蛐 -拊 -绀 -疃 -跚 -蓍 -谯 -柢 -钚 -茆 -蜃 -鹳 -刭 -锑 -暲 -篾 -饬 -蒺 -啶 -隗 -燔 -趸 -弼 -燹 -胾 -旻 -浍 -踽 -阉 -衮 -眇 -芍 -绁 -浞 -祓 -嘌 -刈 -葭 -鹫 -揩 -鄄 -⑶ -煅 -炟 -觖 -贇 -魑 -弭 -瓴 -圉 -竽 -莴 -撷 -疝 -镉 -鹘 -觐 -甙 -縢 -嬛 -柈 -僖 -舡 -谮 -孱 -岷 -荜 -坼 -鞑 -⒊ -湎 -赭 -嗑 -碴 -啐 -鲑 -鸱 -芘 -闟 -慙 -焜 -汩 -隳 -芡 -茏 -踟 -赪 -侗 -狒 -窣 -谇 -瞾 -郯 -螫 -纔 -澍 -泱 -瘐 -伻 -蹻 -烝 -燮 -咣 -旄 -鹗 -擘 -酢 -篁 -△ -悫 -淖 -猩 -皑 -戕 -恽 -纻 -髃 -镝 -碛 -侪 -绺 -癔 -谆 -懜 -朐 -阚 -鈇 -刎 -苕 -匏 -蹉 -稗 -郄 -虮 -蛩 -嬗 -儇 -蔫 -豢 -椐 -蚰 -昃 -柞 -峄 -蛲 -曷 -赳 -珮 -杪 -虢 -螂 -呤 -唶 -昴 -鄣 -茔 -仄 -劭 -鞣 -杓 -姁 -薤 -膻 -氐 -醵 -杌 -笫 -穰 -螭 -跂 -褛 -燧 -郢 -哏 -撂 -韂 -﹖ -痼 -琰 -脁 -隼 -穑 -槁 -羖 -僭 -蒯 -孀 -骖 -龌 -潴 -﹔ -盍 -莩 -讵 -跽 -觳 -垝 -橹 -钴 -缶 -鸷 -遛 -翮 -鹣 -汨 -珪 -祉 -鞅 -怫 -缯 -噌 -濞 -庑 -斲 -洙 -趹 -玕 -颀 -轵 -髡 -嘭 -讧 -笤 -⒉ -磛 -繇 -疴 -沬 -趱 -鲔 -铩 -⒆ -跹 -胝 -酤 -寖 -轫 -貔 -嬴 -玦 -滈 -瘵 -曛 -馎 -楸 -晔 -笆 -缟 -庾 -茀 -爻 -弢 -赜 -遫 -睪 -郧 -鲋 -帑 -璆 -驩 -縻 -踯 -鲐 -崆 -峒 -餍 -邠 -螳 -喾 -嗛 -⒂ -颡 -苴 -瞋 -闇 -膑 -帨 -躅 -觚 -刳 -逖 -犍 -掾 -诎 -轸 -揜 -殽 -猃 -狁 -皁 -⒄ -鄜 -郿 -亶 -洹 -荦 -蛭 -紬 -柰 -寘 -羑 -嫪 -侔 -纡 -徂 -鲰 -乜 -餮 -鄠 -氖 -嵬 -虺 -蝮 -锾 -⒅ -埤 -棓 -苄 -悝 -渑 -觜 -莨 -轺 -酎 -鹆 -郾 -秏 -狃 -殳 -瀍 -蟥 -郗 -凃 -淦 -蟀 -昝 -嵇 -檩 -鄞 -荸 -仵 -邗 -铡 -琊 -芗 -蓥 -酃 -嘁 -鲅 -陉 -妫 -蛉 -璩 -濉 -畹 -蚪 -蒉 -溆 -謇 -掸 -仝 -浉 -逯 -麴 -誊 -邙 -仉 -珲 -逄 -邡 -坻 -眭 -磴 -渌 -僪 -埇 -缑 -禚 -沭 -欎 -螟 -笪 -庹 -茌 -邶 -秕 -隰 -秫 -圻 -冼 -杈 -蚜 -祢 -埂 -猇 -浈 -佴 -蝌 -貊 - -© \ No newline at end of file diff --git a/share/qtcreator/examples/15-Utils/fist.bin b/share/qtcreator/examples/15-Utils/fist.bin deleted file mode 100644 index be382d134089..000000000000 --- a/share/qtcreator/examples/15-Utils/fist.bin +++ /dev/null @@ -1,1313 +0,0 @@ -~w{quylpuimrfkoaei]af[`dZ^bW[`TY]TX]TX]UZ^X\`Z^c\`e]beaegeijkporwvx}}~tvw\_aMPUMPUKOUGKPBEK=AF8=CBCHGKMMNPPUWWeggorrvxx|||tttlllddd[[[RRRJJJBCC;=>369/28-08-08+/6),3&)1$'/!$,")'%%%$%%$%%& ' (!(")"' "#" %$"'&#('$)($)(%*)(-,,//011466<>>BEEHJJOPPWWW^^^fffnnnyyy}}}tttddeYXYSSSLLLBBB<<<666000+++%%% ''&'''(('''''(!* ",!#-#&/#&/$&0#&/"%.!#-#+"(!%!!!  !! !!!###%&&())++.//212444656999AA@KJJWVV`__hggtrr}|}xuuihhZZZNNNEED===333,,,(((#$$ "#$$%%%%&( *!+!#-!#-!", *)(%#" %%%./.565;<:CA@IHHUVUgfeywwxurhedTQPEB?<:66320..)('"!  $#%'&*,,),/ ")$%'&&&&%%&%%''&%$#!!     ! !# % $#    "! %#"''$/-*623>;;FEDTRRged~}|zwd^[LGD@=951/'#! -  !!"%%$(((-,-1137947;(*2 +!- #."-!,+*)))(())&#! "####$' (#"*##*##*!"( !&$!)'&A?>b``}}xwjc`TLIB;74-*-'%*&%*%$)%#($#&" #     !!$"%!&')+++++******('&%#""!!"##$&&&%%%%$####!%$$222===KKK`_appr}}xvkfd[TTMEE>75-'%  '##,((,))(%%$!!!!$#$&#$&!"$#$!$!$ #! $%%$$$##" ! ! !     -  - - -           !! !!  "!%./2<>ALOQ^_btv{}xymhiXSTIDD>996122,--('&! $!#&#&%#%! # ""#($'+ $( $!$ $$#!           -  -     -        !   - "&+.27:>FINX[`nqvc^_C>?9572.0%!#!""& $ ##! !!"#%%&%##"! !   - !!!! " " " " ! "!          $(('& $ ! -      -!#"##     "! $"' #&"  -!""''+036;ADIUX]qty½pkm\WYJFG511%!"!  !!$ !(!#*&"! $&%"   -   -  - -   -   "% -( +-/0121136"8$:&<&<&<$:#9!76!8#9!863/+'"      -    !!"#"#!( %+$*$! ! #&)/:=DNRWhkoýz{]XY@;<'$& (%(&%(#"&#"&#"&#"&#!'%"(%#)$$)$%+"$,($"""!"''$ !  - !%*- 27"='A",G&1L*5P.9T.9T1X.8Q*5L$.E&>!62/+ *'%"  !"   -#'(%####""&#+#+&#"$)025GHLcdg~niiRLM<78.)*% !! ! $"$%$($"#"       -$ -& ' '#  % --';#1E&5I+;O4DX=LcFUlP^uXh`rj{ou~wp}hv]kQ`zFUn>Mf6E]+:S#2I(@": 0 " - !# !  !"# -$ -# %)",!&/$)3!'0)!#!( $*$'+)*./16<>AVUWvuwibcSNM=79%!"#$!&%#)$#,(%%%$#  - &* , . -*'(0&<"0G,;T;JcGVnL]uRd|^ogzpztk}`rSeEWr7Hc->Y$3N%?876 3 -/,)&" !  -$* , ,,(! %"+&!'(,@@Ca`blkoYWYJEG<672,-("$!$'""-%&1#%0 #/ .+'# -  ( -1 :'A#0J,;T9HaIYrUd~_oo|tfx\mQbGZw=Ok1B^%6R+E!:. -## ('%%%!  $ *"+ #+"(""0/2IGHgeeonrJIM0/3"$!)#(+&+)%*#%+%&4"%3!10.-+) '$# " #/">%3O7EaGUrXhhxw{fzRf>Sp.B].I 7+ -$ " # #$%$! #((% ! 
Binary files a/share/qtcreator/examples/15-Utils/fist.bin and /dev/null differ
diff --git a/share/qtcreator/examples/15-Utils/five.bin b/share/qtcreator/examples/15-Utils/five.bin
deleted file mode 100644
index 4cfe3920579b..000000000000
Binary files a/share/qtcreator/examples/15-Utils/five.bin and /dev/null differ
diff --git a/share/qtcreator/examples/15-Utils/prior_data_320.bin b/share/qtcreator/examples/15-Utils/prior_data_320.bin
deleted file mode 100644
index 4d8aab972a77..000000000000
Binary files a/share/qtcreator/examples/15-Utils/prior_data_320.bin and /dev/null differ
diff --git a/share/qtcreator/examples/15-Utils/shear.bin b/share/qtcreator/examples/15-Utils/shear.bin
deleted file mode 100644
index a739569dc53b..000000000000
Binary files a/share/qtcreator/examples/15-Utils/shear.bin and /dev/null differ
diff --git a/share/qtcreator/examples/15-Utils/wozai.wav b/share/qtcreator/examples/15-Utils/wozai.wav deleted file mode 100644 index bef54be0109a..000000000000 Binary files a/share/qtcreator/examples/15-Utils/wozai.wav and /dev/null differ
diff --git a/share/qtcreator/examples/16-AI-Cube/ClassificationApp.py b/share/qtcreator/examples/16-AI-Cube/ClassificationApp.py new file mode 100755 index 000000000000..ac5367191be2 --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/ClassificationApp.py @@ -0,0 +1,111 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys + +# 自定义分类任务类 +class ClassificationApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size=[224,224],confidence_threshold=0.7,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 分类标签 + self.labels=labels + # 模型输入分辨率 + self.model_input_size=model_input_size + # 分类阈值 + self.confidence_threshold=confidence_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build参数包含输入shape和输出shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,results是模型输出的array列表 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + softmax_res=self.softmax(results[0][0]) + res_idx=np.argmax(softmax_res) + gc.collect() + # 如果类别分数大于阈值,返回当前类别和分数 + if softmax_res[res_idx]>self.confidence_threshold: + return self.labels[res_idx],softmax_res[res_idx] + else: + return "", 0.0 + + # 将结果绘制到屏幕上 + def draw_result(self,pl,res,score): + with ScopedTiming("draw result",self.debug_mode > 0): + if res!="": + pl.osd_img.clear() + mes=res+" "+str(round(score,3)) + pl.osd_img.draw_string_advanced(5,5,32,mes,color=(0,255,0)) + else: + pl.osd_img.clear() + + # softmax函数 + def softmax(self,x): + exp_x = np.exp(x - np.max(x)) + return exp_x / np.sum(exp_x) + + +if __name__=="__main__": + # 
添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径,需要用户自行拷贝到开发板的目录下 + kmodel_path="/sdcard/app/tests/ai_test_kmodel/veg_cls.kmodel" + # 根据数据集设置,在线训练平台和AICube部署包的deploy_config.json文件中包含该字段 + labels=["菠菜","长茄子","红苋菜","胡萝卜","西红柿","西蓝花"] + # 初始化PipeLine + pl=PipeLine(rgb888p_size=[1280,720],display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义分类器 + cls=ClassificationApp(kmodel_path,labels,rgb888p_size=[1280,720],display_size=display_size,debug_mode=0) + # 配置分类任务的预处理 + cls.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res,score=cls.run(img) + # 绘制结果到PipeLine的osd图像 + cls.draw_result(pl,res,score) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + cls.deinit() + pl.destroy() + diff --git a/share/qtcreator/examples/16-AI-Cube/DetectionApp.py b/share/qtcreator/examples/16-AI-Cube/DetectionApp.py new file mode 100755 index 000000000000..7621e54517d7 --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/DetectionApp.py @@ -0,0 +1,170 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube + +# 自定义目标检测任务类 +class DetectionApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size=[640,640],anchors=[10.13,16,30,33,23,30,61,62,45,59,119,116,90,156,198,373,326],model_type="AnchorBaseDet",confidence_threshold=0.5,nms_threshold=0.25,nms_option=False,strides=[8,16,32],rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 类别标签 + self.labels=labels + # 模型输入分辨率 + self.model_input_size=model_input_size + # 检测任务的锚框 + self.anchors=anchors + # 模型类型,支持"AnchorBaseDet","AnchorFreeDet","GFLDet"三种模型 + self.model_type=model_type + # 检测框类别置信度阈值 + self.confidence_threshold=confidence_threshold + # 检测框NMS筛选阈值 + self.nms_threshold=nms_threshold + # NMS选项,如果为True做类间NMS,如果为False做类内NMS + self.nms_option=nms_option + # 输出特征图的降采样倍数 + self.strides=strides + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # 调试模式 + self.debug_mode=debug_mode + # 检测框预置颜色值 + self.color_four=[(255, 220, 20, 60), (255, 119, 11, 32), (255, 0, 0, 142), (255, 0, 0, 230), + (255, 106, 0, 228), (255, 0, 60, 100), (255, 0, 80, 100), (255, 0, 0, 70), + (255, 0, 0, 192), (255, 250, 170, 30), (255, 100, 170, 30), (255, 220, 220, 0), + (255, 175, 116, 175), (255, 250, 0, 30), (255, 165, 42, 42), (255, 255, 77, 255), + (255, 0, 226, 252), (255, 182, 182, 255), (255, 0, 82, 0), (255, 120, 166, 157)] + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + 
ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数 + top,bottom,left,right=self.get_padding_param() + # 配置padding预处理 + self.ai2d.pad([0,0,0,0,top,bottom,left,right], 0, [114,114,114]) + # 配置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理,这里调用了aicube模块的后处理接口 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # AnchorBaseDet模型的后处理 + if self.model_type == "AnchorBaseDet": + det_boxes = aicube.anchorbasedet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(labels), self.confidence_threshold, self.nms_threshold, self.anchors, self.nms_option) + # GFLDet模型的后处理 + elif self.model_type == "GFLDet": + det_boxes = aicube.gfldet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(labels), self.confidence_threshold, self.nms_threshold, self.nms_option) + # AnchorFreeDet模型的后处理 + elif self.model_type=="AnchorFreeDet": + det_boxes = aicube.anchorfreedet_post_process( results[0], results[1], results[2], self.model_input_size, self.rgb888p_size, self.strides, len(labels), self.confidence_threshold, self.nms_threshold, self.nms_option) + else: + det_boxes=None + return det_boxes + + # 将结果绘制到屏幕上 + def draw_result(self,pl,det_boxes): + with ScopedTiming("draw osd",self.debug_mode > 0): + if det_boxes: + pl.osd_img.clear() + for det_boxe in det_boxes: + # 获取每一个检测框的坐标,并将其从原图分辨率坐标转换到屏幕分辨率坐标,将框和类别信息绘制在屏幕上 + x1, y1, x2, y2 = det_boxe[2],det_boxe[3],det_boxe[4],det_boxe[5] + sx=int(x1 * self.display_size[0] // self.rgb888p_size[0]) + sy=int(y1 * self.display_size[1] // self.rgb888p_size[1]) + w = int(float(x2 - x1) * self.display_size[0] // self.rgb888p_size[0]) + h = int(float(y2 - y1) * self.display_size[1] // self.rgb888p_size[1]) + pl.osd_img.draw_rectangle(sx , sy , w , h , color=self.get_color(det_boxe[0])) + label = self.labels[det_boxe[0]] + score = str(round(det_boxe[1],2)) + pl.osd_img.draw_string_advanced(sx, sy-50,32, label + " " + score , color=self.get_color(det_boxe[0])) + else: + pl.osd_img.clear() + pl.osd_img.draw_rectangle(0, 0, 128, 128, color=(0,0,0,0)) + + # 计算padding参数 + def get_padding_param(self): + ratiow = float(self.model_input_size[0]) / self.rgb888p_size[0]; + ratioh = float(self.model_input_size[1]) / self.rgb888p_size[1]; + ratio = min(ratiow, ratioh) + new_w = int(ratio * self.rgb888p_size[0]) + new_h = int(ratio * self.rgb888p_size[1]) + dw = float(self.model_input_size[0]- new_w) / 2 + dh = float(self.model_input_size[1] - new_h) / 2 + top = int(round(dh - 0.1)) + bottom = int(round(dh + 0.1)) + left = int(round(dw - 0.1)) + right = int(round(dw - 0.1)) + return top,bottom,left,right + + # 根据当前类别索引获取框的颜色 + def get_color(self, x): + idx=x%len(self.color_four) + return self.color_four[idx] + + +if __name__=="__main__": + # 添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # kmodel路径 + kmodel_path="/sdcard/app/tests/ai_test_kmodel/insect_det.kmodel" + # 检测类别标签 + labels=["leconte","boerner","armandi","linnaeus","coleoptera","acuminatus"] + # 类别置信度阈值 + confidence_threshold=0.5 + # nms阈值 + nms_threshold = 0.5 + # 训练中使用的锚框,在线训练平台和AICube部署包的deploy_config.json文件中包含该字段,只有AnchorBaseDet需要该参数 + 
anchors=[30,23,21,33,29,43,44,29,41,39,41,68,71,43,59,61,71,72] + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=[1280,720],display_size=display_size,display_mode=display_mode) + pl.create() + # 检测类实例,关注模型输入分辨率,传给AI的图像分辨率,显示的分辨率 + det=DetectionApp(kmodel_path,labels,model_input_size=[640,640],anchors=anchors,rgb888p_size=[1280,720],display_size=display_size,debug_mode=0) + # 配置预处理过程 + det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧 + img=pl.get_frame() + # 获得检测框 + det_boxes=det.run(img) + # 绘制检测框和类别信息 + det.draw_result(pl,det_boxes) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except BaseException as e: + sys.print_exception(e) + finally: + det.deinit() + pl.destroy() + diff --git a/share/qtcreator/examples/16-AI-Cube/MultiLabelApp.py b/share/qtcreator/examples/16-AI-Cube/MultiLabelApp.py new file mode 100755 index 000000000000..3d49cb3012fc --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/MultiLabelApp.py @@ -0,0 +1,111 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys + +# 自定义多标签分类任务类 +class MultiLabelApp(AIBase): + def __init__(self,kmodel_path,labels,model_input_size=[224,224],confidence_threshold=0.5,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + self.kmodel_path=kmodel_path + # 分类标签 + self.labels=labels + # 模型输入分辨率 + self.model_input_size=model_input_size + # 分类阈值 + self.confidence_threshold=confidence_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 显示分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 配置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + res=[] + scores=[] + # 依次计算所有类别中的所属类别,对每一个类别做二分类 + for i in range(len(self.labels)): + score=self.sigmoid(results[0][0][i]) + if score>self.confidence_threshold: + res.append(i) + scores.append(score) + return res,scores + + # 将结果绘制到屏幕上 + def draw_result(self,pl,res,scores): + with ScopedTiming("draw osd",self.debug_mode > 0): + pl.osd_img.clear() + mes="" + # 组织多标签分类结果 + for i in range(len(res)): + mes+=self.labels[res[i]]+" "+str(scores[i])+"\n" + pl.osd_img.draw_string_advanced(5,5,32,mes,color=(255,0,255,0)) + + # sigmoid函数 + def sigmoid(self,x): + return 1.0 / (1.0 + np.exp(-x)) + + +if __name__=="__main__": + # 添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + 
if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # 模型路径,需要用户自行拷贝到开发板的目录下 + kmodel_path="/sdcard/app/tests/ai_test_kmodel/landscape_multilabel.kmodel" + # 根据数据集设置,在线训练平台和AICube部署包的deploy_config.json文件中包含该字段 + labels=["沙漠","山脉","海洋","阳光","树"] + # 初始化PipeLine + pl=PipeLine(rgb888p_size=[1280,720],display_size=display_size,display_mode=display_mode) + pl.create() + # 初始化自定义多标签分类器 + multi=MultiLabelApp(kmodel_path,labels,rgb888p_size=[1280,720],display_size=display_size) + # 配置预处理过程 + multi.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧数据 + img=pl.get_frame() + # 推理当前帧 + res,scores=multi.run(img) + # 绘制结果到PipeLine的osd图像 + multi.draw_result(pl,res,scores) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + multi.deinit() + pl.destroy() diff --git a/share/qtcreator/examples/16-AI-Cube/OCR_Det.py b/share/qtcreator/examples/16-AI-Cube/OCR_Det.py new file mode 100755 index 000000000000..ee5b1c900610 --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/OCR_Det.py @@ -0,0 +1,134 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import image +import aicube +import random +import gc +import sys + +# 自定义OCR检测任务类 +class OCRDetectionApp(AIBase): + def __init__(self,det_kmodel,model_input_size,mask_threshold=0.5,box_threshold=0.5,rgb888p_size=[1280,720],display_size=[1920,1080],debug_mode=0): + super().__init__(det_kmodel,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.det_kmodel=det_kmodel + # OCR检测模型输入分辨率[width,height] + self.model_input_size=model_input_size + # ocr检测输出feature map二值化阈值 + self.mask_threshold=mask_threshold + # 检测框分数阈值 + self.box_threshold=box_threshold + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了pad和resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 计算padding参数,设置padding预处理 + self.ai2d.pad(self.get_pad_param(self.model_input_size,self.rgb888p_size), 0, [0, 0, 0]) + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + # chw2hwc + hwc_array=self.chw2hwc(self.cur_img) + # det_boxes结构为[[crop_array_nhwc,[p1_x,p1_y,p2_x,p2_y,p3_x,p3_y,p4_x,p4_y]],...],crop_array_nhwc是切割的检测框数据,后八个数据表示检测框的左上,右上,右下,左下的坐标 + det_boxes = aicube.ocr_post_process(results[0][:,:,:,0].reshape(-1), hwc_array.reshape(-1),self.model_input_size,self.rgb888p_size, self.mask_threshold, self.box_threshold) + # 
只取坐标值 + all_boxes_pos=[] + for det_box in det_boxes: + all_boxes_pos.append(det_box[1]) + return all_boxes_pos + + # 绘制推理结果 + def draw_result(self,pl,all_boxes_pos): + pl.osd_img.clear() + # 一次绘制四条边,得到文本检测的四边形,坐标需要从原图分辨率转换成显示分辨率 + for i in range(len(all_boxes_pos)): + for j in range(4): + x1=all_boxes_pos[i][2*j]*self.display_size[0]//self.rgb888p_size[0] + y1=all_boxes_pos[i][2*j+1]*self.display_size[1]//self.rgb888p_size[1] + x2=all_boxes_pos[i][(2*j+2)%8]*self.display_size[0]//self.rgb888p_size[0] + y2=all_boxes_pos[i][(2*j+3)%8]*self.display_size[1]//self.rgb888p_size[1] + pl.osd_img.draw_line(int(x1),int(y1),int(x2),int(y2),color=(255,255,0,0),thickness=4) + + # 计算padding参数 + def get_pad_param(self,out_img_size,input_img_size): + dst_w, dst_h = out_img_size + input_w, input_h = input_img_size + ratio = min(dst_w / input_w, dst_h / input_h) + new_w, new_h = int(input_w * ratio), int(input_h * ratio) + dw, dh = (dst_w - new_w) / 2, (dst_h - new_h) / 2 + top, bottom = int(round(0)), int(round(dh * 2)) + left, right = int(round(0)), int(round(dw * 2)) + return [0, 0, 0, 0, top, bottom, left, right] + + # chw2hwc + def chw2hwc(self,features): + ori_shape = (features.shape[0], features.shape[1], features.shape[2]) + c_hw_ = features.reshape((ori_shape[0], ori_shape[1] * ori_shape[2])) + hw_c_ = c_hw_.transpose() + new_array = hw_c_.copy() + hwc_array = new_array.reshape((ori_shape[1], ori_shape[2], ori_shape[0])) + del c_hw_ + del hw_c_ + del new_array + return hwc_array + +if __name__=="__main__": + # 添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # OCR检测kmdoel路径 + det_kmodel_path="/sdcard/app/tests/ai_test_kmodel/ocr_det_int16.kmodel" + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=[640,360],display_size=display_size,display_mode=display_mode) + pl.create() + # OCR检测类实例,关注模型输入分辨率,传给AI的图像分辨率,显示的分辨率 + ocr_det=OCRDetectionApp(det_kmodel_path,model_input_size=[640,640],rgb888p_size=[640,360],display_size=display_size,debug_mode=0) + # 配置预处理过程 + ocr_det.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧 + img=pl.get_frame() + # 推理当前帧获得检测框坐标 + boxes=ocr_det.run(img) + # 绘制文本检测框 + ocr_det.draw_result(pl,boxes) + # 在osd上显示文本检测框 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + ocr_det.deinit() + pl.destroy() + diff --git a/share/qtcreator/examples/16-AI-Cube/SegmentationApp.py b/share/qtcreator/examples/16-AI-Cube/SegmentationApp.py new file mode 100755 index 000000000000..34a1b5ac4cde --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/SegmentationApp.py @@ -0,0 +1,100 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys +import aicube +import math + +# 自定义分割任务类 +class SegmentationApp(AIBase): + def __init__(self,kmodel_path,num_class,model_input_size=[512,512],rgb888p_size=[512,512],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 分割类别数 + self.num_class=num_class + # 模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 
视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug_mode模式 + self.debug_mode=debug_mode + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置,默认为sensor给到AI的尺寸,您可以通过设置input_image_size自行修改输入尺寸 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 自定义当前任务的后处理 + def postprocess(self,input_np): + with ScopedTiming("postprocess",self.debug_mode > 0): + # 这里使用了aicube封装的接口seg_post_process做后处理,返回一个和display_size相同分辨率的mask图 + mask = aicube.seg_post_process(self.results[0], self.num_class, [self.rgb888p_size[1],self.rgb888p_size[0]], [self.display_size[1],self.display_size[0]]) + # 在mask数据上创建osd图像并返回 + res_mask = image.Image(self.display_size[0], self.display_size[1], image.ARGB8888,alloc=image.ALLOC_REF,data=mask) + return res_mask + + # 绘制分割结果,将创建的mask图像copy到pl.osd_img上 + def draw_result(self,pl,res_img): + with ScopedTiming("draw osd",self.debug_mode > 0): + res_img.copy_to(pl.osd_img) + + +if __name__=="__main__": + # 添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # kmodel路径 + kmodel_path="/sdcard/app/tests/ai_test_kmodel/ocular_seg.kmodel" + # 分割类别数,设置类别数时必须包含背景,且背景为第一类 + num_class=2 + # 初始化PipeLine,只关注传给AI的图像分辨率,显示的分辨率 + pl=PipeLine(rgb888p_size=[512,512],display_size=display_size,display_mode=display_mode) + pl.create() + # 分割类实例,关注模型输入分辨率,传给AI的图像分辨率,显示的分辨率 + seg=SegmentationApp(kmodel_path,num_class,model_input_size=[512,512],rgb888p_size=[512,512],display_size=display_size) + # 配置预处理过程 + seg.config_preprocess() + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧 + img=pl.get_frame() + # 获得mask结果 + mask=seg.run(img) + # 绘制mask结果到osd上 + seg.draw_result(pl,mask) + # 显示绘制结果 + pl.show_image() + gc.collect() + except BaseException as e: + sys.print_exception(e) + finally: + seg.deinit() + pl.destroy() + diff --git a/share/qtcreator/examples/16-AI-Cube/SelfLearningApp.py b/share/qtcreator/examples/16-AI-Cube/SelfLearningApp.py new file mode 100755 index 000000000000..06719a09995e --- /dev/null +++ b/share/qtcreator/examples/16-AI-Cube/SelfLearningApp.py @@ -0,0 +1,158 @@ +from libs.PipeLine import PipeLine, ScopedTiming +from libs.AIBase import AIBase +from libs.AI2D import Ai2d +import os +import ujson +from media.media import * +from time import * +import nncase_runtime as nn +import ulab.numpy as np +import time +import utime +import image +import random +import gc +import sys + +# 自定义自学习分类任务类 +class SelfLearningApp(AIBase): + def __init__(self,kmodel_path,model_input_size=[224,224],confidence_threshold=0.5,rgb888p_size=[224,224],display_size=[1920,1080],debug_mode=0): + super().__init__(kmodel_path,model_input_size,rgb888p_size,debug_mode) + # kmodel路径 + self.kmodel_path=kmodel_path + # 模型输入分辨率 + self.model_input_size=model_input_size + # sensor给到AI的图像分辨率,宽16字节对齐 + self.rgb888p_size=[ALIGN_UP(rgb888p_size[0],16),rgb888p_size[1]] + # 
视频输出VO分辨率,宽16字节对齐 + self.display_size=[ALIGN_UP(display_size[0],16),display_size[1]] + # debug模式 + self.debug_mode=debug_mode + # 模型输出列表 + self.results=[] + # features库 + self.embeddings=[] + # features对应的标签 + self.embeddings_labels=[] + # Ai2d实例,用于实现模型预处理 + self.ai2d=Ai2d(debug_mode) + # 设置Ai2d的输入输出格式和类型 + self.ai2d.set_ai2d_dtype(nn.ai2d_format.NCHW_FMT,nn.ai2d_format.NCHW_FMT,np.uint8, np.uint8) + + # 配置预处理操作,这里使用了resize,Ai2d支持crop/shift/pad/resize/affine,具体代码请打开/sdcard/app/libs/AI2D.py查看 + def config_preprocess(self,input_image_size=None): + with ScopedTiming("set preprocess config",self.debug_mode > 0): + # 初始化ai2d预处理配置 + ai2d_input_size=input_image_size if input_image_size else self.rgb888p_size + # 设置resize预处理 + self.ai2d.resize(nn.interp_method.tf_bilinear, nn.interp_mode.half_pixel) + # build预处理过程,参数为输入tensor的shape和输出tensor的shape + self.ai2d.build([1,3,ai2d_input_size[1],ai2d_input_size[0]],[1,3,self.model_input_size[1],self.model_input_size[0]]) + + # 加载图片,将图片特征化后存入特征向量库 + def load_image(self,image_path,label): + # 读取一张图片 + img=self.read_img(image_path) + # 不同图片的宽高不同,因此每加载一张都要配置预处理过程 + self.config_preprocess([img.shape[2],img.shape[1]]) + # 预处理,推理,输出特征入库,特征标签入库 + tensor=self.preprocess(img) + results=self.inference(tensor) + self.embeddings.append(results[0][0]) + self.embeddings_labels.append(label) + # 重置为视频流的预处理 + self.config_preprocess() + gc.collect() + + # 从本地读入图片,并实现HWC转CHW + def read_img(self,img_path): + img_data = image.Image(img_path) + img_data_rgb888=img_data.to_rgb888() + img_hwc=img_data_rgb888.to_numpy_ref() + shape=img_hwc.shape + img_tmp = img_hwc.reshape((shape[0] * shape[1], shape[2])) + img_tmp_trans = img_tmp.transpose() + img_res=img_tmp_trans.copy() + img_return=img_res.reshape((shape[2],shape[0],shape[1])) + return img_return + + # 自学习任务推理流程 + def postprocess(self,results): + with ScopedTiming("postprocess",self.debug_mode > 0): + if len(self.embeddings)>0: + # 计算特征向量和向量库中所有向量的最大相似度和相似向量的索引 + idx,score=self.compute_similar(results[0][0]) + gc.collect() + # 返回分类标签和分数 + if len(self.embeddings_labels): + return self.embeddings_labels[idx],score + else: + return "", 0.0 + else: + return "Please add new category images...", 0.0 + + # 绘制分类结果 + def draw_result(self,pl,res,score): + with ScopedTiming("draw osd",self.debug_mode > 0): + pl.osd_img.clear() + mes=res+" "+str(round(score,3)) + pl.osd_img.draw_string_advanced(5,5,32,mes,color=(255,0,255,0)) + + # 计算参数向量和向量库所有向量的相似度,并返回最大相似索引和对应的相似度分数 + def compute_similar(self,embedding): + output = np.linalg.norm(embedding) + embed_lib = np.linalg.norm(np.array(self.embeddings,dtype=np.float), axis=1) + dot_products = np.dot(np.array(self.embeddings), embedding) + similarities = dot_products / (embed_lib * output) + most_similar_index=np.argmax(similarities) + return most_similar_index,similarities[most_similar_index] + + + +if __name__=="__main__": + # 添加显示模式,支持"hdmi"和"lcd" + display_mode="hdmi" + if display_mode=="hdmi": + display_size=[1920,1080] + else: + display_size=[800,480] + # kmodel模型路径 + kmodel_path="/sdcard/app/tests/ai_test_kmodel/embedding.kmodel" + # 初始化PipeLine + pl=PipeLine(rgb888p_size=[1280,720],display_size=display_size,display_mode=display_mode) + pl.create() + # 自定义自学习分类器实例化 + cls=SelfLearningApp(kmodel_path,model_input_size=[224,224],rgb888p_size=[1280,720],display_size=display_size) + # 配置预处理过程 + cls.config_preprocess() + # 加载图片及其类别标签 + cls.load_image("/sdcard/app/tests/ai_test_utils/0.jpg","菠菜") + cls.load_image("/sdcard/app/tests/ai_test_utils/1.jpg","菠菜") + 
cls.load_image("/sdcard/app/tests/ai_test_utils/2.jpg","菠菜") + + cls.load_image("/sdcard/app/tests/ai_test_utils/3.jpg","长茄子") + cls.load_image("/sdcard/app/tests/ai_test_utils/4.jpg","长茄子") + cls.load_image("/sdcard/app/tests/ai_test_utils/5.jpg","长茄子") + + cls.load_image("/sdcard/app/tests/ai_test_utils/6.jpg","胡萝卜") + cls.load_image("/sdcard/app/tests/ai_test_utils/7.jpg","胡萝卜") + cls.load_image("/sdcard/app/tests/ai_test_utils/8.jpg","胡萝卜") + try: + while True: + os.exitpoint() + with ScopedTiming("total",1): + # 获取当前帧 + img=pl.get_frame() + # 获取分类结果和分数 + res,score=cls.run(img) + # 绘制结果到PipeLine的osd图像 + cls.draw_result(pl,res,score) + # 显示当前的绘制结果 + pl.show_image() + gc.collect() + except Exception as e: + sys.print_exception(e) + finally: + cls.deinit() + pl.destroy() + diff --git a/share/qtcreator/examples/99-HelloWorld/helloworld.py b/share/qtcreator/examples/99-HelloWorld/helloworld.py new file mode 100644 index 000000000000..ad35e5ae34d7 --- /dev/null +++ b/share/qtcreator/examples/99-HelloWorld/helloworld.py @@ -0,0 +1 @@ +print("Hello World") diff --git a/src/plugins/openmv/histogram/openmvpluginhistogram.cpp b/src/plugins/openmv/histogram/openmvpluginhistogram.cpp old mode 100644 new mode 100755 index e8c3fbebc86d..25790435aa1b --- a/src/plugins/openmv/histogram/openmvpluginhistogram.cpp +++ b/src/plugins/openmv/histogram/openmvpluginhistogram.cpp @@ -4,6 +4,8 @@ #include "openmvtr.h" #include +#include +#include #define RGB_COLOR_SPACE_R 0 #define RGB_COLOR_SPACE_G 1 @@ -140,6 +142,163 @@ static inline int getValue(int value, int channel) } } +struct Histogram { + QVector value; + int mean; + int median; + int mode; + int standardDeviation; + int min; + int max; + int lowerQuartile; + int upperQuartile; +}; + +static void statistics(struct Histogram& hist, unsigned total) { + unsigned medianOffset = (total + 1) / 2; // 1/2th + long long lq = ((total * 1) + 3) / 4; // 1/4th + long long uq = ((total * 3) + 3) / 4; // 3/4th + + long long sum = 0; + long long avg = 0; + auto arr = &hist.value; + for (unsigned j = 0; j < arr->size(); j++) { + sum += (*arr)[j]; + avg += (*arr)[j] * j; + if ((*arr)[j]) { + if (hist.min == 0) { + hist.min = j; + } + hist.max = j; + } + + if ((sum >= medianOffset) && (hist.median == 0)) { + hist.median = j; + } + if ((sum >= lq) && (hist.lowerQuartile == 0)) { + hist.lowerQuartile = j; + } + if ((sum >= uq) && (hist.upperQuartile == 0)) { + hist.upperQuartile = j; + } + if ((*arr)[j] > (*arr)[hist.mode]) { + hist.mode = j; + } + } + hist.mean = (avg + total / 2) / total; + unsigned long long stCount = 0; + for (int j = 0; j < arr->size(); j++) { + stCount += (*arr)[j] * (j - hist.mean) * (j - hist.mean); + } + hist.standardDeviation = sum ? 
round(sqrt(stCount / double(sum))) : 0; +} + +static std::array statiticsRGB(const QImage& image) { + std::array result; + for (unsigned i = 0; i < 3; i++) { + result[i].value.resize(256); + result[i].value.fill(0); + result[i].mean = 0; + result[i].median = 0; + result[i].mode = 0; + result[i].min = 0; + result[i].max = 0; + result[i].median = 0; + result[i].lowerQuartile = 0; + result[i].upperQuartile = 0; + result[i].mode = 0; + } + + if (image.isNull()) { + return result; + } + + for(int y = 0; y < image.height(); y++) { + for(int x = 0; x < image.width(); x++) { + auto pix = image.pixel(x, y); + result[0].value[qRed(pix)] += 1; + result[1].value[qGreen(pix)] += 1; + result[2].value[qBlue(pix)] += 1; + } + } + + for (unsigned i = 0; i < 3; i++) { + statistics(result[i], image.width() * image.height()); + } + + return result; +} + +static std::array statiticsLAB(const QImage& image) { + std::array result; + result[0].value.resize(101); + result[0].mean = 0; + result[0].median = 0; + result[0].mode = 0; + result[0].min = 0; + result[0].max = 0; + result[0].median = 0; + result[0].lowerQuartile = 0; + result[0].upperQuartile = 0; + for (unsigned i = 1; i < 3; i++) { + result[i].value.resize(256); + result[i].mean = 0; + result[i].median = 0; + result[i].mode = 0; + result[i].min = 0; + result[i].max = 0; + result[i].median = 0; + result[i].lowerQuartile = 0; + result[i].upperQuartile = 0; + } + + for(int y = 0; y < image.height(); y++) { + for(int x = 0; x < image.width(); x++) { + auto pix = image.pixel(x, y); + result[0].value[toL(pix)] += 1; + result[1].value[toA(pix)] += 1; + result[2].value[toB(pix)] += 1; + } + } + + for (unsigned i = 0; i < 3; i++) { + statistics(result[i], image.width() * image.height()); + } + + return result; +} + +static std::array statiticsYUV(const QImage& image) { + std::array result; + for (unsigned i = 0; i < 3; i++) { + result[i].value.resize(256); + result[i].value.fill(0); + result[i].mean = 0; + result[i].median = 0; + result[i].mode = 0; + result[i].min = 0; + result[i].max = 0; + result[i].median = 0; + result[i].lowerQuartile = 0; + result[i].upperQuartile = 0; + } + + for(int y = 0; y < image.height(); y++) { + for(int x = 0; x < image.width(); x++) { + auto pix = image.pixel(x, y); + result[0].value[toY(pix)] += 1; + result[1].value[toU(pix)] += 1; + result[2].value[toV(pix)] += 1; + } + } + + for (unsigned i = 0; i < 3; i++) { + statistics(result[i], image.width() * image.height()); + } + + return result; +} + void OpenMVPluginHistogram::updatePlot(QCPGraph *graph, int channel) { QImage image = m_pixmap.toImage(); @@ -482,6 +641,8 @@ OpenMVPluginHistogram::OpenMVPluginHistogram(QWidget *parent) : QWidget(parent), arg(Utils::creatorTheme()->color(Utils::Theme::TextColorNormal).name())); colorSpaceChanged(m_colorSpace); + + } OpenMVPluginHistogram::~OpenMVPluginHistogram() @@ -571,215 +732,83 @@ void OpenMVPluginHistogram::colorSpaceChanged(int colorSpace) { m_colorSpace = colorSpace; + update(); +} + +void OpenMVPluginHistogram::update() +{ switch(m_colorSpace) { + case NONE_COLOR_SPACE: + break; case RGB_COLOR_SPACE: { - updatePlot(m_channel0, RGB_COLOR_SPACE_R); - m_ui->C0MeanValue->setNum(m_mean); - m_ui->C0MedianValue->setNum(m_median); - m_ui->C0ModeValue->setNum(m_mode); - m_ui->C0StDevValue->setNum(m_standardDeviation); - m_ui->C0MinValue->setNum(m_min); - m_ui->C0MaxValue->setNum(m_max); - m_ui->C0LQValue->setNum(m_lowerQuartile); - m_ui->C0UQValue->setNum(m_upperQuartile); +#define USE_STATISTICS 0 +#if USE_STATISTICS + auto hists = 
statiticsRGB(m_pixmap.toImage()); + m_ui->C0MeanValue->setNum(hists[0].mean); + m_ui->C0MedianValue->setNum(hists[0].median); + m_ui->C0ModeValue->setNum(hists[0].mode); + m_ui->C0StDevValue->setNum(hists[0].standardDeviation); + m_ui->C0MinValue->setNum(hists[0].min); + m_ui->C0MaxValue->setNum(hists[0].max); + m_ui->C0LQValue->setNum(hists[0].lowerQuartile); + m_ui->C0UQValue->setNum(hists[0].upperQuartile); m_channel0->setPen(QPen(QBrush(QColor(255, 0, 0)), 0, Qt::SolidLine)); m_channel0->setBrush(QBrush(QColor(255, 200, 200), Qt::SolidPattern)); m_ui->C0Plot->rescaleAxes(); m_ui->C0Plot->yAxis->setLabel(Tr::tr("R")); m_ui->C0Plot->yAxis->setRange(0, 1); m_ui->C0Plot->replot(); + m_channel0->clearData(); + if (hists[0].value[hists[0].mode]) { + for (unsigned i = 0; i < hists[0].value.size(); i++) { + m_channel0->addData(i, hists[0].mode ? hists[0].value[i] / double(hists[0].value[hists[0].mode]) : 0); + } + } - updatePlot(m_channel1, RGB_COLOR_SPACE_G); - m_ui->C1MeanValue->setNum(m_mean); - m_ui->C1MedianValue->setNum(m_median); - m_ui->C1ModeValue->setNum(m_mode); - m_ui->C1StDevValue->setNum(m_standardDeviation); - m_ui->C1MinValue->setNum(m_min); - m_ui->C1MaxValue->setNum(m_max); - m_ui->C1LQValue->setNum(m_lowerQuartile); - m_ui->C1UQValue->setNum(m_upperQuartile); + m_ui->C1MeanValue->setNum(hists[1].mean); + m_ui->C1MedianValue->setNum(hists[1].median); + m_ui->C1ModeValue->setNum(hists[1].mode); + m_ui->C1StDevValue->setNum(hists[1].standardDeviation); + m_ui->C1MinValue->setNum(hists[1].min); + m_ui->C1MaxValue->setNum(hists[1].max); + m_ui->C1LQValue->setNum(hists[1].lowerQuartile); + m_ui->C1UQValue->setNum(hists[1].upperQuartile); m_channel1->setPen(QPen(QBrush(QColor(0, 255, 0)), 0, Qt::SolidLine)); m_channel1->setBrush(QBrush(QColor(200, 255, 200), Qt::SolidPattern)); m_ui->C1Plot->rescaleAxes(); m_ui->C1Plot->yAxis->setLabel(Tr::tr("G")); m_ui->C1Plot->yAxis->setRange(0, 1); m_ui->C1Plot->replot(); + m_channel1->clearData(); + if (hists[1].value[hists[1].mode]) { + for (unsigned i = 0; i < hists[1].value.size(); i++) { + m_channel1->addData(i, hists[1].value[i] / double(hists[1].value[hists[1].mode])); + } + } - updatePlot(m_channel2, RGB_COLOR_SPACE_B); - m_ui->C2MeanValue->setNum(m_mean); - m_ui->C2MedianValue->setNum(m_median); - m_ui->C2ModeValue->setNum(m_mode); - m_ui->C2StDevValue->setNum(m_standardDeviation); - m_ui->C2MinValue->setNum(m_min); - m_ui->C2MaxValue->setNum(m_max); - m_ui->C2LQValue->setNum(m_lowerQuartile); - m_ui->C2UQValue->setNum(m_upperQuartile); + m_ui->C2MeanValue->setNum(hists[2].mean); + m_ui->C2MedianValue->setNum(hists[2].median); + m_ui->C2ModeValue->setNum(hists[2].mode); + m_ui->C2StDevValue->setNum(hists[2].standardDeviation); + m_ui->C2MinValue->setNum(hists[2].min); + m_ui->C2MaxValue->setNum(hists[2].max); + m_ui->C2LQValue->setNum(hists[2].lowerQuartile); + m_ui->C2UQValue->setNum(hists[2].upperQuartile); m_channel2->setPen(QPen(QBrush(QColor(0, 0, 255)), 0, Qt::SolidLine)); m_channel2->setBrush(QBrush(QColor(200, 200, 255), Qt::SolidPattern)); m_ui->C2Plot->rescaleAxes(); m_ui->C2Plot->yAxis->setLabel(Tr::tr("B")); m_ui->C2Plot->yAxis->setRange(0, 1); m_ui->C2Plot->replot(); - - m_ui->C1Plot->show(); - m_ui->C1Stats->show(); - m_ui->C2Plot->show(); - m_ui->C2Stats->show(); - - break; - } - case GRAYSCALE_COLOR_SPACE: - { - updatePlot(m_channel0, GRAYSCALE_COLOR_SPACE_Y); - m_ui->C0MeanValue->setNum(m_mean); - m_ui->C0MedianValue->setNum(m_median); - m_ui->C0ModeValue->setNum(m_mode); - 
m_ui->C0StDevValue->setNum(m_standardDeviation); - m_ui->C0MinValue->setNum(m_min); - m_ui->C0MaxValue->setNum(m_max); - m_ui->C0LQValue->setNum(m_lowerQuartile); - m_ui->C0UQValue->setNum(m_upperQuartile); - m_channel0->setPen(QPen(QBrush(QColor(143, 143, 143)), 0, Qt::SolidLine)); - m_channel0->setBrush(QBrush(QColor(200, 200, 200), Qt::SolidPattern)); - m_ui->C0Plot->rescaleAxes(); - m_ui->C0Plot->yAxis->setLabel(Tr::tr("Y")); - m_ui->C0Plot->yAxis->setRange(0, 1); - m_ui->C0Plot->replot(); - - m_ui->C1Plot->hide(); - m_ui->C1Stats->hide(); - m_ui->C2Plot->hide(); - m_ui->C2Stats->hide(); - - break; - } - case LAB_COLOR_SPACE: - { - updatePlot(m_channel0, LAB_COLOR_SPACE_L); - m_ui->C0MeanValue->setNum(m_mean); - m_ui->C0MedianValue->setNum(m_median); - m_ui->C0ModeValue->setNum(m_mode); - m_ui->C0StDevValue->setNum(m_standardDeviation); - m_ui->C0MinValue->setNum(m_min); - m_ui->C0MaxValue->setNum(m_max); - m_ui->C0LQValue->setNum(m_lowerQuartile); - m_ui->C0UQValue->setNum(m_upperQuartile); - m_channel0->setPen(QPen(QBrush(QColor(143, 143, 143)), 0, Qt::SolidLine)); - m_channel0->setBrush(QBrush(QColor(200, 200, 200), Qt::SolidPattern)); - m_ui->C0Plot->rescaleAxes(); - m_ui->C0Plot->yAxis->setLabel(Tr::tr("L")); - m_ui->C0Plot->yAxis->setRange(0, 1); - m_ui->C0Plot->replot(); - - updatePlot(m_channel1, LAB_COLOR_SPACE_A); - m_ui->C1MeanValue->setNum(m_mean); - m_ui->C1MedianValue->setNum(m_median); - m_ui->C1ModeValue->setNum(m_mode); - m_ui->C1StDevValue->setNum(m_standardDeviation); - m_ui->C1MinValue->setNum(m_min); - m_ui->C1MaxValue->setNum(m_max); - m_ui->C1LQValue->setNum(m_lowerQuartile); - m_ui->C1UQValue->setNum(m_upperQuartile); - m_channel1->setPen(QPen(QBrush(QColor(204, 255, 0)), 0, Qt::SolidLine)); - m_channel1->setBrush(QBrush(QColor(244, 255, 200), Qt::SolidPattern)); - m_ui->C1Plot->rescaleAxes(); - m_ui->C1Plot->yAxis->setLabel(Tr::tr("A")); - m_ui->C1Plot->yAxis->setRange(0, 1); - m_ui->C1Plot->replot(); - - updatePlot(m_channel2, LAB_COLOR_SPACE_B); - m_ui->C2MeanValue->setNum(m_mean); - m_ui->C2MedianValue->setNum(m_median); - m_ui->C2ModeValue->setNum(m_mode); - m_ui->C2StDevValue->setNum(m_standardDeviation); - m_ui->C2MinValue->setNum(m_min); - m_ui->C2MaxValue->setNum(m_max); - m_ui->C2LQValue->setNum(m_lowerQuartile); - m_ui->C2UQValue->setNum(m_upperQuartile); - m_channel2->setPen(QPen(QBrush(QColor(0, 102, 255)), 0, Qt::SolidLine)); - m_channel2->setBrush(QBrush(QColor(200, 222, 255), Qt::SolidPattern)); - m_ui->C2Plot->rescaleAxes(); - m_ui->C2Plot->yAxis->setLabel(Tr::tr("B")); - m_ui->C2Plot->yAxis->setRange(0, 1); - m_ui->C2Plot->replot(); - - m_ui->C1Plot->show(); - m_ui->C1Stats->show(); - m_ui->C2Plot->show(); - m_ui->C2Stats->show(); - - break; - } - case YUV_COLOR_SPACE: - { - updatePlot(m_channel0, YUV_COLOR_SPACE_Y); - m_ui->C0MeanValue->setNum(m_mean); - m_ui->C0MedianValue->setNum(m_median); - m_ui->C0ModeValue->setNum(m_mode); - m_ui->C0StDevValue->setNum(m_standardDeviation); - m_ui->C0MinValue->setNum(m_min); - m_ui->C0MaxValue->setNum(m_max); - m_ui->C0LQValue->setNum(m_lowerQuartile); - m_ui->C0UQValue->setNum(m_upperQuartile); - m_channel0->setPen(QPen(QBrush(QColor(143, 143, 143)), 0, Qt::SolidLine)); - m_channel0->setBrush(QBrush(QColor(200, 200, 200), Qt::SolidPattern)); - m_ui->C0Plot->rescaleAxes(); - m_ui->C0Plot->yAxis->setLabel(Tr::tr("Y")); - m_ui->C0Plot->yAxis->setRange(0, 1); - m_ui->C0Plot->replot(); - - updatePlot(m_channel1, YUV_COLOR_SPACE_U); - m_ui->C1MeanValue->setNum(m_mean); - m_ui->C1MedianValue->setNum(m_median); 
- m_ui->C1ModeValue->setNum(m_mode); - m_ui->C1StDevValue->setNum(m_standardDeviation); - m_ui->C1MinValue->setNum(m_min); - m_ui->C1MaxValue->setNum(m_max); - m_ui->C1LQValue->setNum(m_lowerQuartile); - m_ui->C1UQValue->setNum(m_upperQuartile); - m_channel1->setPen(QPen(QBrush(QColor(0, 255, 102)), 0, Qt::SolidLine)); - m_channel1->setBrush(QBrush(QColor(200, 255, 222), Qt::SolidPattern)); - m_ui->C1Plot->rescaleAxes(); - m_ui->C1Plot->yAxis->setLabel(Tr::tr("U")); - m_ui->C1Plot->yAxis->setRange(0, 1); - m_ui->C1Plot->replot(); - - updatePlot(m_channel2, YUV_COLOR_SPACE_V); - m_ui->C2MeanValue->setNum(m_mean); - m_ui->C2MedianValue->setNum(m_median); - m_ui->C2ModeValue->setNum(m_mode); - m_ui->C2StDevValue->setNum(m_standardDeviation); - m_ui->C2MinValue->setNum(m_min); - m_ui->C2MaxValue->setNum(m_max); - m_ui->C2LQValue->setNum(m_lowerQuartile); - m_ui->C2UQValue->setNum(m_upperQuartile); - m_channel2->setPen(QPen(QBrush(QColor(204, 0, 255)), 0, Qt::SolidLine)); - m_channel2->setBrush(QBrush(QColor(244, 200, 255), Qt::SolidPattern)); - m_ui->C2Plot->rescaleAxes(); - m_ui->C2Plot->yAxis->setLabel(Tr::tr("V")); - m_ui->C2Plot->yAxis->setRange(0, 1); - m_ui->C2Plot->replot(); - - m_ui->C1Plot->show(); - m_ui->C1Stats->show(); - m_ui->C2Plot->show(); - m_ui->C2Stats->show(); - - break; - } - } -} - -void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) -{ - m_pixmap = data; - - switch(m_colorSpace) - { - case RGB_COLOR_SPACE: - { + m_channel2->clearData(); + if (hists[2].value[hists[2].mode]) { + for (unsigned i = 0; i < hists[2].value.size(); i++) { + m_channel2->addData(i, hists[2].value[i] / double(hists[2].value[hists[2].mode])); + } + } +#else updatePlot(m_channel0, RGB_COLOR_SPACE_R); m_ui->C0MeanValue->setNum(m_mean); m_ui->C0MedianValue->setNum(m_median); @@ -795,6 +824,7 @@ void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) m_ui->C0Plot->yAxis->setLabel(Tr::tr("R")); m_ui->C0Plot->yAxis->setRange(0, 1); m_ui->C0Plot->replot(); + m_channel0->clearData(); updatePlot(m_channel1, RGB_COLOR_SPACE_G); m_ui->C1MeanValue->setNum(m_mean); @@ -811,6 +841,7 @@ void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) m_ui->C1Plot->yAxis->setLabel(Tr::tr("G")); m_ui->C1Plot->yAxis->setRange(0, 1); m_ui->C1Plot->replot(); + m_channel1->clearData(); updatePlot(m_channel2, RGB_COLOR_SPACE_B); m_ui->C2MeanValue->setNum(m_mean); @@ -827,6 +858,8 @@ void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) m_ui->C2Plot->yAxis->setLabel(Tr::tr("B")); m_ui->C2Plot->yAxis->setRange(0, 1); m_ui->C2Plot->replot(); + m_channel2->clearData(); +#endif break; } @@ -957,5 +990,12 @@ void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) } } +void OpenMVPluginHistogram::pixmapUpdate(const QPixmap &data) +{ + m_pixmap = data; + // TODO: to another thread + update(); +} + } // namespace Internal } // namespace OpenMV diff --git a/src/plugins/openmv/histogram/openmvpluginhistogram.h b/src/plugins/openmv/histogram/openmvpluginhistogram.h old mode 100644 new mode 100755 index 6788187564d8..10e04d0e4c0a --- a/src/plugins/openmv/histogram/openmvpluginhistogram.h +++ b/src/plugins/openmv/histogram/openmvpluginhistogram.h @@ -11,6 +11,7 @@ #define GRAYSCALE_COLOR_SPACE 1 #define LAB_COLOR_SPACE 2 #define YUV_COLOR_SPACE 3 +#define NONE_COLOR_SPACE 4 namespace Ui { @@ -41,6 +42,7 @@ public slots: private: void updatePlot(QCPGraph *graph, int channel); + void update(); int m_colorSpace; QPixmap m_pixmap; diff --git a/src/plugins/openmv/openmvplugin.cpp 
b/src/plugins/openmv/openmvplugin.cpp index 68748eb0233f..8ddd8c32e6c1 100755 --- a/src/plugins/openmv/openmvplugin.cpp +++ b/src/plugins/openmv/openmvplugin.cpp @@ -1055,27 +1055,23 @@ void OpenMVPlugin::extensionsInitialized() Core::EditorManager::addCurrentPositionToNavigationHistory(); QString titlePattern = Tr::tr("untitled_$.py"); - QByteArray data = - QStringLiteral("# Untitled - By: %L1 - %L2\n" - "\n" - - "import sensor, image, time\n" - "\n" - "sensor.reset()\n" - "sensor.set_pixformat(sensor.RGB565)\n" - "sensor.set_framesize(sensor.QVGA)\n" - "sensor.skip_frames(time = 2000)\n" - "\n" - "clock = time.clock()\n" - "\n" - "while(True):\n" - " clock.tick()\n" - " img = sensor.snapshot()\n" - " print(clock.fps())\n").arg(Utils::Environment::systemEnvironment().toDictionary().userName()).arg(QDate::currentDate().toString()).toUtf8(); - auto scriptPath = Core::ICore::userResourcePath(QStringLiteral("examples/01-Media/camera_480p.py")).toString(); + QByteArray data = QStringLiteral("# Untitled - By: %L1 - %L2\n\n").arg(Utils::Environment::systemEnvironment().toDictionary().userName()).arg(QDate::currentDate().toString()).toUtf8(); + + auto scriptPath = Core::ICore::userResourcePath(QStringLiteral("examples/00-Micropython-Basics/demo_sys_info.py")).toString(); QFile file(scriptPath); if (file.open(QIODevice::ReadOnly | QIODevice::Text)) { - data = file.readAll(); + data += file.readAll(); + } else { + data += QStringLiteral( + "import sys\n\n" + "for i in range(0, 2):\n" + " print(\"hello canmv\")\n" + " print(\"hello \", end=\"canmv\\n\")\n\n" + "print(\"implementation:\", sys.implementation)\n" + "print(\"platform:\", sys.platform)\n" + "print(\"path:\", sys.path)\n" + "print(\"Python version:\", sys.version)\n" + ).toUtf8(); } TextEditor::BaseTextEditor *editor = qobject_cast(Core::EditorManager::openEditorWithContents(Core::Constants::K_DEFAULT_TEXT_EDITOR_ID, &titlePattern, data)); @@ -1844,13 +1840,13 @@ void OpenMVPlugin::extensionsInitialized() }); QAction *aboutAction = new QAction(QIcon::fromTheme(QStringLiteral("help-about")), - Utils::HostOsInfo::isMacHost() ? Tr::tr("About CanMV IDE") : Tr::tr("About CanMV IDE..."), this); + Utils::HostOsInfo::isMacHost() ? Tr::tr("About CanMV IDE K230") : Tr::tr("About CanMV IDE K230..."), this); aboutAction->setMenuRole(QAction::AboutRole); Core::Command *aboutCommand = Core::ActionManager::registerAction(aboutAction, Utils::Id("OpenMV.About")); helpMenu->addAction(aboutCommand, Core::Constants::G_HELP_ABOUT); connect(aboutAction, &QAction::triggered, this, [] { - QMessageBox::about(Core::ICore::dialogParent(), Tr::tr("About CanMV IDE"), Tr::tr( - "

About CanMV IDE %L1-%L4" + QMessageBox::about(Core::ICore::dialogParent(), Tr::tr("About CanMV IDE K230"), Tr::tr( + "About CanMV IDE K230 %L1-%L4" "By: Canaan Inc." "Based on OpenMV IDE By Ibrahim Abdelkader & Kwabena W. Agyeman" "GNU GENERAL PUBLIC LICENSE
" @@ -2143,7 +2139,8 @@ void OpenMVPlugin::extensionsInitialized() colorSpace->insertItem(GRAYSCALE_COLOR_SPACE, Tr::tr("Grayscale Color Space")); colorSpace->insertItem(LAB_COLOR_SPACE, Tr::tr("LAB Color Space")); colorSpace->insertItem(YUV_COLOR_SPACE, Tr::tr("YUV Color Space")); - colorSpace->setCurrentIndex(RGB_COLOR_SPACE); + colorSpace->insertItem(NONE_COLOR_SPACE, Tr::tr("None")); + colorSpace->setCurrentIndex(NONE_COLOR_SPACE); colorSpace->setToolTip(Tr::tr("Use Grayscale/LAB for color tracking")); styledBar1Layout->addWidget(colorSpace); @@ -2560,10 +2557,10 @@ void OpenMVPlugin::extensionsInitialized() editor = editors.first(); } } - + // FIXME if(editor ? (editor->document() ? editor->document()->contents().isEmpty() : true) : true) { - QString filePath = Core::ICore::userResourcePath(QStringLiteral("examples/00-HelloWorld/helloworld.py")).toString(); + QString filePath = Core::ICore::userResourcePath(QStringLiteral("examples/99-HelloWorld/helloworld.py")).toString(); QFile file(filePath); @@ -2800,63 +2797,6 @@ bool OpenMVPlugin::delayedInitialize() Tr::tr("Failed to create the documents folder!")); } - // open default file - Core::EditorManager::cutForwardNavigationHistory(); - Core::EditorManager::addCurrentPositionToNavigationHistory(); - QString titlePattern = Tr::tr("untitled_$.py"); - - QByteArray data = - QStringLiteral("# Untitled - By: %L1 - %L2\n" - "\n" - "import sensor, image, time\n" - "\n" - "sensor.reset()\n" - "sensor.set_pixformat(sensor.RGB565)\n" - "sensor.set_framesize(sensor.QVGA)\n" - "sensor.skip_frames(time = 2000)\n" - "\n" - "clock = time.clock()\n" - "\n" - "while(True):\n" - " clock.tick()\n" - " img = sensor.snapshot()\n" - " print(clock.fps())\n").arg(Utils::Environment::systemEnvironment().toDictionary().userName()).arg(QDate::currentDate().toString()).toUtf8(); - auto scriptPath = Core::ICore::userResourcePath(QStringLiteral("examples/01-Media/camera_480p.py")).toString(); - QFile file(scriptPath); - if (file.open(QIODevice::ReadOnly | QIODevice::Text)) { - data = file.readAll(); - } - - if((m_sensorType == QStringLiteral("HM01B0")) || - (m_sensorType == QStringLiteral("HM0360")) || - (m_sensorType == QStringLiteral("MT9V0X2")) || - (m_sensorType == QStringLiteral("MT9V0X4"))) - { - data = data.replace(QByteArrayLiteral("sensor.set_pixformat(sensor.RGB565)"), QByteArrayLiteral("sensor.set_pixformat(sensor.GRAYSCALE)")); - if(m_sensorType == QStringLiteral("HM01B0")) data = data.replace(QByteArrayLiteral("sensor.set_framesize(sensor.VGA)"), QByteArrayLiteral("sensor.set_framesize(sensor.QVGA)")); - } - - TextEditor::BaseTextEditor *editor = qobject_cast(Core::EditorManager::openEditorWithContents(Core::Constants::K_DEFAULT_TEXT_EDITOR_ID, &titlePattern, data)); - - if(editor) - { - QTemporaryFile file(QDir::tempPath() + QDir::separator() + QString(editor->document()->displayName()).replace(QStringLiteral(".py"), QStringLiteral("_XXXXXX.py"))); - - if(file.open()) - { - if(file.write(data) == data.size()) - { - file.setAutoRemove(false); - file.close(); - - editor->document()->setProperty("diffFilePath", QFileInfo(file).canonicalFilePath()); - Core::EditorManager::addCurrentPositionToNavigationHistory(); - editor->editorWidget()->configureGenericHighlighter(); - Core::EditorManager::activateEditor(editor); - } - } - } - return true; } diff --git a/src/plugins/openmv/openmvpluginfb.cpp b/src/plugins/openmv/openmvpluginfb.cpp index 490a4b5fb17c..4be206415e97 100755 --- a/src/plugins/openmv/openmvpluginfb.cpp +++ 
b/src/plugins/openmv/openmvpluginfb.cpp @@ -135,6 +135,12 @@ void OpenMVPluginFB::frameBufferData(const QPixmap &data) if(!data.isNull()) { + if (rotation == 0) { + if (data.height() > data.width()) { + // rotate 270 deg + rotation = 270; + } + } m_pixmap = scene()->addPixmap(data); } else diff --git a/src/plugins/openmv/openmvterminal.cpp b/src/plugins/openmv/openmvterminal.cpp old mode 100644 new mode 100755 index d9363f911cb4..03a50e94f735 --- a/src/plugins/openmv/openmvterminal.cpp +++ b/src/plugins/openmv/openmvterminal.cpp @@ -905,6 +905,7 @@ OpenMVTerminal::OpenMVTerminal(const QString &displayName, QSettings *settings, m_colorSpace->insertItem(GRAYSCALE_COLOR_SPACE, Tr::tr("Grayscale Color Space")); m_colorSpace->insertItem(LAB_COLOR_SPACE, Tr::tr("LAB Color Space")); m_colorSpace->insertItem(YUV_COLOR_SPACE, Tr::tr("YUV Color Space")); + m_colorSpace->insertItem(NONE_COLOR_SPACE, Tr::tr("None")); m_colorSpace->setToolTip(Tr::tr("Use Grayscale/LAB for color tracking")); styledBar1Layout->addWidget(m_colorSpace);
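
The DetectionApp.get_padding_param and OCRDetectionApp.get_pad_param helpers in the examples above both implement the usual letterbox math: scale by the smaller of the width/height ratios, then distribute the leftover space as padding (centered in DetectionApp, bottom/right-only in the OCR example). The sketch below is plain Python with illustrative names that are not part of the K230 API; note that a symmetric split uses dw + 0.1 for the right pad, whereas DetectionApp rounds both left and right with dw - 0.1 — harmless for the shipped 1280x720-to-640x640 case where dw is 0, but worth checking for other resolutions.

# Standalone sketch of the centered letterbox padding used by the detection
# examples; the function name and the final print are illustrative only.
def letterbox_pad(model_w, model_h, src_w, src_h):
    ratio = min(model_w / src_w, model_h / src_h)   # keep aspect ratio
    new_w, new_h = int(ratio * src_w), int(ratio * src_h)
    dw = (model_w - new_w) / 2                      # horizontal slack, split in two
    dh = (model_h - new_h) / 2                      # vertical slack, split in two
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    return top, bottom, left, right

# A 1280x720 frame into a 640x640 model input: ratio 0.5, resized to 640x360,
# so the image is padded 140 px top and bottom and 0 px left and right.
print(letterbox_pad(640, 640, 1280, 720))   # (140, 140, 0, 0)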
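
SelfLearningApp.compute_similar classifies a frame by cosine similarity between its embedding and every vector in the stored feature library. A minimal desktop sketch of the same computation with standard NumPy follows; the toy vectors are made up for illustration, and on the device the ulab subset is used instead.

import numpy as np

def most_similar(embedding, library):
    # Cosine similarity of `embedding` against each row of `library`;
    # returns the index of the best match and its score.
    library = np.asarray(library, dtype=float)
    embedding = np.asarray(embedding, dtype=float)
    scores = library @ embedding / (np.linalg.norm(library, axis=1) * np.linalg.norm(embedding))
    idx = int(np.argmax(scores))
    return idx, float(scores[idx])

# Three stored feature vectors and one query vector, all invented for the demo.
print(most_similar([0.6, 0.8], [[1.0, 0.0], [0.0, 1.0], [0.7, 0.7]]))   # (2, ~0.99)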
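
The statistics() helper added to openmvpluginhistogram.cpp derives mean, median, quartiles and mode from a binned histogram by walking the cumulative count rather than the raw pixels. A small Python sketch of the same idea, using the same offset formulas on a toy 8-bin histogram (names are illustrative; the C++ version additionally rounds the mean and computes a standard deviation):

def hist_stats(counts):
    # counts[i] is the number of pixels whose value falls in bin i.
    total = sum(counts)
    median_off = (total + 1) // 2        # 1/2 of the pixels
    lq_off = (total * 1 + 3) // 4        # 1/4 of the pixels
    uq_off = (total * 3 + 3) // 4        # 3/4 of the pixels
    mean = sum(i * c for i, c in enumerate(counts)) / total
    median = lq = uq = None
    cum = 0
    for i, c in enumerate(counts):
        cum += c
        if median is None and cum >= median_off:
            median = i
        if lq is None and cum >= lq_off:
            lq = i
        if uq is None and cum >= uq_off:
            uq = i
    mode = max(range(len(counts)), key=lambda i: counts[i])
    return mean, median, lq, uq, mode

# Toy histogram over bins 0..7.
print(hist_stats([0, 2, 5, 9, 4, 1, 0, 0]))   # (~2.86, 3, 2, 3, 3)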