diff --git a/3rdparty/apriltag/README.ViSP b/3rdparty/apriltag/README.ViSP.md similarity index 100% rename from 3rdparty/apriltag/README.ViSP rename to 3rdparty/apriltag/README.ViSP.md diff --git a/3rdparty/simdlib/CMakeLists.txt b/3rdparty/simdlib/CMakeLists.txt index baade3ea2e..7056b6d382 100644 --- a/3rdparty/simdlib/CMakeLists.txt +++ b/3rdparty/simdlib/CMakeLists.txt @@ -76,6 +76,7 @@ if(X86 OR X86_64) vp_check_compiler_flag(CXX "-Wno-reorder" HAVE_NO_REORDER_FLAG "${VISP_SOURCE_DIR}/cmake/checks/cpu_warning.cpp") vp_check_compiler_flag(CXX "-Wno-strict-aliasing" HAVE_NO_STRICT_ALIASING_FLAG "${VISP_SOURCE_DIR}/cmake/checks/cpu_warning.cpp") vp_check_compiler_flag(CXX "-Wno-missing-field-initializers" HAVE_NO_MISSING_FIELD_FLAG "${VISP_SOURCE_DIR}/cmake/checks/cpu_warning.cpp") + vp_check_compiler_flag(CXX "-Wno-stringop-overflow" HAVE_NO_STRINGOP_OVERFLOW_EQUAL_FLAG "${VISP_SOURCE_DIR}/cmake/checks/cpu_warning.cpp") if(HAVE_SSE2_FLAG) set(SSE2_FLAG "-msse -msse2") @@ -136,7 +137,9 @@ if(X86 OR X86_64) if(HAVE_NO_MISSING_FIELD_FLAG) set(COMMON_CXX_FLAGS "${COMMON_CXX_FLAGS} -Wno-missing-field-initializers") endif() - + if(HAVE_NO_STRINGOP_OVERFLOW_EQUAL_FLAG) + set(COMMON_CXX_FLAGS "${COMMON_CXX_FLAGS} -Wno-stringop-overflow") + endif() if(MSVC AND ((CMAKE_CXX_COMPILER_ID MATCHES "AppleClang") OR (CMAKE_CXX_COMPILER_ID MATCHES "Clang"))) # Clang under windows needs AVX flags set(COMMON_CXX_FLAGS "${COMMON_CXX_FLAGS} ${AVX_FLAG} ${AVX2_FLAG}") diff --git a/3rdparty/stb_image/CMakeLists.txt b/3rdparty/stb_image/CMakeLists.txt index f344e7f27d..3af07b9766 100644 --- a/3rdparty/stb_image/CMakeLists.txt +++ b/3rdparty/stb_image/CMakeLists.txt @@ -1,5 +1,5 @@ project(${STBIMAGE_LIBRARY}) set(STBIMAGE_MAJOR_VERSION 2 PARENT_SCOPE) -set(STBIMAGE_MINOR_VERSION 27 PARENT_SCOPE) +set(STBIMAGE_MINOR_VERSION 30 PARENT_SCOPE) set(STBIMAGE_PATCH_VERSION 0 PARENT_SCOPE) diff --git a/3rdparty/stb_image/README.ViSP.md b/3rdparty/stb_image/README.ViSP.md index 
6a894d7c53..fc21d5b93d 100644 --- a/3rdparty/stb_image/README.ViSP.md +++ b/3rdparty/stb_image/README.ViSP.md @@ -1,5 +1,2 @@ -Here we give the list of the main changes introduced in std_image for ViSP - -- 2022.01.14: Fix compat with clang that doesn't implement thread_local with c++11 - - See "Modified to ensure clang supports thread_local" in stb_image.h +Porting of the stb_image library: https://github.com/nothings/stb +stb_image version is 2.30 diff --git a/3rdparty/stb_image/README.md b/3rdparty/stb_image/README.md index 736bef4a74..cea3e49605 100644 --- a/3rdparty/stb_image/README.md +++ b/3rdparty/stb_image/README.md @@ -3,38 +3,49 @@ stb === -single-file public domain (or MIT licensed) libraries for C/C++ +single-file public domain (or MIT licensed) libraries for C/C++ -Most libraries by stb, except: stb_dxt by Fabian "ryg" Giesen, stb_image_resize -by Jorge L. "VinoBS" Rodriguez, and stb_sprintf by Jeff Roberts. +# This project discusses security-relevant bugs in public in Github Issues and Pull Requests, and it may take significant time for security fixes to be implemented or merged. If this poses an unreasonable risk to your project, do not use stb libraries. +Noteworthy: + +* image loader: [stb_image.h](stb_image.h) +* image writer: [stb_image_write.h](stb_image_write.h) +* image resizer: [stb_image_resize2.h](stb_image_resize2.h) +* font text rasterizer: [stb_truetype.h](stb_truetype.h) +* typesafe containers: [stb_ds.h](stb_ds.h) + +Most libraries by stb, except: stb_dxt by Fabian "ryg" Giesen, original stb_image_resize +by Jorge L. "VinoBS" Rodriguez, and stb_image_resize2 and stb_sprintf by Jeff Roberts. 
+ + library | lastest version | category | LoC | description --------------------- | ---- | -------- | --- | -------------------------------- -**[stb_vorbis.c](stb_vorbis.c)** | 1.16 | audio | 5486 | decode ogg vorbis files from file/memory to float/16-bit signed output -**[stb_image.h](stb_image.h)** | 2.27 | graphics | 7897 | image loading/decoding from file/memory: JPG, PNG, TGA, BMP, PSD, GIF, HDR, PIC -**[stb_truetype.h](stb_truetype.h)** | 1.21 | graphics | 4882 | parse, decode, and rasterize characters from truetype fonts +**[stb_vorbis.c](stb_vorbis.c)** | 1.22 | audio | 5584 | decode ogg vorbis files from file/memory to float/16-bit signed output +**[stb_hexwave.h](stb_hexwave.h)** | 0.5 | audio | 680 | audio waveform synthesizer +**[stb_image.h](stb_image.h)** | 2.30 | graphics | 7988 | image loading/decoding from file/memory: JPG, PNG, TGA, BMP, PSD, GIF, HDR, PIC +**[stb_truetype.h](stb_truetype.h)** | 1.26 | graphics | 5079 | parse, decode, and rasterize characters from truetype fonts **[stb_image_write.h](stb_image_write.h)** | 1.16 | graphics | 1724 | image writing to disk: PNG, TGA, BMP -**[stb_image_resize.h](stb_image_resize.h)** | 0.96 | graphics | 2630 | resize images larger/smaller with good quality -**[stb_rect_pack.h](stb_rect_pack.h)** | 1.00 | graphics | 628 | simple 2D rectangle packer with decent quality -**[stb_ds.h](stb_ds.h)** | 0.5 | utility | 1691 | typesafe dynamic array and hash tables for C, will compile in C++ -**[stb_sprintf.h](stb_sprintf.h)** | 1.06 | utility | 1860 | fast sprintf, snprintf for C/C++ -**[stretchy_buffer.h](stretchy_buffer.h)** | 1.03 | utility | 262 | typesafe dynamic array for C (i.e. 
approximation to vector<>), doesn't compile as C++ -**[stb_textedit.h](stb_textedit.h)** | 1.13 | user interface | 1404 | guts of a text editor for games etc implementing them from scratch -**[stb_voxel_render.h](stb_voxel_render.h)** | 0.88 | 3D graphics | 3806 | Minecraft-esque voxel rendering "engine" with many more features -**[stb_dxt.h](stb_dxt.h)** | 1.08b | 3D graphics | 728 | Fabian "ryg" Giesen's real-time DXT compressor -**[stb_perlin.h](stb_perlin.h)** | 0.4 | 3D graphics | 366 | revised Perlin noise (3D input, 1D output) -**[stb_easy_font.h](stb_easy_font.h)** | 1.0 | 3D graphics | 303 | quick-and-dirty easy-to-deploy bitmap font for printing frame rate, etc -**[stb_tilemap_editor.h](stb_tilemap_editor.h)** | 0.41 | game dev | 4161 | embeddable tilemap editor +**[stb_image_resize2.h](stb_image_resize2.h)** | 2.10 | graphics | 10572 | resize images larger/smaller with good quality +**[stb_rect_pack.h](stb_rect_pack.h)** | 1.01 | graphics | 623 | simple 2D rectangle packer with decent quality +**[stb_perlin.h](stb_perlin.h)** | 0.5 | graphics | 428 | perlin's revised simplex noise w/ different seeds +**[stb_ds.h](stb_ds.h)** | 0.67 | utility | 1895 | typesafe dynamic array and hash tables for C, will compile in C++ +**[stb_sprintf.h](stb_sprintf.h)** | 1.10 | utility | 1906 | fast sprintf, snprintf for C/C++ +**[stb_textedit.h](stb_textedit.h)** | 1.14 | user interface | 1429 | guts of a text editor for games etc implementing them from scratch +**[stb_voxel_render.h](stb_voxel_render.h)** | 0.89 | 3D graphics | 3807 | Minecraft-esque voxel rendering "engine" with many more features +**[stb_dxt.h](stb_dxt.h)** | 1.12 | 3D graphics | 719 | Fabian "ryg" Giesen's real-time DXT compressor +**[stb_easy_font.h](stb_easy_font.h)** | 1.1 | 3D graphics | 305 | quick-and-dirty easy-to-deploy bitmap font for printing frame rate, etc +**[stb_tilemap_editor.h](stb_tilemap_editor.h)** | 0.42 | game dev | 4187 | embeddable tilemap editor 
**[stb_herringbone_wa...](stb_herringbone_wang_tile.h)** | 0.7 | game dev | 1221 | herringbone Wang tile map generator -**[stb_c_lexer.h](stb_c_lexer.h)** | 0.10 | parsing | 964 | simplify writing parsers for C-like languages -**[stb_divide.h](stb_divide.h)** | 0.92 | math | 421 | more useful 32-bit modulus e.g. "euclidean divide" +**[stb_c_lexer.h](stb_c_lexer.h)** | 0.12 | parsing | 941 | simplify writing parsers for C-like languages +**[stb_divide.h](stb_divide.h)** | 0.94 | math | 433 | more useful 32-bit modulus e.g. "euclidean divide" **[stb_connected_comp...](stb_connected_components.h)** | 0.96 | misc | 1049 | incrementally compute reachability on grids -**[stb.h](stb.h)** | 2.34 | misc | 14453 | helper functions for C, mostly redundant in C++; basically author's personal stuff -**[stb_leakcheck.h](stb_leakcheck.h)** | 0.5 | misc | 190 | quick-and-dirty malloc/free leak-checking +**[stb_leakcheck.h](stb_leakcheck.h)** | 0.6 | misc | 194 | quick-and-dirty malloc/free leak-checking +**[stb_include.h](stb_include.h)** | 0.02 | misc | 295 | implement recursive #include support, particularly for GLSL -Total libraries: 21 -Total lines of C code: 55669 +Total libraries: 21 +Total lines of C code: 51059 FAQ @@ -50,6 +61,24 @@ They are also licensed under the MIT open source license, if you have lawyers who are unhappy with public domain. Every source file includes an explicit dual-license for you to choose from. +#### How do I use these libraries? + +The idea behind single-header file libraries is that they're easy to distribute and deploy +because all the code is contained in a single file. By default, the .h files in here act as +their own header files, i.e. they declare the functions contained in the file but don't +actually result in any code getting compiled. + +So in addition, you should select _exactly one_ C/C++ source file that actually instantiates +the code, preferably a file you're not editing frequently. 
This file should define a +specific macro (this is documented per-library) to actually enable the function definitions. +For example, to use stb_image, you should have exactly one C/C++ file that doesn't +include stb_image.h regularly, but instead does + + #define STB_IMAGE_IMPLEMENTATION + #include "stb_image.h" + +The right macro to define is pointed out right at the top of each of these libraries. + #### Are there other single-file public-domain/open source libraries with minimal dependencies out there? [Yes.](https://github.com/nothings/single_file_libs) @@ -129,11 +158,10 @@ way of namespacing the filenames and source function names. #### Will you add more image types to stb_image.h? -If people submit them, I generally add them, but the goal of stb_image -is less for applications like image viewer apps (which need to support -every type of image under the sun) and more for things like games which -can choose what images to use, so I may decline to add them if they're -too rare or if the size of implementation vs. apparent benefit is too low. +No. As stb_image use has grown, it has become more important +for us to focus on security of the codebase. Adding new image +formats increases the amount of code we need to secure, so it +is no longer worth adding new formats. #### Do you have any advice on how to create my own single-file library? @@ -154,4 +182,3 @@ for other people to use them from other languages. I still use MSVC 6 (1998) as my IDE because it has better human factors for me than later versions of MSVC. 
- diff --git a/3rdparty/stb_image/stb_image.h b/3rdparty/stb_image/stb_image.h index 10de350c9d..9eedabedc4 100644 --- a/3rdparty/stb_image/stb_image.h +++ b/3rdparty/stb_image/stb_image.h @@ -1,4 +1,4 @@ -/* stb_image - v2.27 - public domain image loader - http://nothings.org/stb +/* stb_image - v2.30 - public domain image loader - http://nothings.org/stb no warranty implied; use at your own risk Do this: @@ -48,6 +48,9 @@ LICENSE RECENT REVISION HISTORY: + 2.30 (2024-05-31) avoid erroneous gcc warning + 2.29 (2023-05-xx) optimizations + 2.28 (2023-01-29) many error fixes, security errors, just tons of stuff 2.27 (2021-07-11) document stbi_info better, 16-bit PNM support, bug fixes 2.26 (2020-07-13) many minor fixes 2.25 (2020-02-02) fix warnings @@ -108,7 +111,7 @@ RECENT REVISION HISTORY: Cass Everitt Ryamond Barbiero github:grim210 Paul Du Bois Engin Manap Aldo Culquicondor github:sammyhw Philipp Wiesemann Dale Weiler Oriol Ferrer Mesia github:phprus - Josh Tobin Matthew Gregan github:poppolopoppo + Josh Tobin Neil Bickford Matthew Gregan github:poppolopoppo Julian Raschke Gregory Mullen Christian Floisand github:darealshinji Baldur Karlsson Kevin Schmidt JR Smith github:Michaelangel007 Brad Weinberger Matvey Cherevko github:mosra @@ -140,7 +143,7 @@ RECENT REVISION HISTORY: // // ... x = width, y = height, n = # 8-bit components per pixel ... // // ... replace '0' with '1'..'4' to force that many components per pixel // // ... 
but 'n' will always be the number that it would have been if you said 0 -// stbi_image_free(data) +// stbi_image_free(data); // // Standard parameters: // int *x -- outputs image width in pixels @@ -618,30 +621,24 @@ STBIDEF int stbi_zlib_decode_noheader_buffer(char *obuffer, int olen, const ch #endif #ifndef STBI_NO_THREAD_LOCALS - #ifdef __clang__ // Modified to ensure clang supports thread_local - #if defined(__has_feature) && __has_feature(cxx_thread_local) - #define STBI_THREAD_LOCAL thread_local - #endif - #else - #if defined(__cplusplus) && __cplusplus >= 201103L - #define STBI_THREAD_LOCAL thread_local - #elif defined(__GNUC__) && __GNUC__ < 5 - #define STBI_THREAD_LOCAL __thread - #elif defined(_MSC_VER) - #define STBI_THREAD_LOCAL __declspec(thread) - #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) - #define STBI_THREAD_LOCAL _Thread_local - #endif - - #ifndef STBI_THREAD_LOCAL - #if defined(__GNUC__) - #define STBI_THREAD_LOCAL __thread - #endif - #endif - #endif + #if defined(__cplusplus) && __cplusplus >= 201103L + #define STBI_THREAD_LOCAL thread_local + #elif defined(__GNUC__) && __GNUC__ < 5 + #define STBI_THREAD_LOCAL __thread + #elif defined(_MSC_VER) + #define STBI_THREAD_LOCAL __declspec(thread) + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_THREADS__) + #define STBI_THREAD_LOCAL _Thread_local + #endif + + #ifndef STBI_THREAD_LOCAL + #if defined(__GNUC__) + #define STBI_THREAD_LOCAL __thread + #endif + #endif #endif -#ifdef _MSC_VER +#if defined(_MSC_VER) || defined(__SYMBIAN32__) typedef unsigned short stbi__uint16; typedef signed short stbi__int16; typedef unsigned int stbi__uint32; @@ -1069,6 +1066,23 @@ static void *stbi__malloc_mad4(int a, int b, int c, int d, int add) } #endif +// returns 1 if the sum of two signed ints is valid (between -2^31 and 2^31-1 inclusive), 0 on overflow. 
+static int stbi__addints_valid(int a, int b) +{ + if ((a >= 0) != (b >= 0)) return 1; // a and b have different signs, so no overflow + if (a < 0 && b < 0) return a >= INT_MIN - b; // same as a + b >= INT_MIN; INT_MIN - b cannot overflow since b < 0. + return a <= INT_MAX - b; +} + +// returns 1 if the product of two ints fits in a signed short, 0 on overflow. +static int stbi__mul2shorts_valid(int a, int b) +{ + if (b == 0 || b == -1) return 1; // multiplication by 0 is always 0; check for -1 so SHRT_MIN/b doesn't overflow + if ((a >= 0) == (b >= 0)) return a <= SHRT_MAX/b; // product is positive, so similar to mul2sizes_valid + if (b < 0) return a <= SHRT_MIN / b; // same as a * b >= SHRT_MIN + return a >= SHRT_MIN / b; +} + // stbi__err - error // stbi__errpf - error returning pointer to float // stbi__errpuc - error returning pointer to unsigned char @@ -1991,9 +2005,12 @@ static int stbi__build_huffman(stbi__huffman *h, int *count) int i,j,k=0; unsigned int code; // build size list for each symbol (from JPEG spec) - for (i=0; i < 16; ++i) - for (j=0; j < count[i]; ++j) + for (i=0; i < 16; ++i) { + for (j=0; j < count[i]; ++j) { h->size[k++] = (stbi_uc) (i+1); + if(k >= 257) return stbi__err("bad size list","Corrupt JPEG"); + } + } h->size[k] = 0; // compute actual symbols (from jpeg spec) @@ -2118,6 +2135,8 @@ stbi_inline static int stbi__jpeg_huff_decode(stbi__jpeg *j, stbi__huffman *h) // convert the huffman code to the symbol id c = ((j->code_buffer >> (32 - k)) & stbi__bmask[k]) + h->delta[k]; + if(c < 0 || c >= 256) // symbol id out of bounds! 
+ return -1; STBI_ASSERT((((j->code_buffer) >> (32 - h->size[c])) & stbi__bmask[h->size[c]]) == h->code[c]); // convert the id to a symbol @@ -2136,6 +2155,7 @@ stbi_inline static int stbi__extend_receive(stbi__jpeg *j, int n) unsigned int k; int sgn; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing sgn = j->code_buffer >> 31; // sign bit always in MSB; 0 if MSB clear (positive), 1 if MSB set (negative) k = stbi_lrot(j->code_buffer, n); @@ -2150,6 +2170,7 @@ stbi_inline static int stbi__jpeg_get_bits(stbi__jpeg *j, int n) { unsigned int k; if (j->code_bits < n) stbi__grow_buffer_unsafe(j); + if (j->code_bits < n) return 0; // ran out of bits from stream, return 0s intead of continuing k = stbi_lrot(j->code_buffer, n); j->code_buffer = k & ~stbi__bmask[n]; k &= stbi__bmask[n]; @@ -2161,6 +2182,7 @@ stbi_inline static int stbi__jpeg_get_bit(stbi__jpeg *j) { unsigned int k; if (j->code_bits < 1) stbi__grow_buffer_unsafe(j); + if (j->code_bits < 1) return 0; // ran out of bits from stream, return 0s intead of continuing k = j->code_buffer; j->code_buffer <<= 1; --j->code_bits; @@ -2198,8 +2220,10 @@ static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman memset(data,0,64*sizeof(data[0])); diff = t ? 
stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta","Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, dequant[0])) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); data[0] = (short) (dc * dequant[0]); // decode AC components, see JPEG spec @@ -2213,6 +2237,7 @@ static int stbi__jpeg_decode_block(stbi__jpeg *j, short data[64], stbi__huffman if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; // decode into unzigzag'd location @@ -2252,8 +2277,10 @@ static int stbi__jpeg_decode_block_prog_dc(stbi__jpeg *j, short data[64], stbi__ if (t < 0 || t > 15) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); diff = t ? stbi__extend_receive(j, t) : 0; + if (!stbi__addints_valid(j->img_comp[b].dc_pred, diff)) return stbi__err("bad delta", "Corrupt JPEG"); dc = j->img_comp[b].dc_pred + diff; j->img_comp[b].dc_pred = dc; + if (!stbi__mul2shorts_valid(dc, 1 << j->succ_low)) return stbi__err("can't merge dc and ac", "Corrupt JPEG"); data[0] = (short) (dc * (1 << j->succ_low)); } else { // refinement scan for DC coefficient @@ -2288,6 +2315,7 @@ static int stbi__jpeg_decode_block_prog_ac(stbi__jpeg *j, short data[64], stbi__ if (r) { // fast-AC path k += (r >> 4) & 15; // run s = r & 15; // combined length + if (s > j->code_bits) return stbi__err("bad huffman code", "Combined length longer than code bits available"); j->code_buffer <<= s; j->code_bits -= s; zig = stbi__jpeg_dezigzag[k++]; @@ -3108,6 +3136,7 @@ static int stbi__process_marker(stbi__jpeg *z, int m) sizes[i] = stbi__get8(z->s); n += sizes[i]; } + if(n > 256) return stbi__err("bad DHT header","Corrupt JPEG"); // Loop over i < n would write past end of values! 
L -= 17; if (tc == 0) { if (!stbi__build_huffman(z->huff_dc+th, sizes)) return 0; @@ -3357,6 +3386,28 @@ static int stbi__decode_jpeg_header(stbi__jpeg *z, int scan) return 1; } +static stbi_uc stbi__skip_jpeg_junk_at_end(stbi__jpeg *j) +{ + // some JPEGs have junk at end, skip over it but if we find what looks + // like a valid marker, resume there + while (!stbi__at_eof(j->s)) { + stbi_uc x = stbi__get8(j->s); + while (x == 0xff) { // might be a marker + if (stbi__at_eof(j->s)) return STBI__MARKER_none; + x = stbi__get8(j->s); + if (x != 0x00 && x != 0xff) { + // not a stuffed zero or lead-in to another marker, looks + // like an actual marker, return it + return x; + } + // stuffed zero has x=0 now which ends the loop, meaning we go + // back to regular scan loop. + // repeated 0xff keeps trying to read the next byte of the marker. + } + } + return STBI__MARKER_none; +} + // decode image to YCbCr format static int stbi__decode_jpeg_image(stbi__jpeg *j) { @@ -3373,25 +3424,22 @@ static int stbi__decode_jpeg_image(stbi__jpeg *j) if (!stbi__process_scan_header(j)) return 0; if (!stbi__parse_entropy_coded_data(j)) return 0; if (j->marker == STBI__MARKER_none ) { - // handle 0s at the end of image data from IP Kamera 9060 - while (!stbi__at_eof(j->s)) { - int x = stbi__get8(j->s); - if (x == 255) { - j->marker = stbi__get8(j->s); - break; - } - } + j->marker = stbi__skip_jpeg_junk_at_end(j); // if we reach eof without hitting a marker, stbi__get_marker() below will fail and we'll eventually return 0 } + m = stbi__get_marker(j); + if (STBI__RESTART(m)) + m = stbi__get_marker(j); } else if (stbi__DNL(m)) { int Ld = stbi__get16be(j->s); stbi__uint32 NL = stbi__get16be(j->s); if (Ld != 4) return stbi__err("bad DNL len", "Corrupt JPEG"); if (NL != j->s->img_y) return stbi__err("bad DNL height", "Corrupt JPEG"); + m = stbi__get_marker(j); } else { - if (!stbi__process_marker(j, m)) return 0; + if (!stbi__process_marker(j, m)) return 1; + m = stbi__get_marker(j); } - m = 
stbi__get_marker(j); } if (j->progressive) stbi__jpeg_finish(j); @@ -3982,6 +4030,7 @@ static void *stbi__jpeg_load(stbi__context *s, int *x, int *y, int *comp, int re unsigned char* result; stbi__jpeg* j = (stbi__jpeg*) stbi__malloc(sizeof(stbi__jpeg)); if (!j) return stbi__errpuc("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); STBI_NOTUSED(ri); j->s = s; stbi__setup_jpeg(j); @@ -3995,6 +4044,7 @@ static int stbi__jpeg_test(stbi__context *s) int r; stbi__jpeg* j = (stbi__jpeg*)stbi__malloc(sizeof(stbi__jpeg)); if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); j->s = s; stbi__setup_jpeg(j); r = stbi__decode_jpeg_header(j, STBI__SCAN_type); @@ -4020,6 +4070,7 @@ static int stbi__jpeg_info(stbi__context *s, int *x, int *y, int *comp) int result; stbi__jpeg* j = (stbi__jpeg*) (stbi__malloc(sizeof(stbi__jpeg))); if (!j) return stbi__err("outofmem", "Out of memory"); + memset(j, 0, sizeof(stbi__jpeg)); j->s = s; result = stbi__jpeg_info_raw(j, x, y, comp); STBI_FREE(j); @@ -4127,6 +4178,7 @@ typedef struct { stbi_uc *zbuffer, *zbuffer_end; int num_bits; + int hit_zeof_once; stbi__uint32 code_buffer; char *zout; @@ -4193,9 +4245,20 @@ stbi_inline static int stbi__zhuffman_decode(stbi__zbuf *a, stbi__zhuffman *z) int b,s; if (a->num_bits < 16) { if (stbi__zeof(a)) { - return -1; /* report error for unexpected end of data. */ + if (!a->hit_zeof_once) { + // This is the first time we hit eof, insert 16 extra padding btis + // to allow us to keep going; if we actually consume any of them + // though, that is invalid data. This is caught later. + a->hit_zeof_once = 1; + a->num_bits += 16; // add 16 implicit zero bits + } else { + // We already inserted our extra 16 padding bits and are again + // out, this stream is actually prematurely terminated. 
+ return -1; + } + } else { + stbi__fill_bits(a); } - stbi__fill_bits(a); } b = z->fast[a->code_buffer & STBI__ZFAST_MASK]; if (b) { @@ -4260,17 +4323,25 @@ static int stbi__parse_huffman_block(stbi__zbuf *a) int len,dist; if (z == 256) { a->zout = zout; + if (a->hit_zeof_once && a->num_bits < 16) { + // The first time we hit zeof, we inserted 16 extra zero bits into our bit + // buffer so the decoder can just do its speculative decoding. But if we + // actually consumed any of those bits (which is the case when num_bits < 16), + // the stream actually read past the end so it is malformed. + return stbi__err("unexpected end","Corrupt PNG"); + } return 1; } + if (z >= 286) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, length codes 286 and 287 must not appear in compressed data z -= 257; len = stbi__zlength_base[z]; if (stbi__zlength_extra[z]) len += stbi__zreceive(a, stbi__zlength_extra[z]); z = stbi__zhuffman_decode(a, &a->z_distance); - if (z < 0) return stbi__err("bad huffman code","Corrupt PNG"); + if (z < 0 || z >= 30) return stbi__err("bad huffman code","Corrupt PNG"); // per DEFLATE, distance codes 30 and 31 must not appear in compressed data dist = stbi__zdist_base[z]; if (stbi__zdist_extra[z]) dist += stbi__zreceive(a, stbi__zdist_extra[z]); if (zout - a->zout_start < dist) return stbi__err("bad dist","Corrupt PNG"); - if (zout + len > a->zout_end) { + if (len > a->zout_end - zout) { if (!stbi__zexpand(a, zout, len)) return 0; zout = a->zout; } @@ -4414,6 +4485,7 @@ static int stbi__parse_zlib(stbi__zbuf *a, int parse_header) if (!stbi__parse_zlib_header(a)) return 0; a->num_bits = 0; a->code_buffer = 0; + a->hit_zeof_once = 0; do { final = stbi__zreceive(a,1); type = stbi__zreceive(a,2); @@ -4569,9 +4641,8 @@ enum { STBI__F_up=2, STBI__F_avg=3, STBI__F_paeth=4, - // synthetic filters used for first scanline to avoid needing a dummy row of 0s - STBI__F_avg_first, - STBI__F_paeth_first + // synthetic filter used for first scanline to 
avoid needing a dummy row of 0s + STBI__F_avg_first }; static stbi_uc first_row_filter[5] = @@ -4580,29 +4651,56 @@ static stbi_uc first_row_filter[5] = STBI__F_sub, STBI__F_none, STBI__F_avg_first, - STBI__F_paeth_first + STBI__F_sub // Paeth with b=c=0 turns out to be equivalent to sub }; static int stbi__paeth(int a, int b, int c) { - int p = a + b - c; - int pa = abs(p-a); - int pb = abs(p-b); - int pc = abs(p-c); - if (pa <= pb && pa <= pc) return a; - if (pb <= pc) return b; - return c; + // This formulation looks very different from the reference in the PNG spec, but is + // actually equivalent and has favorable data dependencies and admits straightforward + // generation of branch-free code, which helps performance significantly. + int thresh = c*3 - (a + b); + int lo = a < b ? a : b; + int hi = a < b ? b : a; + int t0 = (hi <= thresh) ? lo : c; + int t1 = (thresh <= lo) ? hi : t0; + return t1; } static const stbi_uc stbi__depth_scale_table[9] = { 0, 0xff, 0x55, 0, 0x11, 0,0,0, 0x01 }; +// adds an extra all-255 alpha channel +// dest == src is legal +// img_n must be 1 or 3 +static void stbi__create_png_alpha_expand8(stbi_uc *dest, stbi_uc *src, stbi__uint32 x, int img_n) +{ + int i; + // must process data backwards since we allow dest==src + if (img_n == 1) { + for (i=x-1; i >= 0; --i) { + dest[i*2+1] = 255; + dest[i*2+0] = src[i]; + } + } else { + STBI_ASSERT(img_n == 3); + for (i=x-1; i >= 0; --i) { + dest[i*4+3] = 255; + dest[i*4+2] = src[i*3+2]; + dest[i*4+1] = src[i*3+1]; + dest[i*4+0] = src[i*3+0]; + } + } +} + // create the png data from post-deflated data static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 raw_len, int out_n, stbi__uint32 x, stbi__uint32 y, int depth, int color) { - int bytes = (depth == 16? 2 : 1); + int bytes = (depth == 16 ? 
2 : 1); stbi__context *s = a->s; stbi__uint32 i,j,stride = x*out_n*bytes; stbi__uint32 img_len, img_width_bytes; + stbi_uc *filter_buf; + int all_ok = 1; int k; int img_n = s->img_n; // copy it into a local for later @@ -4614,8 +4712,11 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r a->out = (stbi_uc *) stbi__malloc_mad3(x, y, output_bytes, 0); // extra bytes to write off the end into if (!a->out) return stbi__err("outofmem", "Out of memory"); + // note: error exits here don't need to clean up a->out individually, + // stbi__do_png always does on error. if (!stbi__mad3sizes_valid(img_n, x, depth, 7)) return stbi__err("too large", "Corrupt PNG"); img_width_bytes = (((img_n * x * depth) + 7) >> 3); + if (!stbi__mad2sizes_valid(img_width_bytes, y, img_width_bytes)) return stbi__err("too large", "Corrupt PNG"); img_len = (img_width_bytes + 1) * y; // we used to check for exact match between raw_len and img_len on non-interlaced PNGs, @@ -4623,189 +4724,137 @@ static int stbi__create_png_image_raw(stbi__png *a, stbi_uc *raw, stbi__uint32 r // so just check for raw_len < img_len always. if (raw_len < img_len) return stbi__err("not enough pixels","Corrupt PNG"); + // Allocate two scan lines worth of filter workspace buffer. 
+ filter_buf = (stbi_uc *) stbi__malloc_mad2(img_width_bytes, 2, 0); + if (!filter_buf) return stbi__err("outofmem", "Out of memory"); + + // Filtering for low-bit-depth images + if (depth < 8) { + filter_bytes = 1; + width = img_width_bytes; + } + for (j=0; j < y; ++j) { - stbi_uc *cur = a->out + stride*j; - stbi_uc *prior; + // cur/prior filter buffers alternate + stbi_uc *cur = filter_buf + (j & 1)*img_width_bytes; + stbi_uc *prior = filter_buf + (~j & 1)*img_width_bytes; + stbi_uc *dest = a->out + stride*j; + int nk = width * filter_bytes; int filter = *raw++; - if (filter > 4) - return stbi__err("invalid filter","Corrupt PNG"); - - if (depth < 8) { - if (img_width_bytes > x) return stbi__err("invalid width","Corrupt PNG"); - cur += x*out_n - img_width_bytes; // store output to the rightmost img_len bytes, so we can decode in place - filter_bytes = 1; - width = img_width_bytes; + // check filter type + if (filter > 4) { + all_ok = stbi__err("invalid filter","Corrupt PNG"); + break; } - prior = cur - stride; // bugfix: need to compute this after 'cur +=' computation above // if first row, use special filter that doesn't sample previous row if (j == 0) filter = first_row_filter[filter]; - // handle first byte explicitly - for (k=0; k < filter_bytes; ++k) { - switch (filter) { - case STBI__F_none : cur[k] = raw[k]; break; - case STBI__F_sub : cur[k] = raw[k]; break; - case STBI__F_up : cur[k] = STBI__BYTECAST(raw[k] + prior[k]); break; - case STBI__F_avg : cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); break; - case STBI__F_paeth : cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(0,prior[k],0)); break; - case STBI__F_avg_first : cur[k] = raw[k]; break; - case STBI__F_paeth_first: cur[k] = raw[k]; break; - } - } - - if (depth == 8) { - if (img_n != out_n) - cur[img_n] = 255; // first pixel - raw += img_n; - cur += out_n; - prior += out_n; - } else if (depth == 16) { - if (img_n != out_n) { - cur[filter_bytes] = 255; // first pixel top byte - cur[filter_bytes+1] = 
255; // first pixel bottom byte - } - raw += filter_bytes; - cur += output_bytes; - prior += output_bytes; - } else { - raw += 1; - cur += 1; - prior += 1; + // perform actual filtering + switch (filter) { + case STBI__F_none: + memcpy(cur, raw, nk); + break; + case STBI__F_sub: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); + break; + case STBI__F_up: + for (k = 0; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); + break; + case STBI__F_avg: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (prior[k]>>1)); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); + break; + case STBI__F_paeth: + for (k = 0; k < filter_bytes; ++k) + cur[k] = STBI__BYTECAST(raw[k] + prior[k]); // prior[k] == stbi__paeth(0,prior[k],0) + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes], prior[k], prior[k-filter_bytes])); + break; + case STBI__F_avg_first: + memcpy(cur, raw, filter_bytes); + for (k = filter_bytes; k < nk; ++k) + cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); + break; } - // this is a little gross, so that we don't switch per-pixel or per-component - if (depth < 8 || img_n == out_n) { - int nk = (width - 1)*filter_bytes; - #define STBI__CASE(f) \ - case f: \ - for (k=0; k < nk; ++k) - switch (filter) { - // "none" filter turns into a memcpy here; make that explicit. 
- case STBI__F_none: memcpy(cur, raw, nk); break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k-filter_bytes]); } break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k-filter_bytes])>>1)); } break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],prior[k],prior[k-filter_bytes])); } break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k-filter_bytes] >> 1)); } break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k-filter_bytes],0,0)); } break; - } - #undef STBI__CASE - raw += nk; - } else { - STBI_ASSERT(img_n+1 == out_n); - #define STBI__CASE(f) \ - case f: \ - for (i=x-1; i >= 1; --i, cur[filter_bytes]=255,raw+=filter_bytes,cur+=output_bytes,prior+=output_bytes) \ - for (k=0; k < filter_bytes; ++k) - switch (filter) { - STBI__CASE(STBI__F_none) { cur[k] = raw[k]; } break; - STBI__CASE(STBI__F_sub) { cur[k] = STBI__BYTECAST(raw[k] + cur[k- output_bytes]); } break; - STBI__CASE(STBI__F_up) { cur[k] = STBI__BYTECAST(raw[k] + prior[k]); } break; - STBI__CASE(STBI__F_avg) { cur[k] = STBI__BYTECAST(raw[k] + ((prior[k] + cur[k- output_bytes])>>1)); } break; - STBI__CASE(STBI__F_paeth) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],prior[k],prior[k- output_bytes])); } break; - STBI__CASE(STBI__F_avg_first) { cur[k] = STBI__BYTECAST(raw[k] + (cur[k- output_bytes] >> 1)); } break; - STBI__CASE(STBI__F_paeth_first) { cur[k] = STBI__BYTECAST(raw[k] + stbi__paeth(cur[k- output_bytes],0,0)); } break; - } - #undef STBI__CASE - - // the loop above sets the high byte of the pixels' alpha, but for - // 16 bit png files we also need the low byte set. we'll do that here. 
- if (depth == 16) { - cur = a->out + stride*j; // start at the beginning of the row again - for (i=0; i < x; ++i,cur+=output_bytes) { - cur[filter_bytes+1] = 255; - } - } - } - } + raw += nk; - // we make a separate pass to expand bits to pixels; for performance, - // this could run two scanlines behind the above code, so it won't - // intefere with filtering but will still be in the cache. - if (depth < 8) { - for (j=0; j < y; ++j) { - stbi_uc *cur = a->out + stride*j; - stbi_uc *in = a->out + stride*j + x*out_n - img_width_bytes; - // unpack 1/2/4-bit into a 8-bit buffer. allows us to keep the common 8-bit path optimal at minimal cost for 1/2/4-bit - // png guarante byte alignment, if width is not multiple of 8/4/2 we'll decode dummy trailing data that will be skipped in the later loop + // expand decoded bits in cur to dest, also adding an extra alpha channel if desired + if (depth < 8) { stbi_uc scale = (color == 0) ? stbi__depth_scale_table[depth] : 1; // scale grayscale values to 0..255 range + stbi_uc *in = cur; + stbi_uc *out = dest; + stbi_uc inb = 0; + stbi__uint32 nsmp = x*img_n; - // note that the final byte might overshoot and write more data than desired. - // we can allocate enough data that this never writes out of memory, but it - // could also overwrite the next scanline. can it overwrite non-empty data - // on the next scanline? yes, consider 1-pixel-wide scanlines with 1-bit-per-pixel. 
- // so we need to explicitly clamp the final ones - + // expand bits to bytes first if (depth == 4) { - for (k=x*img_n; k >= 2; k-=2, ++in) { - *cur++ = scale * ((*in >> 4) ); - *cur++ = scale * ((*in ) & 0x0f); + for (i=0; i < nsmp; ++i) { + if ((i & 1) == 0) inb = *in++; + *out++ = scale * (inb >> 4); + inb <<= 4; } - if (k > 0) *cur++ = scale * ((*in >> 4) ); } else if (depth == 2) { - for (k=x*img_n; k >= 4; k-=4, ++in) { - *cur++ = scale * ((*in >> 6) ); - *cur++ = scale * ((*in >> 4) & 0x03); - *cur++ = scale * ((*in >> 2) & 0x03); - *cur++ = scale * ((*in ) & 0x03); + for (i=0; i < nsmp; ++i) { + if ((i & 3) == 0) inb = *in++; + *out++ = scale * (inb >> 6); + inb <<= 2; } - if (k > 0) *cur++ = scale * ((*in >> 6) ); - if (k > 1) *cur++ = scale * ((*in >> 4) & 0x03); - if (k > 2) *cur++ = scale * ((*in >> 2) & 0x03); - } else if (depth == 1) { - for (k=x*img_n; k >= 8; k-=8, ++in) { - *cur++ = scale * ((*in >> 7) ); - *cur++ = scale * ((*in >> 6) & 0x01); - *cur++ = scale * ((*in >> 5) & 0x01); - *cur++ = scale * ((*in >> 4) & 0x01); - *cur++ = scale * ((*in >> 3) & 0x01); - *cur++ = scale * ((*in >> 2) & 0x01); - *cur++ = scale * ((*in >> 1) & 0x01); - *cur++ = scale * ((*in ) & 0x01); + } else { + STBI_ASSERT(depth == 1); + for (i=0; i < nsmp; ++i) { + if ((i & 7) == 0) inb = *in++; + *out++ = scale * (inb >> 7); + inb <<= 1; } - if (k > 0) *cur++ = scale * ((*in >> 7) ); - if (k > 1) *cur++ = scale * ((*in >> 6) & 0x01); - if (k > 2) *cur++ = scale * ((*in >> 5) & 0x01); - if (k > 3) *cur++ = scale * ((*in >> 4) & 0x01); - if (k > 4) *cur++ = scale * ((*in >> 3) & 0x01); - if (k > 5) *cur++ = scale * ((*in >> 2) & 0x01); - if (k > 6) *cur++ = scale * ((*in >> 1) & 0x01); } - if (img_n != out_n) { - int q; - // insert alpha = 255 - cur = a->out + stride*j; + + // insert alpha=255 values if desired + if (img_n != out_n) + stbi__create_png_alpha_expand8(dest, dest, x, img_n); + } else if (depth == 8) { + if (img_n == out_n) + memcpy(dest, cur, x*img_n); + 
else + stbi__create_png_alpha_expand8(dest, cur, x, img_n); + } else if (depth == 16) { + // convert the image data from big-endian to platform-native + stbi__uint16 *dest16 = (stbi__uint16*)dest; + stbi__uint32 nsmp = x*img_n; + + if (img_n == out_n) { + for (i = 0; i < nsmp; ++i, ++dest16, cur += 2) + *dest16 = (cur[0] << 8) | cur[1]; + } else { + STBI_ASSERT(img_n+1 == out_n); if (img_n == 1) { - for (q=x-1; q >= 0; --q) { - cur[q*2+1] = 255; - cur[q*2+0] = cur[q]; + for (i = 0; i < x; ++i, dest16 += 2, cur += 2) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = 0xffff; } } else { STBI_ASSERT(img_n == 3); - for (q=x-1; q >= 0; --q) { - cur[q*4+3] = 255; - cur[q*4+2] = cur[q*3+2]; - cur[q*4+1] = cur[q*3+1]; - cur[q*4+0] = cur[q*3+0]; + for (i = 0; i < x; ++i, dest16 += 4, cur += 6) { + dest16[0] = (cur[0] << 8) | cur[1]; + dest16[1] = (cur[2] << 8) | cur[3]; + dest16[2] = (cur[4] << 8) | cur[5]; + dest16[3] = 0xffff; } } } } - } else if (depth == 16) { - // force the image data from big-endian to platform-native. - // this is done in a separate pass due to the decoding relying - // on the data being untouched, but could probably be done - // per-line during decode if care is taken. 
- stbi_uc *cur = a->out; - stbi__uint16 *cur16 = (stbi__uint16*)cur; - - for(i=0; i < x*y*out_n; ++i,cur16++,cur+=2) { - *cur16 = (cur[0] << 8) | cur[1]; - } } + STBI_FREE(filter_buf); + if (!all_ok) return 0; + return 1; } @@ -4961,7 +5010,7 @@ STBIDEF void stbi_convert_iphone_png_to_rgb(int flag_true_if_should_convert) static STBI_THREAD_LOCAL int stbi__unpremultiply_on_load_local, stbi__unpremultiply_on_load_set; static STBI_THREAD_LOCAL int stbi__de_iphone_flag_local, stbi__de_iphone_flag_set; -STBIDEF void stbi__unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) +STBIDEF void stbi_set_unpremultiply_on_load_thread(int flag_true_if_should_unpremultiply) { stbi__unpremultiply_on_load_local = flag_true_if_should_unpremultiply; stbi__unpremultiply_on_load_set = 1; @@ -5070,14 +5119,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (!pal_img_n) { s->img_n = (color & 2 ? 3 : 1) + (color & 4 ? 1 : 0); if ((1 << 30) / s->img_x / s->img_n < s->img_y) return stbi__err("too large", "Image too large to decode"); - if (scan == STBI__SCAN_header) return 1; } else { // if paletted, then pal_n is our final components, and // img_n is # components to decompress/filter. s->img_n = 1; if ((1 << 30) / s->img_x / 4 < s->img_y) return stbi__err("too large","Corrupt PNG"); - // if SCAN_header, have to scan to see if we have a tRNS } + // even with SCAN_header, have to scan to see if we have a tRNS break; } @@ -5109,10 +5157,14 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) if (!(s->img_n & 1)) return stbi__err("tRNS with alpha","Corrupt PNG"); if (c.length != (stbi__uint32) s->img_n*2) return stbi__err("bad tRNS len","Corrupt PNG"); has_trans = 1; + // non-paletted with tRNS = constant alpha. if header-scanning, we can stop now. 
+ if (scan == STBI__SCAN_header) { ++s->img_n; return 1; } if (z->depth == 16) { - for (k = 0; k < s->img_n; ++k) tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is + for (k = 0; k < s->img_n && k < 3; ++k) // extra loop test to suppress false GCC warning + tc16[k] = (stbi__uint16)stbi__get16be(s); // copy the values as-is } else { - for (k = 0; k < s->img_n; ++k) tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger + for (k = 0; k < s->img_n && k < 3; ++k) + tc[k] = (stbi_uc)(stbi__get16be(s) & 255) * stbi__depth_scale_table[z->depth]; // non 8-bit images will be larger } } break; @@ -5121,7 +5173,13 @@ static int stbi__parse_png_file(stbi__png *z, int scan, int req_comp) case STBI__PNG_TYPE('I','D','A','T'): { if (first) return stbi__err("first not IHDR", "Corrupt PNG"); if (pal_img_n && !pal_len) return stbi__err("no PLTE","Corrupt PNG"); - if (scan == STBI__SCAN_header) { s->img_n = pal_img_n; return 1; } + if (scan == STBI__SCAN_header) { + // header scan definitely stops at first IDAT + if (pal_img_n) + s->img_n = pal_img_n; + return 1; + } + if (c.length > (1u << 30)) return stbi__err("IDAT size limit", "IDAT section larger than 2^30 bytes"); if ((int)(ioff + c.length) < (int)ioff) return 0; if (ioff + c.length > idata_limit) { stbi__uint32 idata_limit_old = idata_limit; @@ -5504,8 +5562,22 @@ static void *stbi__bmp_load(stbi__context *s, int *x, int *y, int *comp, int req psize = (info.offset - info.extra_read - info.hsz) >> 2; } if (psize == 0) { - if (info.offset != s->callback_already_read + (s->img_buffer - s->img_buffer_original)) { - return stbi__errpuc("bad offset", "Corrupt BMP"); + // accept some number of extra bytes after the header, but if the offset points either to before + // the header ends or implies a large amount of extra data, reject the file as malformed + int bytes_read_so_far = s->callback_already_read + (int)(s->img_buffer - s->img_buffer_original); + int 
header_limit = 1024; // max we actually read is below 256 bytes currently. + int extra_data_limit = 256*4; // what ordinarily goes here is a palette; 256 entries*4 bytes is its max size. + if (bytes_read_so_far <= 0 || bytes_read_so_far > header_limit) { + return stbi__errpuc("bad header", "Corrupt BMP"); + } + // we established that bytes_read_so_far is positive and sensible. + // the first half of this test rejects offsets that are either too small positives, or + // negative, and guarantees that info.offset >= bytes_read_so_far > 0. this in turn + // ensures the number computed in the second half of the test can't overflow. + if (info.offset < bytes_read_so_far || info.offset - bytes_read_so_far > extra_data_limit) { + return stbi__errpuc("bad offset", "Corrupt BMP"); + } else { + stbi__skip(s, info.offset - bytes_read_so_far); } } @@ -7193,12 +7265,12 @@ static float *stbi__hdr_load(stbi__context *s, int *x, int *y, int *comp, int re // Run value = stbi__get8(s); count -= 128; - if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = value; } else { // Dump - if (count > nleft) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } + if ((count == 0) || (count > nleft)) { STBI_FREE(hdr_data); STBI_FREE(scanline); return stbi__errpf("corrupt", "bad RLE data in HDR"); } for (z = 0; z < count; ++z) scanline[i++ * 4 + k] = stbi__get8(s); } @@ -7452,10 +7524,17 @@ static void *stbi__pnm_load(stbi__context *s, int *x, int *y, int *comp, int req out = (stbi_uc *) stbi__malloc_mad4(s->img_n, s->img_x, s->img_y, ri->bits_per_channel / 8, 0); if (!out) return stbi__errpuc("outofmem", "Out of memory"); - stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 
8)); + if (!stbi__getn(s, out, s->img_n * s->img_x * s->img_y * (ri->bits_per_channel / 8))) { + STBI_FREE(out); + return stbi__errpuc("bad PNM", "PNM file truncated"); + } if (req_comp && req_comp != s->img_n) { - out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + if (ri->bits_per_channel == 16) { + out = (stbi_uc *) stbi__convert_format16((stbi__uint16 *) out, s->img_n, req_comp, s->img_x, s->img_y); + } else { + out = stbi__convert_format(out, s->img_n, req_comp, s->img_x, s->img_y); + } if (out == NULL) return out; // stbi__convert_format frees input on failure } return out; @@ -7492,6 +7571,8 @@ static int stbi__pnm_getinteger(stbi__context *s, char *c) while (!stbi__at_eof(s) && stbi__pnm_isdigit(*c)) { value = value*10 + (*c - '0'); *c = (char) stbi__get8(s); + if((value > 214748364) || (value == 214748364 && *c > '7')) + return stbi__err("integer parse overflow", "Parsing an integer in the PPM header overflowed a 32-bit int"); } return value; @@ -7522,9 +7603,13 @@ static int stbi__pnm_info(stbi__context *s, int *x, int *y, int *comp) stbi__pnm_skip_whitespace(s, &c); *x = stbi__pnm_getinteger(s, &c); // read width + if(*x == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); *y = stbi__pnm_getinteger(s, &c); // read height + if (*y == 0) + return stbi__err("invalid width", "PPM image header had zero or overflowing width"); stbi__pnm_skip_whitespace(s, &c); maxv = stbi__pnm_getinteger(s, &c); // read max value diff --git a/3rdparty/stb_image/stb_image_write.h b/3rdparty/stb_image/stb_image_write.h index 665f363529..e4b32ed1bc 100644 --- a/3rdparty/stb_image/stb_image_write.h +++ b/3rdparty/stb_image/stb_image_write.h @@ -55,11 +55,11 @@ There are also five equivalent functions that use an arbitrary write function. 
You are expected to open/close your file-equivalent before and after calling these: - int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int -stride_in_bytes); int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void -*data); int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); int -stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); int -stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); + int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); + int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); + int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); + int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); where the callback is: void stbi_write_func(void *context, void *data, int size); @@ -156,67 +156,65 @@ LICENSE // if STB_IMAGE_WRITE_STATIC causes problems, try defining STBIWDEF to 'inline' or 'static inline' #ifndef STBIWDEF #ifdef STB_IMAGE_WRITE_STATIC -#define STBIWDEF static +#define STBIWDEF static #else #ifdef __cplusplus -#define STBIWDEF extern "C" +#define STBIWDEF extern "C" #else -#define STBIWDEF extern +#define STBIWDEF extern #endif #endif #endif -#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations +#ifndef STB_IMAGE_WRITE_STATIC // C++ forbids static forward declarations STBIWDEF int stbi_write_tga_with_rle; STBIWDEF int stbi_write_png_compression_level; STBIWDEF int stbi_write_force_png_filter; 
#endif #ifndef STBI_WRITE_NO_STDIO -STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); -STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); -STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_png(char const *filename, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp(char const *filename, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga(char const *filename, int w, int h, int comp, const void *data); STBIWDEF int stbi_write_hdr(char const *filename, int w, int h, int comp, const float *data); -STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality); +STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality); #ifdef STBIW_WINDOWS_UTF8 -STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t *input); +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input); #endif #endif typedef void stbi_write_func(void *context, void *data, int size); -STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, - int stride_in_bytes); -STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); -STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data, int stride_in_bytes); +STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void *data); +STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const void 
*data); STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int w, int h, int comp, const float *data); -STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, - int quality); +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality); STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean); -#endif // INCLUDE_STB_IMAGE_WRITE_H +#endif//INCLUDE_STB_IMAGE_WRITE_H #ifdef STB_IMAGE_WRITE_IMPLEMENTATION #ifdef _WIN32 -#ifndef _CRT_SECURE_NO_WARNINGS -#define _CRT_SECURE_NO_WARNINGS -#endif -#ifndef _CRT_NONSTDC_NO_DEPRECATE -#define _CRT_NONSTDC_NO_DEPRECATE -#endif + #ifndef _CRT_SECURE_NO_WARNINGS + #define _CRT_SECURE_NO_WARNINGS + #endif + #ifndef _CRT_NONSTDC_NO_DEPRECATE + #define _CRT_NONSTDC_NO_DEPRECATE + #endif #endif #ifndef STBI_WRITE_NO_STDIO #include #endif // STBI_WRITE_NO_STDIO -#include #include #include #include +#include #if defined(STBIW_MALLOC) && defined(STBIW_FREE) && (defined(STBIW_REALLOC) || defined(STBIW_REALLOC_SIZED)) // ok @@ -227,25 +225,27 @@ STBIWDEF void stbi_flip_vertically_on_write(int flip_boolean); #endif #ifndef STBIW_MALLOC -#define STBIW_MALLOC(sz) malloc(sz) -#define STBIW_REALLOC(p, newsz) realloc(p, newsz) -#define STBIW_FREE(p) free(p) +#define STBIW_MALLOC(sz) malloc(sz) +#define STBIW_REALLOC(p,newsz) realloc(p,newsz) +#define STBIW_FREE(p) free(p) #endif #ifndef STBIW_REALLOC_SIZED -#define STBIW_REALLOC_SIZED(p, oldsz, newsz) STBIW_REALLOC(p, newsz) +#define STBIW_REALLOC_SIZED(p,oldsz,newsz) STBIW_REALLOC(p,newsz) #endif + #ifndef STBIW_MEMMOVE -#define STBIW_MEMMOVE(a, b, sz) memmove(a, b, sz) +#define STBIW_MEMMOVE(a,b,sz) memmove(a,b,sz) #endif + #ifndef STBIW_ASSERT #include #define STBIW_ASSERT(x) assert(x) #endif -#define STBIW_UCHAR(x) (unsigned char)((x)&0xff) +#define STBIW_UCHAR(x) (unsigned char) ((x) & 0xff) #ifdef STB_IMAGE_WRITE_STATIC static int 
stbi_write_png_compression_level = 8; @@ -259,25 +259,32 @@ int stbi_write_force_png_filter = -1; static int stbi__flip_vertically_on_write = 0; -STBIWDEF void stbi_flip_vertically_on_write(int flag) { stbi__flip_vertically_on_write = flag; } +STBIWDEF void stbi_flip_vertically_on_write(int flag) +{ + stbi__flip_vertically_on_write = flag; +} -typedef struct { - stbi_write_func *func; - void *context; - unsigned char buffer[64]; - int buf_used; +typedef struct +{ + stbi_write_func *func; + void *context; + unsigned char buffer[64]; + int buf_used; } stbi__write_context; // initialize a callback-based context static void stbi__start_write_callbacks(stbi__write_context *s, stbi_write_func *c, void *context) { - s->func = c; - s->context = context; + s->func = c; + s->context = context; } #ifndef STBI_WRITE_NO_STDIO -static void stbi__stdio_write(void *context, void *data, int size) { fwrite(data, 1, size, (FILE *)context); } +static void stbi__stdio_write(void *context, void *data, int size) +{ + fwrite(data,1,size,(FILE*) context); +} #if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) #ifdef __cplusplus @@ -285,353 +292,339 @@ static void stbi__stdio_write(void *context, void *data, int size) { fwrite(data #else #define STBIW_EXTERN extern #endif -STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, - const char *str, int cbmb, wchar_t *widestr, - int cchwide); -STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, unsigned long flags, - const wchar_t *widestr, int cchwide, char *str, - int cbmb, const char *defchar, int *used_default); - -STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t *input) +STBIW_EXTERN __declspec(dllimport) int __stdcall MultiByteToWideChar(unsigned int cp, unsigned long flags, const char *str, int cbmb, wchar_t *widestr, int cchwide); +STBIW_EXTERN __declspec(dllimport) int __stdcall WideCharToMultiByte(unsigned int cp, 
unsigned long flags, const wchar_t *widestr, int cchwide, char *str, int cbmb, const char *defchar, int *used_default); + +STBIWDEF int stbiw_convert_wchar_to_utf8(char *buffer, size_t bufferlen, const wchar_t* input) { - return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int)bufferlen, NULL, NULL); + return WideCharToMultiByte(65001 /* UTF8 */, 0, input, -1, buffer, (int) bufferlen, NULL, NULL); } #endif static FILE *stbiw__fopen(char const *filename, char const *mode) { - FILE *f; + FILE *f; #if defined(_WIN32) && defined(STBIW_WINDOWS_UTF8) - wchar_t wMode[64]; - wchar_t wFilename[1024]; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename) / sizeof(*wFilename))) - return 0; + wchar_t wMode[64]; + wchar_t wFilename[1024]; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, filename, -1, wFilename, sizeof(wFilename)/sizeof(*wFilename))) + return 0; - if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode) / sizeof(*wMode))) - return 0; + if (0 == MultiByteToWideChar(65001 /* UTF8 */, 0, mode, -1, wMode, sizeof(wMode)/sizeof(*wMode))) + return 0; #if defined(_MSC_VER) && _MSC_VER >= 1400 - if (0 != _wfopen_s(&f, wFilename, wMode)) - f = 0; + if (0 != _wfopen_s(&f, wFilename, wMode)) + f = 0; #else - f = _wfopen(wFilename, wMode); + f = _wfopen(wFilename, wMode); #endif #elif defined(_MSC_VER) && _MSC_VER >= 1400 - if (0 != fopen_s(&f, filename, mode)) - f = 0; + if (0 != fopen_s(&f, filename, mode)) + f=0; #else - f = fopen(filename, mode); + f = fopen(filename, mode); #endif - return f; + return f; } static int stbi__start_write_file(stbi__write_context *s, const char *filename) { - FILE *f = stbiw__fopen(filename, "wb"); - stbi__start_write_callbacks(s, stbi__stdio_write, (void *)f); - return f != NULL; + FILE *f = stbiw__fopen(filename, "wb"); + stbi__start_write_callbacks(s, stbi__stdio_write, (void *) f); + return f != NULL; } -static void 
stbi__end_write_file(stbi__write_context *s) { fclose((FILE *)s->context); } +static void stbi__end_write_file(stbi__write_context *s) +{ + fclose((FILE *)s->context); +} #endif // !STBI_WRITE_NO_STDIO typedef unsigned int stbiw_uint32; -typedef int stb_image_write_test[sizeof(stbiw_uint32) == 4 ? 1 : -1]; +typedef int stb_image_write_test[sizeof(stbiw_uint32)==4 ? 1 : -1]; static void stbiw__writefv(stbi__write_context *s, const char *fmt, va_list v) { - while (*fmt) { - switch (*fmt++) { - case ' ': - break; - case '1': { - unsigned char x = STBIW_UCHAR(va_arg(v, int)); - s->func(s->context, &x, 1); - break; - } - case '2': { - int x = va_arg(v, int); - unsigned char b[2]; - b[0] = STBIW_UCHAR(x); - b[1] = STBIW_UCHAR(x >> 8); - s->func(s->context, b, 2); - break; - } - case '4': { - stbiw_uint32 x = va_arg(v, int); - unsigned char b[4]; - b[0] = STBIW_UCHAR(x); - b[1] = STBIW_UCHAR(x >> 8); - b[2] = STBIW_UCHAR(x >> 16); - b[3] = STBIW_UCHAR(x >> 24); - s->func(s->context, b, 4); - break; - } - default: - STBIW_ASSERT(0); - return; - } - } + while (*fmt) { + switch (*fmt++) { + case ' ': break; + case '1': { unsigned char x = STBIW_UCHAR(va_arg(v, int)); + s->func(s->context,&x,1); + break; } + case '2': { int x = va_arg(v,int); + unsigned char b[2]; + b[0] = STBIW_UCHAR(x); + b[1] = STBIW_UCHAR(x>>8); + s->func(s->context,b,2); + break; } + case '4': { stbiw_uint32 x = va_arg(v,int); + unsigned char b[4]; + b[0]=STBIW_UCHAR(x); + b[1]=STBIW_UCHAR(x>>8); + b[2]=STBIW_UCHAR(x>>16); + b[3]=STBIW_UCHAR(x>>24); + s->func(s->context,b,4); + break; } + default: + STBIW_ASSERT(0); + return; + } + } } static void stbiw__writef(stbi__write_context *s, const char *fmt, ...) 
{ - va_list v; - va_start(v, fmt); - stbiw__writefv(s, fmt, v); - va_end(v); + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); } static void stbiw__write_flush(stbi__write_context *s) { - if (s->buf_used) { - s->func(s->context, &s->buffer, s->buf_used); - s->buf_used = 0; - } + if (s->buf_used) { + s->func(s->context, &s->buffer, s->buf_used); + s->buf_used = 0; + } } -static void stbiw__putc(stbi__write_context *s, unsigned char c) { s->func(s->context, &c, 1); } +static void stbiw__putc(stbi__write_context *s, unsigned char c) +{ + s->func(s->context, &c, 1); +} static void stbiw__write1(stbi__write_context *s, unsigned char a) { - if ((size_t)s->buf_used + 1 > sizeof(s->buffer)) - stbiw__write_flush(s); - s->buffer[s->buf_used++] = a; + if ((size_t)s->buf_used + 1 > sizeof(s->buffer)) + stbiw__write_flush(s); + s->buffer[s->buf_used++] = a; } static void stbiw__write3(stbi__write_context *s, unsigned char a, unsigned char b, unsigned char c) { - int n; - if ((size_t)s->buf_used + 3 > sizeof(s->buffer)) - stbiw__write_flush(s); - n = s->buf_used; - s->buf_used = n + 3; - s->buffer[n + 0] = a; - s->buffer[n + 1] = b; - s->buffer[n + 2] = c; + int n; + if ((size_t)s->buf_used + 3 > sizeof(s->buffer)) + stbiw__write_flush(s); + n = s->buf_used; + s->buf_used = n+3; + s->buffer[n+0] = a; + s->buffer[n+1] = b; + s->buffer[n+2] = c; } -static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, - unsigned char *d) +static void stbiw__write_pixel(stbi__write_context *s, int rgb_dir, int comp, int write_alpha, int expand_mono, unsigned char *d) { - unsigned char bg[3] = {255, 0, 255}, px[3]; - int k; - - if (write_alpha < 0) - stbiw__write1(s, d[comp - 1]); - - switch (comp) { - case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case - case 1: - if (expand_mono) - stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp - else - stbiw__write1(s, d[0]); // 
monochrome TGA - break; - case 4: - if (!write_alpha) { - // composite against pink background - for (k = 0; k < 3; ++k) - px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; - stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); - break; - } - /* FALLTHROUGH */ - case 3: - stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); - break; - } - if (write_alpha > 0) - stbiw__write1(s, d[comp - 1]); + unsigned char bg[3] = { 255, 0, 255}, px[3]; + int k; + + if (write_alpha < 0) + stbiw__write1(s, d[comp - 1]); + + switch (comp) { + case 2: // 2 pixels = mono + alpha, alpha is written separately, so same as 1-channel case + case 1: + if (expand_mono) + stbiw__write3(s, d[0], d[0], d[0]); // monochrome bmp + else + stbiw__write1(s, d[0]); // monochrome TGA + break; + case 4: + if (!write_alpha) { + // composite against pink background + for (k = 0; k < 3; ++k) + px[k] = bg[k] + ((d[k] - bg[k]) * d[3]) / 255; + stbiw__write3(s, px[1 - rgb_dir], px[1], px[1 + rgb_dir]); + break; + } + /* FALLTHROUGH */ + case 3: + stbiw__write3(s, d[1 - rgb_dir], d[1], d[1 + rgb_dir]); + break; + } + if (write_alpha > 0) + stbiw__write1(s, d[comp - 1]); } -static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, - int write_alpha, int scanline_pad, int expand_mono) +static void stbiw__write_pixels(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, void *data, int write_alpha, int scanline_pad, int expand_mono) { - stbiw_uint32 zero = 0; - int i, j, j_end; - - if (y <= 0) - return; - - if (stbi__flip_vertically_on_write) - vdir *= -1; - - if (vdir < 0) { - j_end = -1; - j = y - 1; - } else { - j_end = y; - j = 0; - } - - for (; j != j_end; j += vdir) { - for (i = 0; i < x; ++i) { - unsigned char *d = (unsigned char *)data + (j * x + i) * comp; - stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); - } - stbiw__write_flush(s); - s->func(s->context, &zero, scanline_pad); - } + stbiw_uint32 zero = 0; 
+ int i,j, j_end; + + if (y <= 0) + return; + + if (stbi__flip_vertically_on_write) + vdir *= -1; + + if (vdir < 0) { + j_end = -1; j = y-1; + } else { + j_end = y; j = 0; + } + + for (; j != j_end; j += vdir) { + for (i=0; i < x; ++i) { + unsigned char *d = (unsigned char *) data + (j*x+i)*comp; + stbiw__write_pixel(s, rgb_dir, comp, write_alpha, expand_mono, d); + } + stbiw__write_flush(s); + s->func(s->context, &zero, scanline_pad); + } } -static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, - void *data, int alpha, int pad, const char *fmt, ...) +static int stbiw__outfile(stbi__write_context *s, int rgb_dir, int vdir, int x, int y, int comp, int expand_mono, void *data, int alpha, int pad, const char *fmt, ...) { - if (y < 0 || x < 0) { - return 0; - } else { - va_list v; - va_start(v, fmt); - stbiw__writefv(s, fmt, v); - va_end(v); - stbiw__write_pixels(s, rgb_dir, vdir, x, y, comp, data, alpha, pad, expand_mono); - return 1; - } + if (y < 0 || x < 0) { + return 0; + } else { + va_list v; + va_start(v, fmt); + stbiw__writefv(s, fmt, v); + va_end(v); + stbiw__write_pixels(s,rgb_dir,vdir,x,y,comp,data,alpha,pad, expand_mono); + return 1; + } } static int stbi_write_bmp_core(stbi__write_context *s, int x, int y, int comp, const void *data) { - if (comp != 4) { - // write RGB bitmap - int pad = (-x * 3) & 3; - return stbiw__outfile(s, -1, -1, x, y, comp, 1, (void *)data, 0, pad, - "11 4 22 4" - "4 44 22 444444", - 'B', 'M', 14 + 40 + (x * 3 + pad) * y, 0, 0, 14 + 40, // file header - 40, x, y, 1, 24, 0, 0, 0, 0, 0, 0); // bitmap header - } else { - // RGBA bitmaps need a v4 header - // use BI_BITFIELDS mode with 32bpp and alpha mask - // (straight BI_RGB with alpha mask doesn't work in most readers) - return stbiw__outfile(s, -1, -1, x, y, comp, 1, (void *)data, 1, 0, - "11 4 22 4" - "4 44 22 444444 4444 4 444 444 444 444", - 'B', 'M', 14 + 108 + x * y * 4, 0, 0, 14 + 108, // file header - 108, x, y, 1, 
32, 3, 0, 0, 0, 0, 0, 0xff0000, 0xff00, 0xff, 0xff000000u, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0); // bitmap V4 header - } + if (comp != 4) { + // write RGB bitmap + int pad = (-x*3) & 3; + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *) data,0,pad, + "11 4 22 4" "4 44 22 444444", + 'B', 'M', 14+40+(x*3+pad)*y, 0,0, 14+40, // file header + 40, x,y, 1,24, 0,0,0,0,0,0); // bitmap header + } else { + // RGBA bitmaps need a v4 header + // use BI_BITFIELDS mode with 32bpp and alpha mask + // (straight BI_RGB with alpha mask doesn't work in most readers) + return stbiw__outfile(s,-1,-1,x,y,comp,1,(void *)data,1,0, + "11 4 22 4" "4 44 22 444444 4444 4 444 444 444 444", + 'B', 'M', 14+108+x*y*4, 0, 0, 14+108, // file header + 108, x,y, 1,32, 3,0,0,0,0,0, 0xff0000,0xff00,0xff,0xff000000u, 0, 0,0,0, 0,0,0, 0,0,0, 0,0,0); // bitmap V4 header + } } STBIWDEF int stbi_write_bmp_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { - stbi__write_context s = {0}; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_bmp_core(&s, x, y, comp, data); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_bmp_core(&s, x, y, comp, data); } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_bmp(char const *filename, int x, int y, int comp, const void *data) { - stbi__write_context s = {0}; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_bmp_core(&s, x, y, comp, data); - stbi__end_write_file(&s); - return r; - } else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_bmp_core(&s, x, y, comp, data); + stbi__end_write_file(&s); + return r; + } else + return 0; } -#endif //! STBI_WRITE_NO_STDIO +#endif //!STBI_WRITE_NO_STDIO static int stbi_write_tga_core(stbi__write_context *s, int x, int y, int comp, void *data) { - int has_alpha = (comp == 2 || comp == 4); - int colorbytes = has_alpha ? 
comp - 1 : comp; - int format = colorbytes < 2 ? 3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 - - if (y < 0 || x < 0) - return 0; - - if (!stbi_write_tga_with_rle) { - return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *)data, has_alpha, 0, "111 221 2222 11", 0, 0, format, 0, 0, - 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); - } else { - int i, j, k; - int jend, jdir; - - stbiw__writef(s, "111 221 2222 11", 0, 0, format + 8, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, - has_alpha * 8); - - if (stbi__flip_vertically_on_write) { - j = 0; - jend = y; - jdir = 1; - } else { - j = y - 1; - jend = -1; - jdir = -1; - } - for (; j != jend; j += jdir) { - unsigned char *row = (unsigned char *)data + j * x * comp; - int len; - - for (i = 0; i < x; i += len) { - unsigned char *begin = row + i * comp; - int diff = 1; - len = 1; - - if (i < x - 1) { - ++len; - diff = memcmp(begin, row + (i + 1) * comp, comp); - if (diff) { - const unsigned char *prev = begin; - for (k = i + 2; k < x && len < 128; ++k) { - if (memcmp(prev, row + k * comp, comp)) { - prev += comp; - ++len; - } else { - --len; - break; - } + int has_alpha = (comp == 2 || comp == 4); + int colorbytes = has_alpha ? comp-1 : comp; + int format = colorbytes < 2 ? 
3 : 2; // 3 color channels (RGB/RGBA) = 2, 1 color channel (Y/YA) = 3 + + if (y < 0 || x < 0) + return 0; + + if (!stbi_write_tga_with_rle) { + return stbiw__outfile(s, -1, -1, x, y, comp, 0, (void *) data, has_alpha, 0, + "111 221 2222 11", 0, 0, format, 0, 0, 0, 0, 0, x, y, (colorbytes + has_alpha) * 8, has_alpha * 8); + } else { + int i,j,k; + int jend, jdir; + + stbiw__writef(s, "111 221 2222 11", 0,0,format+8, 0,0,0, 0,0,x,y, (colorbytes + has_alpha) * 8, has_alpha * 8); + + if (stbi__flip_vertically_on_write) { + j = 0; + jend = y; + jdir = 1; + } else { + j = y-1; + jend = -1; + jdir = -1; + } + for (; j != jend; j += jdir) { + unsigned char *row = (unsigned char *) data + j * x * comp; + int len; + + for (i = 0; i < x; i += len) { + unsigned char *begin = row + i * comp; + int diff = 1; + len = 1; + + if (i < x - 1) { + ++len; + diff = memcmp(begin, row + (i + 1) * comp, comp); + if (diff) { + const unsigned char *prev = begin; + for (k = i + 2; k < x && len < 128; ++k) { + if (memcmp(prev, row + k * comp, comp)) { + prev += comp; + ++len; + } else { + --len; + break; + } + } + } else { + for (k = i + 2; k < x && len < 128; ++k) { + if (!memcmp(begin, row + k * comp, comp)) { + ++len; + } else { + break; + } + } + } } - } else { - for (k = i + 2; k < x && len < 128; ++k) { - if (!memcmp(begin, row + k * comp, comp)) { - ++len; - } else { - break; - } + + if (diff) { + unsigned char header = STBIW_UCHAR(len - 1); + stbiw__write1(s, header); + for (k = 0; k < len; ++k) { + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); + } + } else { + unsigned char header = STBIW_UCHAR(len - 129); + stbiw__write1(s, header); + stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); } - } - } - - if (diff) { - unsigned char header = STBIW_UCHAR(len - 1); - stbiw__write1(s, header); - for (k = 0; k < len; ++k) { - stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin + k * comp); - } - } else { - unsigned char header = STBIW_UCHAR(len - 129); - stbiw__write1(s, 
header); - stbiw__write_pixel(s, -1, comp, has_alpha, 0, begin); - } + } } - } - stbiw__write_flush(s); - } - return 1; + stbiw__write_flush(s); + } + return 1; } STBIWDEF int stbi_write_tga_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data) { - stbi__write_context s = {0}; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_tga_core(&s, x, y, comp, (void *)data); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_tga_core(&s, x, y, comp, (void *) data); } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const void *data) { - stbi__write_context s = {0}; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_tga_core(&s, x, y, comp, (void *)data); - stbi__end_write_file(&s); - return r; - } else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_tga_core(&s, x, y, comp, (void *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif @@ -639,183 +632,178 @@ STBIWDEF int stbi_write_tga(char const *filename, int x, int y, int comp, const // Radiance RGBE HDR writer // by Baldur Karlsson -#define stbiw__max(a, b) ((a) > (b) ? (a) : (b)) +#define stbiw__max(a, b) ((a) > (b) ? 
(a) : (b)) #ifndef STBI_WRITE_NO_STDIO static void stbiw__linear_to_rgbe(unsigned char *rgbe, float *linear) { - int exponent; - float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); - - if (maxcomp < 1e-32f) { - rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; - } else { - float normalize = (float)frexp(maxcomp, &exponent) * 256.0f / maxcomp; - - rgbe[0] = (unsigned char)(linear[0] * normalize); - rgbe[1] = (unsigned char)(linear[1] * normalize); - rgbe[2] = (unsigned char)(linear[2] * normalize); - rgbe[3] = (unsigned char)(exponent + 128); - } + int exponent; + float maxcomp = stbiw__max(linear[0], stbiw__max(linear[1], linear[2])); + + if (maxcomp < 1e-32f) { + rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0; + } else { + float normalize = (float) frexp(maxcomp, &exponent) * 256.0f/maxcomp; + + rgbe[0] = (unsigned char)(linear[0] * normalize); + rgbe[1] = (unsigned char)(linear[1] * normalize); + rgbe[2] = (unsigned char)(linear[2] * normalize); + rgbe[3] = (unsigned char)(exponent + 128); + } } static void stbiw__write_run_data(stbi__write_context *s, int length, unsigned char databyte) { - unsigned char lengthbyte = STBIW_UCHAR(length + 128); - STBIW_ASSERT(length + 128 <= 255); - s->func(s->context, &lengthbyte, 1); - s->func(s->context, &databyte, 1); + unsigned char lengthbyte = STBIW_UCHAR(length+128); + STBIW_ASSERT(length+128 <= 255); + s->func(s->context, &lengthbyte, 1); + s->func(s->context, &databyte, 1); } static void stbiw__write_dump_data(stbi__write_context *s, int length, unsigned char *data) { - unsigned char lengthbyte = STBIW_UCHAR(length); - STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code - s->func(s->context, &lengthbyte, 1); - s->func(s->context, data, length); + unsigned char lengthbyte = STBIW_UCHAR(length); + STBIW_ASSERT(length <= 128); // inconsistent with spec but consistent with official code + s->func(s->context, &lengthbyte, 1); + s->func(s->context, data, length); } -static void 
stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, - float *scanline) +static void stbiw__write_hdr_scanline(stbi__write_context *s, int width, int ncomp, unsigned char *scratch, float *scanline) { - unsigned char scanlineheader[4] = {2, 2, 0, 0}; - unsigned char rgbe[4]; - float linear[3]; - int x; - - scanlineheader[2] = (width & 0xff00) >> 8; - scanlineheader[3] = (width & 0x00ff); - - /* skip RLE for images too small or large */ - if (width < 8 || width >= 32768) { - for (x = 0; x < width; x++) { - switch (ncomp) { - case 4: /* fallthrough */ - case 3: - linear[2] = scanline[x * ncomp + 2]; - linear[1] = scanline[x * ncomp + 1]; - linear[0] = scanline[x * ncomp + 0]; - break; - default: - linear[0] = linear[1] = linear[2] = scanline[x * ncomp + 0]; - break; + unsigned char scanlineheader[4] = { 2, 2, 0, 0 }; + unsigned char rgbe[4]; + float linear[3]; + int x; + + scanlineheader[2] = (width&0xff00)>>8; + scanlineheader[3] = (width&0x00ff); + + /* skip RLE for images too small or large */ + if (width < 8 || width >= 32768) { + for (x=0; x < width; x++) { + switch (ncomp) { + case 4: /* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + s->func(s->context, rgbe, 4); } - stbiw__linear_to_rgbe(rgbe, linear); - s->func(s->context, rgbe, 4); - } - } else { - int c, r; - /* encode into scratch buffer */ - for (x = 0; x < width; x++) { - switch (ncomp) { - case 4: /* fallthrough */ - case 3: - linear[2] = scanline[x * ncomp + 2]; - linear[1] = scanline[x * ncomp + 1]; - linear[0] = scanline[x * ncomp + 0]; - break; - default: - linear[0] = linear[1] = linear[2] = scanline[x * ncomp + 0]; - break; + } else { + int c,r; + /* encode into scratch buffer */ + for (x=0; x < width; x++) { + switch(ncomp) { + case 4: 
/* fallthrough */ + case 3: linear[2] = scanline[x*ncomp + 2]; + linear[1] = scanline[x*ncomp + 1]; + linear[0] = scanline[x*ncomp + 0]; + break; + default: + linear[0] = linear[1] = linear[2] = scanline[x*ncomp + 0]; + break; + } + stbiw__linear_to_rgbe(rgbe, linear); + scratch[x + width*0] = rgbe[0]; + scratch[x + width*1] = rgbe[1]; + scratch[x + width*2] = rgbe[2]; + scratch[x + width*3] = rgbe[3]; } - stbiw__linear_to_rgbe(rgbe, linear); - scratch[x + width * 0] = rgbe[0]; - scratch[x + width * 1] = rgbe[1]; - scratch[x + width * 2] = rgbe[2]; - scratch[x + width * 3] = rgbe[3]; - } - - s->func(s->context, scanlineheader, 4); - - /* RLE each component separately */ - for (c = 0; c < 4; c++) { - unsigned char *comp = &scratch[width * c]; - - x = 0; - while (x < width) { - // find first run - r = x; - while (r + 2 < width) { - if (comp[r] == comp[r + 1] && comp[r] == comp[r + 2]) - break; - ++r; - } - if (r + 2 >= width) - r = width; - // dump up to first run - while (x < r) { - int len = r - x; - if (len > 128) - len = 128; - stbiw__write_dump_data(s, len, &comp[x]); - x += len; - } - // if there's a run, output it - if (r + 2 < width) { // same test as what we break out of in search loop, so only true if we break'd - // find next byte after run - while (r < width && comp[r] == comp[x]) - ++r; - // output run up to r - while (x < r) { - int len = r - x; - if (len > 127) - len = 127; - stbiw__write_run_data(s, len, comp[x]); - x += len; - } - } + + s->func(s->context, scanlineheader, 4); + + /* RLE each component separately */ + for (c=0; c < 4; c++) { + unsigned char *comp = &scratch[width*c]; + + x = 0; + while (x < width) { + // find first run + r = x; + while (r+2 < width) { + if (comp[r] == comp[r+1] && comp[r] == comp[r+2]) + break; + ++r; + } + if (r+2 >= width) + r = width; + // dump up to first run + while (x < r) { + int len = r-x; + if (len > 128) len = 128; + stbiw__write_dump_data(s, len, &comp[x]); + x += len; + } + // if there's a run, output it + 
if (r+2 < width) { // same test as what we break out of in search loop, so only true if we break'd + // find next byte after run + while (r < width && comp[r] == comp[x]) + ++r; + // output run up to r + while (x < r) { + int len = r-x; + if (len > 127) len = 127; + stbiw__write_run_data(s, len, comp[x]); + x += len; + } + } + } } - } - } + } } static int stbi_write_hdr_core(stbi__write_context *s, int x, int y, int comp, float *data) { - if (y <= 0 || x <= 0 || data == NULL) - return 0; - else { - // Each component is stored separately. Allocate scratch space for full output scanline. - unsigned char *scratch = (unsigned char *)STBIW_MALLOC(x * 4); - int i, len; - char buffer[128]; - char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; - s->func(s->context, header, sizeof(header) - 1); + if (y <= 0 || x <= 0 || data == NULL) + return 0; + else { + // Each component is stored separately. Allocate scratch space for full output scanline. + unsigned char *scratch = (unsigned char *) STBIW_MALLOC(x*4); + int i, len; + char buffer[128]; + char header[] = "#?RADIANCE\n# Written by stb_image_write.h\nFORMAT=32-bit_rle_rgbe\n"; + s->func(s->context, header, sizeof(header)-1); #ifdef __STDC_LIB_EXT1__ - len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); + len = sprintf_s(buffer, sizeof(buffer), "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); #else - len = snprintf(buffer, 128, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); + len = sprintf(buffer, "EXPOSURE= 1.0000000000000\n\n-Y %d +X %d\n", y, x); #endif - s->func(s->context, buffer, len); - - for (i = 0; i < y; i++) - stbiw__write_hdr_scanline(s, x, comp, scratch, - data + comp * x * (stbi__flip_vertically_on_write ? y - 1 - i : i)); - STBIW_FREE(scratch); - return 1; - } + s->func(s->context, buffer, len); + + for(i=0; i < y; i++) + stbiw__write_hdr_scanline(s, x, comp, scratch, data + comp*x*(stbi__flip_vertically_on_write ? 
y-1-i : i)); + STBIW_FREE(scratch); + return 1; + } } STBIWDEF int stbi_write_hdr_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const float *data) { - stbi__write_context s = {0}; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_hdr_core(&s, x, y, comp, (float *)data); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_hdr_core(&s, x, y, comp, (float *) data); } STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const float *data) { - stbi__write_context s = {0}; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_hdr_core(&s, x, y, comp, (float *)data); - stbi__end_write_file(&s); - return r; - } else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_hdr_core(&s, x, y, comp, (float *) data); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif // STBI_WRITE_NO_STDIO + ////////////////////////////////////////////////////////////////////////////// // // PNG writer @@ -823,503 +811,434 @@ STBIWDEF int stbi_write_hdr(char const *filename, int x, int y, int comp, const #ifndef STBIW_ZLIB_COMPRESS // stretchy buffer; stbiw__sbpush() == vector<>::push_back() -- stbiw__sbcount() == vector<>::size() -#define stbiw__sbraw(a) ((int *)(void *)(a)-2) -#define stbiw__sbm(a) stbiw__sbraw(a)[0] -#define stbiw__sbn(a) stbiw__sbraw(a)[1] +#define stbiw__sbraw(a) ((int *) (void *) (a) - 2) +#define stbiw__sbm(a) stbiw__sbraw(a)[0] +#define stbiw__sbn(a) stbiw__sbraw(a)[1] -#define stbiw__sbneedgrow(a, n) ((a) == 0 || stbiw__sbn(a) + n >= stbiw__sbm(a)) -#define stbiw__sbmaybegrow(a, n) (stbiw__sbneedgrow(a, (n)) ? stbiw__sbgrow(a, n) : 0) -#define stbiw__sbgrow(a, n) stbiw__sbgrowf((void **)&(a), (n), sizeof(*(a))) +#define stbiw__sbneedgrow(a,n) ((a)==0 || stbiw__sbn(a)+n >= stbiw__sbm(a)) +#define stbiw__sbmaybegrow(a,n) (stbiw__sbneedgrow(a,(n)) ? 
stbiw__sbgrow(a,n) : 0) +#define stbiw__sbgrow(a,n) stbiw__sbgrowf((void **) &(a), (n), sizeof(*(a))) -#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a, 1), (a)[stbiw__sbn(a)++] = (v)) -#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0) -#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)), 0 : 0) +#define stbiw__sbpush(a, v) (stbiw__sbmaybegrow(a,1), (a)[stbiw__sbn(a)++] = (v)) +#define stbiw__sbcount(a) ((a) ? stbiw__sbn(a) : 0) +#define stbiw__sbfree(a) ((a) ? STBIW_FREE(stbiw__sbraw(a)),0 : 0) static void *stbiw__sbgrowf(void **arr, int increment, int itemsize) { - int m = *arr ? 2 * stbiw__sbm(*arr) + increment : increment + 1; - void *p = - STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? (stbiw__sbm(*arr) * itemsize + sizeof(int) * 2) : 0, - itemsize * m + sizeof(int) * 2); - STBIW_ASSERT(p); - if (p) { - if (!*arr) - ((int *)p)[1] = 0; - *arr = (void *)((int *)p + 2); - stbiw__sbm(*arr) = m; - } - return *arr; + int m = *arr ? 2*stbiw__sbm(*arr)+increment : increment+1; + void *p = STBIW_REALLOC_SIZED(*arr ? stbiw__sbraw(*arr) : 0, *arr ? 
(stbiw__sbm(*arr)*itemsize + sizeof(int)*2) : 0, itemsize * m + sizeof(int)*2); + STBIW_ASSERT(p); + if (p) { + if (!*arr) ((int *) p)[1] = 0; + *arr = (void *) ((int *) p + 2); + stbiw__sbm(*arr) = m; + } + return *arr; } static unsigned char *stbiw__zlib_flushf(unsigned char *data, unsigned int *bitbuffer, int *bitcount) { - while (*bitcount >= 8) { - stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); - *bitbuffer >>= 8; - *bitcount -= 8; - } - return data; + while (*bitcount >= 8) { + stbiw__sbpush(data, STBIW_UCHAR(*bitbuffer)); + *bitbuffer >>= 8; + *bitcount -= 8; + } + return data; } static int stbiw__zlib_bitrev(int code, int codebits) { - int res = 0; - while (codebits--) { - res = (res << 1) | (code & 1); - code >>= 1; - } - return res; + int res=0; + while (codebits--) { + res = (res << 1) | (code & 1); + code >>= 1; + } + return res; } static unsigned int stbiw__zlib_countm(unsigned char *a, unsigned char *b, int limit) { - int i; - for (i = 0; i < limit && i < 258; ++i) - if (a[i] != b[i]) - break; - return i; + int i; + for (i=0; i < limit && i < 258; ++i) + if (a[i] != b[i]) break; + return i; } static unsigned int stbiw__zhash(unsigned char *data) { - stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); - hash ^= hash << 3; - hash += hash >> 5; - hash ^= hash << 4; - hash += hash >> 17; - hash ^= hash << 25; - hash += hash >> 6; - return hash; + stbiw_uint32 hash = data[0] + (data[1] << 8) + (data[2] << 16); + hash ^= hash << 3; + hash += hash >> 5; + hash ^= hash << 4; + hash += hash >> 17; + hash ^= hash << 25; + hash += hash >> 6; + return hash; } #define stbiw__zlib_flush() (out = stbiw__zlib_flushf(out, &bitbuf, &bitcount)) -#define stbiw__zlib_add(code, codebits) (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush()) -#define stbiw__zlib_huffa(b, c) stbiw__zlib_add(stbiw__zlib_bitrev(b, c), c) +#define stbiw__zlib_add(code,codebits) \ + (bitbuf |= (code) << bitcount, bitcount += (codebits), stbiw__zlib_flush()) 
+#define stbiw__zlib_huffa(b,c) stbiw__zlib_add(stbiw__zlib_bitrev(b,c),c) // default huffman tables -#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8) -#define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9) -#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256, 7) -#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280, 8) -#define stbiw__zlib_huff(n) \ - ((n) <= 143 ? stbiw__zlib_huff1(n) \ - : (n) <= 255 ? stbiw__zlib_huff2(n) \ - : (n) <= 279 ? stbiw__zlib_huff3(n) \ - : stbiw__zlib_huff4(n)) +#define stbiw__zlib_huff1(n) stbiw__zlib_huffa(0x30 + (n), 8) +#define stbiw__zlib_huff2(n) stbiw__zlib_huffa(0x190 + (n)-144, 9) +#define stbiw__zlib_huff3(n) stbiw__zlib_huffa(0 + (n)-256,7) +#define stbiw__zlib_huff4(n) stbiw__zlib_huffa(0xc0 + (n)-280,8) +#define stbiw__zlib_huff(n) ((n) <= 143 ? stbiw__zlib_huff1(n) : (n) <= 255 ? stbiw__zlib_huff2(n) : (n) <= 279 ? stbiw__zlib_huff3(n) : stbiw__zlib_huff4(n)) #define stbiw__zlib_huffb(n) ((n) <= 143 ? 
stbiw__zlib_huff1(n) : stbiw__zlib_huff2(n)) -#define stbiw__ZHASH 16384 +#define stbiw__ZHASH 16384 #endif // STBIW_ZLIB_COMPRESS -STBIWDEF unsigned char *stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) +STBIWDEF unsigned char * stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality) { #ifdef STBIW_ZLIB_COMPRESS - // user provided a zlib compress implementation, use that - return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); -#else // use builtin - static unsigned short lengthc[] = {3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, - 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 259}; - static unsigned char lengtheb[] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, - 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0}; - static unsigned short distc[] = {1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, - 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, - 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 32768}; - static unsigned char disteb[] = {0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, - 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13}; - unsigned int bitbuf = 0; - int i, j, bitcount = 0; - unsigned char *out = NULL; - unsigned char ***hash_table = (unsigned char ***)STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char **)); - if (hash_table == NULL) - return NULL; - if (quality < 5) - quality = 5; - - stbiw__sbpush(out, 0x78); // DEFLATE 32K window - stbiw__sbpush(out, 0x5e); // FLEVEL = 1 - stbiw__zlib_add(1, 1); // BFINAL = 1 - stbiw__zlib_add(1, 2); // BTYPE = 1 -- fixed huffman - - for (i = 0; i < stbiw__ZHASH; ++i) - hash_table[i] = NULL; - - i = 0; - while (i < data_len - 3) { - // hash next 3 bytes of data to be compressed - int h = stbiw__zhash(data + i) & (stbiw__ZHASH - 1), best = 3; - unsigned char *bestloc = 0; - unsigned char **hlist = hash_table[h]; - int n = stbiw__sbcount(hlist); - for (j = 0; j < n; ++j) { - if (hlist[j] - data > i - 32768) { // if entry lies within window - 
int d = stbiw__zlib_countm(hlist[j], data + i, data_len - i); - if (d >= best) { - best = d; - bestloc = hlist[j]; - } + // user provided a zlib compress implementation, use that + return STBIW_ZLIB_COMPRESS(data, data_len, out_len, quality); +#else // use builtin + static unsigned short lengthc[] = { 3,4,5,6,7,8,9,10,11,13,15,17,19,23,27,31,35,43,51,59,67,83,99,115,131,163,195,227,258, 259 }; + static unsigned char lengtheb[]= { 0,0,0,0,0,0,0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0 }; + static unsigned short distc[] = { 1,2,3,4,5,7,9,13,17,25,33,49,65,97,129,193,257,385,513,769,1025,1537,2049,3073,4097,6145,8193,12289,16385,24577, 32768 }; + static unsigned char disteb[] = { 0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13 }; + unsigned int bitbuf=0; + int i,j, bitcount=0; + unsigned char *out = NULL; + unsigned char ***hash_table = (unsigned char***) STBIW_MALLOC(stbiw__ZHASH * sizeof(unsigned char**)); + if (hash_table == NULL) + return NULL; + if (quality < 5) quality = 5; + + stbiw__sbpush(out, 0x78); // DEFLATE 32K window + stbiw__sbpush(out, 0x5e); // FLEVEL = 1 + stbiw__zlib_add(1,1); // BFINAL = 1 + stbiw__zlib_add(1,2); // BTYPE = 1 -- fixed huffman + + for (i=0; i < stbiw__ZHASH; ++i) + hash_table[i] = NULL; + + i=0; + while (i < data_len-3) { + // hash next 3 bytes of data to be compressed + int h = stbiw__zhash(data+i)&(stbiw__ZHASH-1), best=3; + unsigned char *bestloc = 0; + unsigned char **hlist = hash_table[h]; + int n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32768) { // if entry lies within window + int d = stbiw__zlib_countm(hlist[j], data+i, data_len-i); + if (d >= best) { best=d; bestloc=hlist[j]; } + } } - } - // when hash table entry is too long, delete half the entries - if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2 * quality) { - STBIW_MEMMOVE(hash_table[h], hash_table[h] + quality, sizeof(hash_table[h][0]) * quality); - stbiw__sbn(hash_table[h]) = quality; 
- } - stbiw__sbpush(hash_table[h], data + i); - - if (bestloc) { - // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal - h = stbiw__zhash(data + i + 1) & (stbiw__ZHASH - 1); - hlist = hash_table[h]; - n = stbiw__sbcount(hlist); - for (j = 0; j < n; ++j) { - if (hlist[j] - data > i - 32767) { - int e = stbiw__zlib_countm(hlist[j], data + i + 1, data_len - i - 1); - if (e > best) { // if next match is better, bail on current match - bestloc = NULL; - break; - } - } + // when hash table entry is too long, delete half the entries + if (hash_table[h] && stbiw__sbn(hash_table[h]) == 2*quality) { + STBIW_MEMMOVE(hash_table[h], hash_table[h]+quality, sizeof(hash_table[h][0])*quality); + stbiw__sbn(hash_table[h]) = quality; + } + stbiw__sbpush(hash_table[h],data+i); + + if (bestloc) { + // "lazy matching" - check match at *next* byte, and if it's better, do cur byte as literal + h = stbiw__zhash(data+i+1)&(stbiw__ZHASH-1); + hlist = hash_table[h]; + n = stbiw__sbcount(hlist); + for (j=0; j < n; ++j) { + if (hlist[j]-data > i-32767) { + int e = stbiw__zlib_countm(hlist[j], data+i+1, data_len-i-1); + if (e > best) { // if next match is better, bail on current match + bestloc = NULL; + break; + } + } + } } - } - - if (bestloc) { - int d = (int)(data + i - bestloc); // distance back - STBIW_ASSERT(d <= 32767 && best <= 258); - for (j = 0; best > lengthc[j + 1] - 1; ++j) - ; - stbiw__zlib_huff(j + 257); - if (lengtheb[j]) - stbiw__zlib_add(best - lengthc[j], lengtheb[j]); - for (j = 0; d > distc[j + 1] - 1; ++j) - ; - stbiw__zlib_add(stbiw__zlib_bitrev(j, 5), 5); - if (disteb[j]) - stbiw__zlib_add(d - distc[j], disteb[j]); - i += best; - } else { + + if (bestloc) { + int d = (int) (data+i - bestloc); // distance back + STBIW_ASSERT(d <= 32767 && best <= 258); + for (j=0; best > lengthc[j+1]-1; ++j); + stbiw__zlib_huff(j+257); + if (lengtheb[j]) stbiw__zlib_add(best - lengthc[j], lengtheb[j]); + for (j=0; d > distc[j+1]-1; ++j); + 
stbiw__zlib_add(stbiw__zlib_bitrev(j,5),5); + if (disteb[j]) stbiw__zlib_add(d - distc[j], disteb[j]); + i += best; + } else { + stbiw__zlib_huffb(data[i]); + ++i; + } + } + // write out final bytes + for (;i < data_len; ++i) stbiw__zlib_huffb(data[i]); - ++i; - } - } - // write out final bytes - for (; i < data_len; ++i) - stbiw__zlib_huffb(data[i]); - stbiw__zlib_huff(256); // end of block - // pad with 0 bits to byte boundary - while (bitcount) - stbiw__zlib_add(0, 1); - - for (i = 0; i < stbiw__ZHASH; ++i) - (void)stbiw__sbfree(hash_table[i]); - STBIW_FREE(hash_table); - - // store uncompressed instead if compression was worse - if (stbiw__sbn(out) > data_len + 2 + ((data_len + 32766) / 32767) * 5) { - stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1 - for (j = 0; j < data_len;) { - int blocklen = data_len - j; - if (blocklen > 32767) - blocklen = 32767; - stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression - stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN - stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8)); - stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN - stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8)); - memcpy(out + stbiw__sbn(out), data + j, blocklen); - stbiw__sbn(out) += blocklen; - j += blocklen; - } - } - - { - // compute adler32 on input - unsigned int s1 = 1, s2 = 0; - int blocklen = (int)(data_len % 5552); - j = 0; - while (j < data_len) { - for (i = 0; i < blocklen; ++i) { - s1 += data[j + i]; - s2 += s1; + stbiw__zlib_huff(256); // end of block + // pad with 0 bits to byte boundary + while (bitcount) + stbiw__zlib_add(0,1); + + for (i=0; i < stbiw__ZHASH; ++i) + (void) stbiw__sbfree(hash_table[i]); + STBIW_FREE(hash_table); + + // store uncompressed instead if compression was worse + if (stbiw__sbn(out) > data_len + 2 + ((data_len+32766)/32767)*5) { + stbiw__sbn(out) = 2; // truncate to DEFLATE 32K window and FLEVEL = 1 + for (j = 0; j < data_len;) { + int blocklen = data_len - j; + if 
(blocklen > 32767) blocklen = 32767; + stbiw__sbpush(out, data_len - j == blocklen); // BFINAL = ?, BTYPE = 0 -- no compression + stbiw__sbpush(out, STBIW_UCHAR(blocklen)); // LEN + stbiw__sbpush(out, STBIW_UCHAR(blocklen >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(~blocklen)); // NLEN + stbiw__sbpush(out, STBIW_UCHAR(~blocklen >> 8)); + memcpy(out+stbiw__sbn(out), data+j, blocklen); + stbiw__sbn(out) += blocklen; + j += blocklen; } - s1 %= 65521; - s2 %= 65521; - j += blocklen; - blocklen = 5552; - } - stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); - stbiw__sbpush(out, STBIW_UCHAR(s2)); - stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); - stbiw__sbpush(out, STBIW_UCHAR(s1)); - } - *out_len = stbiw__sbn(out); - // make returned pointer freeable - STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); - return (unsigned char *)stbiw__sbraw(out); + } + + { + // compute adler32 on input + unsigned int s1=1, s2=0; + int blocklen = (int) (data_len % 5552); + j=0; + while (j < data_len) { + for (i=0; i < blocklen; ++i) { s1 += data[j+i]; s2 += s1; } + s1 %= 65521; s2 %= 65521; + j += blocklen; + blocklen = 5552; + } + stbiw__sbpush(out, STBIW_UCHAR(s2 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s2)); + stbiw__sbpush(out, STBIW_UCHAR(s1 >> 8)); + stbiw__sbpush(out, STBIW_UCHAR(s1)); + } + *out_len = stbiw__sbn(out); + // make returned pointer freeable + STBIW_MEMMOVE(stbiw__sbraw(out), out, *out_len); + return (unsigned char *) stbiw__sbraw(out); #endif // STBIW_ZLIB_COMPRESS } static unsigned int stbiw__crc32(unsigned char *buffer, int len) { #ifdef STBIW_CRC32 - return STBIW_CRC32(buffer, len); + return STBIW_CRC32(buffer, len); #else - static unsigned int crc_table[256] = { - 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, 0x0eDB8832, - 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, - 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 
0xFD62F97A, - 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, - 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, - 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, - 0xCFBA9599, 0xB8BDA50F, 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, - 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, - 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, - 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, - 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE, 0xA3BC0074, - 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, - 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, - 0x206F85B3, 0xB966D409, 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, - 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, - 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, - 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, - 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8, 0xA1D1937E, - 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, - 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, - 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, - 
0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, - 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, - 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, - 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, 0xA00AE278, - 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, - 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, - 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, - 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D}; - - unsigned int crc = ~0u; - int i; - for (i = 0; i < len; ++i) - crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; - return ~crc; + static unsigned int crc_table[256] = + { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0eDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 
0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D + }; + + unsigned int crc = ~0u; + int i; + for (i=0; i < len; ++i) + crc = (crc >> 8) ^ crc_table[buffer[i] ^ (crc & 0xff)]; + return ~crc; #endif } -#define stbiw__wpng4(o, a, b, c, d) \ - ((o)[0] = STBIW_UCHAR(a), (o)[1] = STBIW_UCHAR(b), (o)[2] = STBIW_UCHAR(c), (o)[3] = STBIW_UCHAR(d), (o) += 4) -#define stbiw__wp32(data, v) stbiw__wpng4(data, (v) >> 24, (v) >> 16, (v) >> 8, (v)); -#define stbiw__wptag(data, s) stbiw__wpng4(data, s[0], s[1], s[2], s[3]) +#define stbiw__wpng4(o,a,b,c,d) ((o)[0]=STBIW_UCHAR(a),(o)[1]=STBIW_UCHAR(b),(o)[2]=STBIW_UCHAR(c),(o)[3]=STBIW_UCHAR(d),(o)+=4) +#define stbiw__wp32(data,v) stbiw__wpng4(data, (v)>>24,(v)>>16,(v)>>8,(v)); +#define stbiw__wptag(data,s) stbiw__wpng4(data, s[0],s[1],s[2],s[3]) static void stbiw__wpcrc(unsigned char **data, int len) { - unsigned int crc = stbiw__crc32(*data - len - 4, len + 4); - stbiw__wp32(*data, crc); + unsigned int crc = stbiw__crc32(*data - len - 4, len+4); + stbiw__wp32(*data, crc); } static unsigned char stbiw__paeth(int a, int b, int c) { - int p = a + b - c, pa = abs(p - a), pb = abs(p - b), pc = abs(p - c); - if (pa <= pb && pa <= pc) - return STBIW_UCHAR(a); - if (pb <= pc) - return STBIW_UCHAR(b); - return STBIW_UCHAR(c); + int p = a + b - c, pa = abs(p-a), pb = abs(p-b), pc = abs(p-c); + if (pa <= pb && pa <= pc) return STBIW_UCHAR(a); + if (pb <= pc) return STBIW_UCHAR(b); + return STBIW_UCHAR(c); } // @OPTIMIZE: provide an option that always forces left-predict or paeth predict -static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, - int filter_type, signed char *line_buffer) +static void stbiw__encode_png_line(unsigned char *pixels, int stride_bytes, int width, int height, int y, int n, int filter_type, signed char *line_buffer) { - static int mapping[] = {0, 1, 2, 3, 4}; - static 
int firstmap[] = {0, 1, 0, 5, 6}; - int *mymap = (y != 0) ? mapping : firstmap; - int i; - int type = mymap[filter_type]; - unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height - 1 - y : y); - int signed_stride = stbi__flip_vertically_on_write ? -stride_bytes : stride_bytes; - - if (type == 0) { - memcpy(line_buffer, z, width * n); - return; - } - - // first loop isn't optimized since it's just one pixel - for (i = 0; i < n; ++i) { - switch (type) { - case 1: - line_buffer[i] = z[i]; - break; - case 2: - line_buffer[i] = z[i] - z[i - signed_stride]; - break; - case 3: - line_buffer[i] = z[i] - (z[i - signed_stride] >> 1); - break; - case 4: - line_buffer[i] = (signed char)(z[i] - stbiw__paeth(0, z[i - signed_stride], 0)); - break; - case 5: - line_buffer[i] = z[i]; - break; - case 6: - line_buffer[i] = z[i]; - break; - } - } - switch (type) { - case 1: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - z[i - n]; - break; - case 2: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - z[i - signed_stride]; - break; - case 3: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - ((z[i - n] + z[i - signed_stride]) >> 1); - break; - case 4: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - stbiw__paeth(z[i - n], z[i - signed_stride], z[i - signed_stride - n]); - break; - case 5: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - (z[i - n] >> 1); - break; - case 6: - for (i = n; i < width * n; ++i) - line_buffer[i] = z[i] - stbiw__paeth(z[i - n], 0, 0); - break; - } + static int mapping[] = { 0,1,2,3,4 }; + static int firstmap[] = { 0,1,0,5,6 }; + int *mymap = (y != 0) ? mapping : firstmap; + int i; + int type = mymap[filter_type]; + unsigned char *z = pixels + stride_bytes * (stbi__flip_vertically_on_write ? height-1-y : y); + int signed_stride = stbi__flip_vertically_on_write ? 
-stride_bytes : stride_bytes; + + if (type==0) { + memcpy(line_buffer, z, width*n); + return; + } + + // first loop isn't optimized since it's just one pixel + for (i = 0; i < n; ++i) { + switch (type) { + case 1: line_buffer[i] = z[i]; break; + case 2: line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: line_buffer[i] = z[i] - (z[i-signed_stride]>>1); break; + case 4: line_buffer[i] = (signed char) (z[i] - stbiw__paeth(0,z[i-signed_stride],0)); break; + case 5: line_buffer[i] = z[i]; break; + case 6: line_buffer[i] = z[i]; break; + } + } + switch (type) { + case 1: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-n]; break; + case 2: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - z[i-signed_stride]; break; + case 3: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - ((z[i-n] + z[i-signed_stride])>>1); break; + case 4: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], z[i-signed_stride], z[i-signed_stride-n]); break; + case 5: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - (z[i-n]>>1); break; + case 6: for (i=n; i < width*n; ++i) line_buffer[i] = z[i] - stbiw__paeth(z[i-n], 0,0); break; + } } -STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, - int *out_len) +STBIWDEF unsigned char *stbi_write_png_to_mem(const unsigned char *pixels, int stride_bytes, int x, int y, int n, int *out_len) { - int force_filter = stbi_write_force_png_filter; - int ctype[5] = {-1, 0, 4, 2, 6}; - unsigned char sig[8] = {137, 80, 78, 71, 13, 10, 26, 10}; - unsigned char *out, *o, *filt, *zlib; - signed char *line_buffer; - int j, zlen; - - if (stride_bytes == 0) - stride_bytes = x * n; - - if (force_filter >= 5) { - force_filter = -1; - } - - filt = (unsigned char *)STBIW_MALLOC((x * n + 1) * y); - if (!filt) - return 0; - line_buffer = (signed char *)STBIW_MALLOC(x * n); - if (!line_buffer) { - STBIW_FREE(filt); - return 0; - } - for (j = 0; j < y; ++j) { - int filter_type; - 
if (force_filter > -1) { - filter_type = force_filter; - stbiw__encode_png_line((unsigned char *)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer); - } else { // Estimate the best filter by running through all of them: - int best_filter = 0, best_filter_val = 0x7fffffff, est, i; - for (filter_type = 0; filter_type < 5; filter_type++) { - stbiw__encode_png_line((unsigned char *)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); - - // Estimate the entropy of the line using this filter; the less, the better. - est = 0; - for (i = 0; i < x * n; ++i) { - est += abs((signed char)line_buffer[i]); - } - if (est < best_filter_val) { - best_filter_val = est; - best_filter = filter_type; - } - } - if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it - stbiw__encode_png_line((unsigned char *)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); - filter_type = best_filter; + int force_filter = stbi_write_force_png_filter; + int ctype[5] = { -1, 0, 4, 2, 6 }; + unsigned char sig[8] = { 137,80,78,71,13,10,26,10 }; + unsigned char *out,*o, *filt, *zlib; + signed char *line_buffer; + int j,zlen; + + if (stride_bytes == 0) + stride_bytes = x * n; + + if (force_filter >= 5) { + force_filter = -1; + } + + filt = (unsigned char *) STBIW_MALLOC((x*n+1) * y); if (!filt) return 0; + line_buffer = (signed char *) STBIW_MALLOC(x * n); if (!line_buffer) { STBIW_FREE(filt); return 0; } + for (j=0; j < y; ++j) { + int filter_type; + if (force_filter > -1) { + filter_type = force_filter; + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, force_filter, line_buffer); + } else { // Estimate the best filter by running through all of them: + int best_filter = 0, best_filter_val = 0x7fffffff, est, i; + for (filter_type = 0; filter_type < 5; filter_type++) { + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, filter_type, line_buffer); + + // Estimate the entropy 
of the line using this filter; the less, the better. + est = 0; + for (i = 0; i < x*n; ++i) { + est += abs((signed char) line_buffer[i]); + } + if (est < best_filter_val) { + best_filter_val = est; + best_filter = filter_type; + } + } + if (filter_type != best_filter) { // If the last iteration already got us the best filter, don't redo it + stbiw__encode_png_line((unsigned char*)(pixels), stride_bytes, x, y, j, n, best_filter, line_buffer); + filter_type = best_filter; + } } - } - // when we get here, filter_type contains the filter type, and line_buffer contains the data - filt[j * (x * n + 1)] = (unsigned char)filter_type; - STBIW_MEMMOVE(filt + j * (x * n + 1) + 1, line_buffer, x * n); - } - STBIW_FREE(line_buffer); - zlib = stbi_zlib_compress(filt, y * (x * n + 1), &zlen, stbi_write_png_compression_level); - STBIW_FREE(filt); - if (!zlib) - return 0; - - // each tag requires 12 bytes of overhead - out = (unsigned char *)STBIW_MALLOC(8 + 12 + 13 + 12 + zlen + 12); - if (!out) - return 0; - *out_len = 8 + 12 + 13 + 12 + zlen + 12; - - o = out; - STBIW_MEMMOVE(o, sig, 8); - o += 8; - stbiw__wp32(o, 13); // header length - stbiw__wptag(o, "IHDR"); - stbiw__wp32(o, x); - stbiw__wp32(o, y); - *o++ = 8; - *o++ = STBIW_UCHAR(ctype[n]); - *o++ = 0; - *o++ = 0; - *o++ = 0; - stbiw__wpcrc(&o, 13); - - stbiw__wp32(o, zlen); - stbiw__wptag(o, "IDAT"); - STBIW_MEMMOVE(o, zlib, zlen); - o += zlen; - STBIW_FREE(zlib); - stbiw__wpcrc(&o, zlen); - - stbiw__wp32(o, 0); - stbiw__wptag(o, "IEND"); - stbiw__wpcrc(&o, 0); - - STBIW_ASSERT(o == out + *out_len); - - return out; + // when we get here, filter_type contains the filter type, and line_buffer contains the data + filt[j*(x*n+1)] = (unsigned char) filter_type; + STBIW_MEMMOVE(filt+j*(x*n+1)+1, line_buffer, x*n); + } + STBIW_FREE(line_buffer); + zlib = stbi_zlib_compress(filt, y*( x*n+1), &zlen, stbi_write_png_compression_level); + STBIW_FREE(filt); + if (!zlib) return 0; + + // each tag requires 12 bytes of overhead + out = 
(unsigned char *) STBIW_MALLOC(8 + 12+13 + 12+zlen + 12); + if (!out) return 0; + *out_len = 8 + 12+13 + 12+zlen + 12; + + o=out; + STBIW_MEMMOVE(o,sig,8); o+= 8; + stbiw__wp32(o, 13); // header length + stbiw__wptag(o, "IHDR"); + stbiw__wp32(o, x); + stbiw__wp32(o, y); + *o++ = 8; + *o++ = STBIW_UCHAR(ctype[n]); + *o++ = 0; + *o++ = 0; + *o++ = 0; + stbiw__wpcrc(&o,13); + + stbiw__wp32(o, zlen); + stbiw__wptag(o, "IDAT"); + STBIW_MEMMOVE(o, zlib, zlen); + o += zlen; + STBIW_FREE(zlib); + stbiw__wpcrc(&o, zlen); + + stbiw__wp32(o,0); + stbiw__wptag(o, "IEND"); + stbiw__wpcrc(&o,0); + + STBIW_ASSERT(o == out + *out_len); + + return out; } #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_png(char const *filename, int x, int y, int comp, const void *data, int stride_bytes) { - FILE *f; - int len; - unsigned char *png = stbi_write_png_to_mem((const unsigned char *)data, stride_bytes, x, y, comp, &len); - if (png == NULL) - return 0; - - f = stbiw__fopen(filename, "wb"); - if (!f) { - STBIW_FREE(png); - return 0; - } - fwrite(png, 1, len, f); - fclose(f); - STBIW_FREE(png); - return 1; + FILE *f; + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, x, y, comp, &len); + if (png == NULL) return 0; + + f = stbiw__fopen(filename, "wb"); + if (!f) { STBIW_FREE(png); return 0; } + fwrite(png, 1, len, f); + fclose(f); + STBIW_FREE(png); + return 1; } #endif -STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, - int stride_bytes) +STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int stride_bytes) { - int len; - unsigned char *png = stbi_write_png_to_mem((const unsigned char *)data, stride_bytes, x, y, comp, &len); - if (png == NULL) - return 0; - func(context, png, len); - STBIW_FREE(png); - return 1; + int len; + unsigned char *png = stbi_write_png_to_mem((const unsigned char *) data, stride_bytes, 
x, y, comp, &len); + if (png == NULL) return 0; + func(context, png, len); + STBIW_FREE(png); + return 1; } + /* *************************************************************************** * * JPEG writer @@ -1328,457 +1247,381 @@ STBIWDEF int stbi_write_png_to_func(stbi_write_func *func, void *context, int x, * public domain Simple, Minimalistic JPEG writer - http://www.jonolick.com/code.html */ -static const unsigned char stbiw__jpg_ZigZag[] = {0, 1, 5, 6, 14, 15, 27, 28, 2, 4, 7, 13, 16, 26, 29, 42, - 3, 8, 12, 17, 25, 30, 41, 43, 9, 11, 18, 24, 31, 40, 44, 53, - 10, 19, 23, 32, 39, 45, 52, 54, 20, 22, 33, 38, 46, 51, 55, 60, - 21, 34, 37, 47, 50, 56, 59, 61, 35, 36, 48, 49, 57, 58, 62, 63}; - -static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) -{ - int bitBuf = *bitBufP, bitCnt = *bitCntP; - bitCnt += bs[1]; - bitBuf |= bs[0] << (24 - bitCnt); - while (bitCnt >= 8) { - unsigned char c = (bitBuf >> 16) & 255; - stbiw__putc(s, c); - if (c == 255) { - stbiw__putc(s, 0); - } - bitBuf <<= 8; - bitCnt -= 8; - } - *bitBufP = bitBuf; - *bitCntP = bitCnt; +static const unsigned char stbiw__jpg_ZigZag[] = { 0,1,5,6,14,15,27,28,2,4,7,13,16,26,29,42,3,8,12,17,25,30,41,43,9,11,18, + 24,31,40,44,53,10,19,23,32,39,45,52,54,20,22,33,38,46,51,55,60,21,34,37,47,50,56,59,61,35,36,48,49,57,58,62,63 }; + +static void stbiw__jpg_writeBits(stbi__write_context *s, int *bitBufP, int *bitCntP, const unsigned short *bs) { + int bitBuf = *bitBufP, bitCnt = *bitCntP; + bitCnt += bs[1]; + bitBuf |= bs[0] << (24 - bitCnt); + while(bitCnt >= 8) { + unsigned char c = (bitBuf >> 16) & 255; + stbiw__putc(s, c); + if(c == 255) { + stbiw__putc(s, 0); + } + bitBuf <<= 8; + bitCnt -= 8; + } + *bitBufP = bitBuf; + *bitCntP = bitCnt; } -static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, - float *d7p) -{ - float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = 
*d6p, d7 = *d7p; - float z1, z2, z3, z4, z5, z11, z13; - - float tmp0 = d0 + d7; - float tmp7 = d0 - d7; - float tmp1 = d1 + d6; - float tmp6 = d1 - d6; - float tmp2 = d2 + d5; - float tmp5 = d2 - d5; - float tmp3 = d3 + d4; - float tmp4 = d3 - d4; - - // Even part - float tmp10 = tmp0 + tmp3; // phase 2 - float tmp13 = tmp0 - tmp3; - float tmp11 = tmp1 + tmp2; - float tmp12 = tmp1 - tmp2; - - d0 = tmp10 + tmp11; // phase 3 - d4 = tmp10 - tmp11; - - z1 = (tmp12 + tmp13) * 0.707106781f; // c4 - d2 = tmp13 + z1; // phase 5 - d6 = tmp13 - z1; - - // Odd part - tmp10 = tmp4 + tmp5; // phase 2 - tmp11 = tmp5 + tmp6; - tmp12 = tmp6 + tmp7; - - // The rotator is modified from fig 4-8 to avoid extra negations. - z5 = (tmp10 - tmp12) * 0.382683433f; // c6 - z2 = tmp10 * 0.541196100f + z5; // c2-c6 - z4 = tmp12 * 1.306562965f + z5; // c2+c6 - z3 = tmp11 * 0.707106781f; // c4 - - z11 = tmp7 + z3; // phase 5 - z13 = tmp7 - z3; - - *d5p = z13 + z2; // phase 6 - *d3p = z13 - z2; - *d1p = z11 + z4; - *d7p = z11 - z4; - - *d0p = d0; - *d2p = d2; - *d4p = d4; - *d6p = d6; +static void stbiw__jpg_DCT(float *d0p, float *d1p, float *d2p, float *d3p, float *d4p, float *d5p, float *d6p, float *d7p) { + float d0 = *d0p, d1 = *d1p, d2 = *d2p, d3 = *d3p, d4 = *d4p, d5 = *d5p, d6 = *d6p, d7 = *d7p; + float z1, z2, z3, z4, z5, z11, z13; + + float tmp0 = d0 + d7; + float tmp7 = d0 - d7; + float tmp1 = d1 + d6; + float tmp6 = d1 - d6; + float tmp2 = d2 + d5; + float tmp5 = d2 - d5; + float tmp3 = d3 + d4; + float tmp4 = d3 - d4; + + // Even part + float tmp10 = tmp0 + tmp3; // phase 2 + float tmp13 = tmp0 - tmp3; + float tmp11 = tmp1 + tmp2; + float tmp12 = tmp1 - tmp2; + + d0 = tmp10 + tmp11; // phase 3 + d4 = tmp10 - tmp11; + + z1 = (tmp12 + tmp13) * 0.707106781f; // c4 + d2 = tmp13 + z1; // phase 5 + d6 = tmp13 - z1; + + // Odd part + tmp10 = tmp4 + tmp5; // phase 2 + tmp11 = tmp5 + tmp6; + tmp12 = tmp6 + tmp7; + + // The rotator is modified from fig 4-8 to avoid extra negations. 
+ z5 = (tmp10 - tmp12) * 0.382683433f; // c6 + z2 = tmp10 * 0.541196100f + z5; // c2-c6 + z4 = tmp12 * 1.306562965f + z5; // c2+c6 + z3 = tmp11 * 0.707106781f; // c4 + + z11 = tmp7 + z3; // phase 5 + z13 = tmp7 - z3; + + *d5p = z13 + z2; // phase 6 + *d3p = z13 - z2; + *d1p = z11 + z4; + *d7p = z11 - z4; + + *d0p = d0; *d2p = d2; *d4p = d4; *d6p = d6; } -static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) -{ - int tmp1 = val < 0 ? -val : val; - val = val < 0 ? val - 1 : val; - bits[1] = 1; - while (tmp1 >>= 1) { - ++bits[1]; - } - bits[0] = val & ((1 << bits[1]) - 1); +static void stbiw__jpg_calcBits(int val, unsigned short bits[2]) { + int tmp1 = val < 0 ? -val : val; + val = val < 0 ? val-1 : val; + bits[1] = 1; + while(tmp1 >>= 1) { + ++bits[1]; + } + bits[0] = val & ((1< 0) && (DU[end0pos] == 0); --end0pos) { - } - // end0pos = first element in reverse order !=0 - if (end0pos == 0) { - stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); - return DU[0]; - } - for (i = 1; i <= end0pos; ++i) { - int startpos = i; - int nrzeroes; - unsigned short bits[2]; - for (; DU[i] == 0 && i <= end0pos; ++i) { - } - nrzeroes = i - startpos; - if (nrzeroes >= 16) { - int lng = nrzeroes >> 4; - int nrmarker; - for (nrmarker = 1; nrmarker <= lng; ++nrmarker) - stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes); - nrzeroes &= 15; - } - stbiw__jpg_calcBits(DU[i], bits); - stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes << 4) + bits[1]]); - stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits); - } - if (end0pos != 63) { - stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); - } - return DU[0]; +static int stbiw__jpg_processDU(stbi__write_context *s, int *bitBuf, int *bitCnt, float *CDU, int du_stride, float *fdtbl, int DC, const unsigned short HTDC[256][2], const unsigned short HTAC[256][2]) { + const unsigned short EOB[2] = { HTAC[0x00][0], HTAC[0x00][1] }; + const unsigned short M16zeroes[2] = { HTAC[0xF0][0], HTAC[0xF0][1] }; + int dataOff, i, j, n, diff, end0pos, x, y; + int 
DU[64]; + + // DCT rows + for(dataOff=0, n=du_stride*8; dataOff0)&&(DU[end0pos]==0); --end0pos) { + } + // end0pos = first element in reverse order !=0 + if(end0pos == 0) { + stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); + return DU[0]; + } + for(i = 1; i <= end0pos; ++i) { + int startpos = i; + int nrzeroes; + unsigned short bits[2]; + for (; DU[i]==0 && i<=end0pos; ++i) { + } + nrzeroes = i-startpos; + if ( nrzeroes >= 16 ) { + int lng = nrzeroes>>4; + int nrmarker; + for (nrmarker=1; nrmarker <= lng; ++nrmarker) + stbiw__jpg_writeBits(s, bitBuf, bitCnt, M16zeroes); + nrzeroes &= 15; + } + stbiw__jpg_calcBits(DU[i], bits); + stbiw__jpg_writeBits(s, bitBuf, bitCnt, HTAC[(nrzeroes<<4)+bits[1]]); + stbiw__jpg_writeBits(s, bitBuf, bitCnt, bits); + } + if(end0pos != 63) { + stbiw__jpg_writeBits(s, bitBuf, bitCnt, EOB); + } + return DU[0]; } -static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void *data, int quality) -{ - // Constants that don't pollute global namespace - static const unsigned char std_dc_luminance_nrcodes[] = {0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0}; - static const unsigned char std_dc_luminance_values[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; - static const unsigned char std_ac_luminance_nrcodes[] = {0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d}; - static const unsigned char std_ac_luminance_values[] = { - 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, - 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08, 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0, 0x24, 0x33, 0x62, 0x72, - 0x82, 0x09, 0x0a, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, - 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x83, - 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 
0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, - 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, - 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2, - 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa}; - static const unsigned char std_dc_chrominance_nrcodes[] = {0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0}; - static const unsigned char std_dc_chrominance_values[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}; - static const unsigned char std_ac_chrominance_nrcodes[] = {0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77}; - static const unsigned char std_ac_chrominance_values[] = { - 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, - 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0, 0x15, 0x62, 0x72, 0xd1, - 0x0a, 0x16, 0x24, 0x34, 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, - 0x37, 0x38, 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, - 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, - 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, - 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, - 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, - 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa}; - // Huffman tables - static const unsigned short YDC_HT[256][2] = {{0, 2}, {2, 3}, {3, 3}, {4, 3}, {5, 3}, {6, 3}, - {14, 4}, {30, 5}, {62, 6}, {126, 7}, {254, 8}, {510, 9}}; - static const unsigned short UVDC_HT[256][2] 
= {{0, 2}, {1, 2}, {2, 2}, {6, 3}, {14, 4}, {30, 5}, - {62, 6}, {126, 7}, {254, 8}, {510, 9}, {1022, 10}, {2046, 11}}; - static const unsigned short YAC_HT[256][2] = { - {10, 4}, {0, 2}, {1, 2}, {4, 3}, {11, 4}, {26, 5}, {120, 7}, {248, 8}, - {1014, 10}, {65410, 16}, {65411, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {12, 4}, {27, 5}, {121, 7}, {502, 9}, {2038, 11}, {65412, 16}, {65413, 16}, - {65414, 16}, {65415, 16}, {65416, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {28, 5}, {249, 8}, {1015, 10}, {4084, 12}, {65417, 16}, {65418, 16}, {65419, 16}, - {65420, 16}, {65421, 16}, {65422, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {58, 6}, {503, 9}, {4085, 12}, {65423, 16}, {65424, 16}, {65425, 16}, {65426, 16}, - {65427, 16}, {65428, 16}, {65429, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {59, 6}, {1016, 10}, {65430, 16}, {65431, 16}, {65432, 16}, {65433, 16}, {65434, 16}, - {65435, 16}, {65436, 16}, {65437, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {122, 7}, {2039, 11}, {65438, 16}, {65439, 16}, {65440, 16}, {65441, 16}, {65442, 16}, - {65443, 16}, {65444, 16}, {65445, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {123, 7}, {4086, 12}, {65446, 16}, {65447, 16}, {65448, 16}, {65449, 16}, {65450, 16}, - {65451, 16}, {65452, 16}, {65453, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {250, 8}, {4087, 12}, {65454, 16}, {65455, 16}, {65456, 16}, {65457, 16}, {65458, 16}, - {65459, 16}, {65460, 16}, {65461, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {504, 9}, {32704, 15}, {65462, 16}, {65463, 16}, {65464, 16}, {65465, 16}, {65466, 16}, - {65467, 16}, {65468, 16}, {65469, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {505, 9}, {65470, 16}, {65471, 16}, {65472, 16}, {65473, 16}, {65474, 16}, {65475, 16}, - {65476, 16}, {65477, 16}, {65478, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {506, 9}, {65479, 16}, {65480, 16}, {65481, 16}, {65482, 16}, {65483, 16}, {65484, 
16}, - {65485, 16}, {65486, 16}, {65487, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {1017, 10}, {65488, 16}, {65489, 16}, {65490, 16}, {65491, 16}, {65492, 16}, {65493, 16}, - {65494, 16}, {65495, 16}, {65496, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {1018, 10}, {65497, 16}, {65498, 16}, {65499, 16}, {65500, 16}, {65501, 16}, {65502, 16}, - {65503, 16}, {65504, 16}, {65505, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {2040, 11}, {65506, 16}, {65507, 16}, {65508, 16}, {65509, 16}, {65510, 16}, {65511, 16}, - {65512, 16}, {65513, 16}, {65514, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {65515, 16}, {65516, 16}, {65517, 16}, {65518, 16}, {65519, 16}, {65520, 16}, {65521, 16}, - {65522, 16}, {65523, 16}, {65524, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {2041, 11}, {65525, 16}, {65526, 16}, {65527, 16}, {65528, 16}, {65529, 16}, {65530, 16}, {65531, 16}, - {65532, 16}, {65533, 16}, {65534, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}; - static const unsigned short UVAC_HT[256][2] = { - {0, 2}, {1, 2}, {4, 3}, {10, 4}, {24, 5}, {25, 5}, {56, 6}, {120, 7}, - {500, 9}, {1014, 10}, {4084, 12}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {11, 4}, {57, 6}, {246, 8}, {501, 9}, {2038, 11}, {4085, 12}, {65416, 16}, - {65417, 16}, {65418, 16}, {65419, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {26, 5}, {247, 8}, {1015, 10}, {4086, 12}, {32706, 15}, {65420, 16}, {65421, 16}, - {65422, 16}, {65423, 16}, {65424, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {27, 5}, {248, 8}, {1016, 10}, {4087, 12}, {65425, 16}, {65426, 16}, {65427, 16}, - {65428, 16}, {65429, 16}, {65430, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {58, 6}, {502, 9}, {65431, 16}, {65432, 16}, {65433, 16}, {65434, 16}, {65435, 16}, - {65436, 16}, {65437, 16}, {65438, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {59, 6}, {1017, 10}, {65439, 16}, {65440, 16}, {65441, 16}, {65442, 16}, {65443, 16}, - {65444, 16}, 
{65445, 16}, {65446, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {121, 7}, {2039, 11}, {65447, 16}, {65448, 16}, {65449, 16}, {65450, 16}, {65451, 16}, - {65452, 16}, {65453, 16}, {65454, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {122, 7}, {2040, 11}, {65455, 16}, {65456, 16}, {65457, 16}, {65458, 16}, {65459, 16}, - {65460, 16}, {65461, 16}, {65462, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {249, 8}, {65463, 16}, {65464, 16}, {65465, 16}, {65466, 16}, {65467, 16}, {65468, 16}, - {65469, 16}, {65470, 16}, {65471, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {503, 9}, {65472, 16}, {65473, 16}, {65474, 16}, {65475, 16}, {65476, 16}, {65477, 16}, - {65478, 16}, {65479, 16}, {65480, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {504, 9}, {65481, 16}, {65482, 16}, {65483, 16}, {65484, 16}, {65485, 16}, {65486, 16}, - {65487, 16}, {65488, 16}, {65489, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {505, 9}, {65490, 16}, {65491, 16}, {65492, 16}, {65493, 16}, {65494, 16}, {65495, 16}, - {65496, 16}, {65497, 16}, {65498, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {506, 9}, {65499, 16}, {65500, 16}, {65501, 16}, {65502, 16}, {65503, 16}, {65504, 16}, - {65505, 16}, {65506, 16}, {65507, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {2041, 11}, {65508, 16}, {65509, 16}, {65510, 16}, {65511, 16}, {65512, 16}, {65513, 16}, - {65514, 16}, {65515, 16}, {65516, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {0, 0}, {16352, 14}, {65517, 16}, {65518, 16}, {65519, 16}, {65520, 16}, {65521, 16}, {65522, 16}, - {65523, 16}, {65524, 16}, {65525, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, - {1018, 10}, {32707, 15}, {65526, 16}, {65527, 16}, {65528, 16}, {65529, 16}, {65530, 16}, {65531, 16}, - {65532, 16}, {65533, 16}, {65534, 16}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0}}; - static const int YQT[] = {16, 11, 10, 16, 24, 40, 51, 61, 12, 12, 14, 19, 26, 58, 60, 55, - 14, 13, 16, 24, 40, 57, 69, 56, 14, 
17, 22, 29, 51, 87, 80, 62, - 18, 22, 37, 56, 68, 109, 103, 77, 24, 35, 55, 64, 81, 104, 113, 92, - 49, 64, 78, 87, 103, 121, 120, 101, 72, 92, 95, 98, 112, 100, 103, 99}; - static const int UVQT[] = {17, 18, 24, 47, 99, 99, 99, 99, 18, 21, 26, 66, 99, 99, 99, 99, 24, 26, 56, 99, 99, 99, - 99, 99, 47, 66, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, - 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99}; - static const float aasf[] = {1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, - 1.175875602f * 2.828427125f, 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, - 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f}; - - int row, col, i, k, subsample; - float fdtbl_Y[64], fdtbl_UV[64]; - unsigned char YTable[64], UVTable[64]; - - if (!data || !width || !height || comp > 4 || comp < 1) { - return 0; - } - - quality = quality ? quality : 90; - subsample = quality <= 90 ? 1 : 0; - quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; - quality = quality < 50 ? 5000 / quality : 200 - quality * 2; - - for (i = 0; i < 64; ++i) { - int uvti, yti = (YQT[i] * quality + 50) / 100; - YTable[stbiw__jpg_ZigZag[i]] = (unsigned char)(yti < 1 ? 1 : yti > 255 ? 255 : yti); - uvti = (UVQT[i] * quality + 50) / 100; - UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char)(uvti < 1 ? 1 : uvti > 255 ? 
255 : uvti); - } - - for (row = 0, k = 0; row < 8; ++row) { - for (col = 0; col < 8; ++col, ++k) { - fdtbl_Y[k] = 1 / (YTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); - fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); - } - } - - // Write Headers - { - static const unsigned char head0[] = {0xFF, 0xD8, 0xFF, 0xE0, 0, 0x10, 'J', 'F', 'I', 'F', 0, 1, 1, - 0, 0, 1, 0, 1, 0, 0, 0xFF, 0xDB, 0, 0x84, 0}; - static const unsigned char head2[] = {0xFF, 0xDA, 0, 0xC, 3, 1, 0, 2, 0x11, 3, 0x11, 0, 0x3F, 0}; - const unsigned char head1[] = {0xFF, - 0xC0, - 0, - 0x11, - 8, - (unsigned char)(height >> 8), - STBIW_UCHAR(height), - (unsigned char)(width >> 8), - STBIW_UCHAR(width), - 3, - 1, - (unsigned char)(subsample ? 0x22 : 0x11), - 0, - 2, - 0x11, - 1, - 3, - 0x11, - 1, - 0xFF, - 0xC4, - 0x01, - 0xA2, - 0}; - s->func(s->context, (void *)head0, sizeof(head0)); - s->func(s->context, (void *)YTable, sizeof(YTable)); - stbiw__putc(s, 1); - s->func(s->context, UVTable, sizeof(UVTable)); - s->func(s->context, (void *)head1, sizeof(head1)); - s->func(s->context, (void *)(std_dc_luminance_nrcodes + 1), sizeof(std_dc_luminance_nrcodes) - 1); - s->func(s->context, (void *)std_dc_luminance_values, sizeof(std_dc_luminance_values)); - stbiw__putc(s, 0x10); // HTYACinfo - s->func(s->context, (void *)(std_ac_luminance_nrcodes + 1), sizeof(std_ac_luminance_nrcodes) - 1); - s->func(s->context, (void *)std_ac_luminance_values, sizeof(std_ac_luminance_values)); - stbiw__putc(s, 1); // HTUDCinfo - s->func(s->context, (void *)(std_dc_chrominance_nrcodes + 1), sizeof(std_dc_chrominance_nrcodes) - 1); - s->func(s->context, (void *)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); - stbiw__putc(s, 0x11); // HTUACinfo - s->func(s->context, (void *)(std_ac_chrominance_nrcodes + 1), sizeof(std_ac_chrominance_nrcodes) - 1); - s->func(s->context, (void *)std_ac_chrominance_values, sizeof(std_ac_chrominance_values)); - s->func(s->context, (void *)head2, 
sizeof(head2)); - } - - // Encode 8x8 macroblocks - { - static const unsigned short fillBits[] = {0x7F, 7}; - int DCY = 0, DCU = 0, DCV = 0; - int bitBuf = 0, bitCnt = 0; - // comp == 2 is grey+alpha (alpha is ignored) - int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; - const unsigned char *dataR = (const unsigned char *)data; - const unsigned char *dataG = dataR + ofsG; - const unsigned char *dataB = dataR + ofsB; - int x, y, pos; - if (subsample) { - for (y = 0; y < height; y += 16) { - for (x = 0; x < width; x += 16) { - float Y[256], U[256], V[256]; - for (row = y, pos = 0; row < y + 16; ++row) { - // row >= height => use last input row - int clamped_row = (row < height) ? row : height - 1; - int base_p = (stbi__flip_vertically_on_write ? (height - 1 - clamped_row) : clamped_row) * width * comp; - for (col = x; col < x + 16; ++col, ++pos) { - // if col >= width => use pixel from last input column - int p = base_p + ((col < width) ? col : (width - 1)) * comp; - float r = dataR[p], g = dataG[p], b = dataB[p]; - Y[pos] = +0.29900f * r + 0.58700f * g + 0.11400f * b - 128; - U[pos] = -0.16874f * r - 0.33126f * g + 0.50000f * b; - V[pos] = +0.50000f * r - 0.41869f * g - 0.08131f * b; - } - } - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y + 136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); - - // subsample U,V - { - float subU[64], subV[64]; - int yy, xx; - for (yy = 0, pos = 0; yy < 8; ++yy) { - for (xx = 0; xx < 8; ++xx, ++pos) { - int j = yy * 32 + xx * 2; - subU[pos] = (U[j + 0] + U[j + 1] + U[j + 16] + U[j + 17]) * 0.25f; - subV[pos] = (V[j + 0] + V[j + 1] + V[j + 16] + V[j + 17]) * 0.25f; - } - } - DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); 
- DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); - } - } +static int stbi_write_jpg_core(stbi__write_context *s, int width, int height, int comp, const void* data, int quality) { + // Constants that don't pollute global namespace + static const unsigned char std_dc_luminance_nrcodes[] = {0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0}; + static const unsigned char std_dc_luminance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_luminance_nrcodes[] = {0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d}; + static const unsigned char std_ac_luminance_values[] = { + 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08, + 0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28, + 0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59, + 0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89, + 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6, + 0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2, + 0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + static const unsigned char std_dc_chrominance_nrcodes[] = {0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0}; + static const unsigned char std_dc_chrominance_values[] = {0,1,2,3,4,5,6,7,8,9,10,11}; + static const unsigned char std_ac_chrominance_nrcodes[] = {0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77}; + static const unsigned char std_ac_chrominance_values[] = { + 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91, + 
0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26, + 0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58, + 0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87, + 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4, + 0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda, + 0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,0xf9,0xfa + }; + // Huffman tables + static const unsigned short YDC_HT[256][2] = { {0,2},{2,3},{3,3},{4,3},{5,3},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9}}; + static const unsigned short UVDC_HT[256][2] = { {0,2},{1,2},{2,2},{6,3},{14,4},{30,5},{62,6},{126,7},{254,8},{510,9},{1022,10},{2046,11}}; + static const unsigned short YAC_HT[256][2] = { + {10,4},{0,2},{1,2},{4,3},{11,4},{26,5},{120,7},{248,8},{1014,10},{65410,16},{65411,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {12,4},{27,5},{121,7},{502,9},{2038,11},{65412,16},{65413,16},{65414,16},{65415,16},{65416,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {28,5},{249,8},{1015,10},{4084,12},{65417,16},{65418,16},{65419,16},{65420,16},{65421,16},{65422,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{503,9},{4085,12},{65423,16},{65424,16},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1016,10},{65430,16},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2039,11},{65438,16},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{123,7},{4086,12},{65446,16},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {250,8},{4087,12},{65454,16},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{32704,15},{65462,16},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65470,16},{65471,16},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65479,16},{65480,16},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1017,10},{65488,16},{65489,16},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{65497,16},{65498,16},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2040,11},{65506,16},{65507,16},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {65515,16},{65516,16},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65525,16},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const unsigned short UVAC_HT[256][2] = { + {0,2},{1,2},{4,3},{10,4},{24,5},{25,5},{56,6},{120,7},{500,9},{1014,10},{4084,12},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {11,4},{57,6},{246,8},{501,9},{2038,11},{4085,12},{65416,16},{65417,16},{65418,16},{65419,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {26,5},{247,8},{1015,10},{4086,12},{32706,15},{65420,16},{65421,16},{65422,16},{65423,16},{65424,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + 
{27,5},{248,8},{1016,10},{4087,12},{65425,16},{65426,16},{65427,16},{65428,16},{65429,16},{65430,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {58,6},{502,9},{65431,16},{65432,16},{65433,16},{65434,16},{65435,16},{65436,16},{65437,16},{65438,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {59,6},{1017,10},{65439,16},{65440,16},{65441,16},{65442,16},{65443,16},{65444,16},{65445,16},{65446,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {121,7},{2039,11},{65447,16},{65448,16},{65449,16},{65450,16},{65451,16},{65452,16},{65453,16},{65454,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {122,7},{2040,11},{65455,16},{65456,16},{65457,16},{65458,16},{65459,16},{65460,16},{65461,16},{65462,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {249,8},{65463,16},{65464,16},{65465,16},{65466,16},{65467,16},{65468,16},{65469,16},{65470,16},{65471,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {503,9},{65472,16},{65473,16},{65474,16},{65475,16},{65476,16},{65477,16},{65478,16},{65479,16},{65480,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {504,9},{65481,16},{65482,16},{65483,16},{65484,16},{65485,16},{65486,16},{65487,16},{65488,16},{65489,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {505,9},{65490,16},{65491,16},{65492,16},{65493,16},{65494,16},{65495,16},{65496,16},{65497,16},{65498,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {506,9},{65499,16},{65500,16},{65501,16},{65502,16},{65503,16},{65504,16},{65505,16},{65506,16},{65507,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {2041,11},{65508,16},{65509,16},{65510,16},{65511,16},{65512,16},{65513,16},{65514,16},{65515,16},{65516,16},{0,0},{0,0},{0,0},{0,0},{0,0},{0,0}, + {16352,14},{65517,16},{65518,16},{65519,16},{65520,16},{65521,16},{65522,16},{65523,16},{65524,16},{65525,16},{0,0},{0,0},{0,0},{0,0},{0,0}, + {1018,10},{32707,15},{65526,16},{65527,16},{65528,16},{65529,16},{65530,16},{65531,16},{65532,16},{65533,16},{65534,16},{0,0},{0,0},{0,0},{0,0},{0,0} + }; + static const int YQT[] = 
{16,11,10,16,24,40,51,61,12,12,14,19,26,58,60,55,14,13,16,24,40,57,69,56,14,17,22,29,51,87,80,62,18,22, + 37,56,68,109,103,77,24,35,55,64,81,104,113,92,49,64,78,87,103,121,120,101,72,92,95,98,112,100,103,99}; + static const int UVQT[] = {17,18,24,47,99,99,99,99,18,21,26,66,99,99,99,99,24,26,56,99,99,99,99,99,47,66,99,99,99,99,99,99, + 99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99}; + static const float aasf[] = { 1.0f * 2.828427125f, 1.387039845f * 2.828427125f, 1.306562965f * 2.828427125f, 1.175875602f * 2.828427125f, + 1.0f * 2.828427125f, 0.785694958f * 2.828427125f, 0.541196100f * 2.828427125f, 0.275899379f * 2.828427125f }; + + int row, col, i, k, subsample; + float fdtbl_Y[64], fdtbl_UV[64]; + unsigned char YTable[64], UVTable[64]; + + if(!data || !width || !height || comp > 4 || comp < 1) { + return 0; + } + + quality = quality ? quality : 90; + subsample = quality <= 90 ? 1 : 0; + quality = quality < 1 ? 1 : quality > 100 ? 100 : quality; + quality = quality < 50 ? 5000 / quality : 200 - quality * 2; + + for(i = 0; i < 64; ++i) { + int uvti, yti = (YQT[i]*quality+50)/100; + YTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (yti < 1 ? 1 : yti > 255 ? 255 : yti); + uvti = (UVQT[i]*quality+50)/100; + UVTable[stbiw__jpg_ZigZag[i]] = (unsigned char) (uvti < 1 ? 1 : uvti > 255 ? 255 : uvti); + } + + for(row = 0, k = 0; row < 8; ++row) { + for(col = 0; col < 8; ++col, ++k) { + fdtbl_Y[k] = 1 / (YTable [stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); + fdtbl_UV[k] = 1 / (UVTable[stbiw__jpg_ZigZag[k]] * aasf[row] * aasf[col]); } - } else { - for (y = 0; y < height; y += 8) { - for (x = 0; x < width; x += 8) { - float Y[64], U[64], V[64]; - for (row = y, pos = 0; row < y + 8; ++row) { - // row >= height => use last input row - int clamped_row = (row < height) ? row : height - 1; - int base_p = (stbi__flip_vertically_on_write ? 
(height - 1 - clamped_row) : clamped_row) * width * comp; - for (col = x; col < x + 8; ++col, ++pos) { - // if col >= width => use pixel from last input column - int p = base_p + ((col < width) ? col : (width - 1)) * comp; - float r = dataR[p], g = dataG[p], b = dataB[p]; - Y[pos] = +0.29900f * r + 0.58700f * g + 0.11400f * b - 128; - U[pos] = -0.16874f * r - 0.33126f * g + 0.50000f * b; - V[pos] = +0.50000f * r - 0.41869f * g - 0.08131f * b; + } + + // Write Headers + { + static const unsigned char head0[] = { 0xFF,0xD8,0xFF,0xE0,0,0x10,'J','F','I','F',0,1,1,0,0,1,0,1,0,0,0xFF,0xDB,0,0x84,0 }; + static const unsigned char head2[] = { 0xFF,0xDA,0,0xC,3,1,0,2,0x11,3,0x11,0,0x3F,0 }; + const unsigned char head1[] = { 0xFF,0xC0,0,0x11,8,(unsigned char)(height>>8),STBIW_UCHAR(height),(unsigned char)(width>>8),STBIW_UCHAR(width), + 3,1,(unsigned char)(subsample?0x22:0x11),0,2,0x11,1,3,0x11,1,0xFF,0xC4,0x01,0xA2,0 }; + s->func(s->context, (void*)head0, sizeof(head0)); + s->func(s->context, (void*)YTable, sizeof(YTable)); + stbiw__putc(s, 1); + s->func(s->context, UVTable, sizeof(UVTable)); + s->func(s->context, (void*)head1, sizeof(head1)); + s->func(s->context, (void*)(std_dc_luminance_nrcodes+1), sizeof(std_dc_luminance_nrcodes)-1); + s->func(s->context, (void*)std_dc_luminance_values, sizeof(std_dc_luminance_values)); + stbiw__putc(s, 0x10); // HTYACinfo + s->func(s->context, (void*)(std_ac_luminance_nrcodes+1), sizeof(std_ac_luminance_nrcodes)-1); + s->func(s->context, (void*)std_ac_luminance_values, sizeof(std_ac_luminance_values)); + stbiw__putc(s, 1); // HTUDCinfo + s->func(s->context, (void*)(std_dc_chrominance_nrcodes+1), sizeof(std_dc_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_dc_chrominance_values, sizeof(std_dc_chrominance_values)); + stbiw__putc(s, 0x11); // HTUACinfo + s->func(s->context, (void*)(std_ac_chrominance_nrcodes+1), sizeof(std_ac_chrominance_nrcodes)-1); + s->func(s->context, (void*)std_ac_chrominance_values, 
sizeof(std_ac_chrominance_values)); + s->func(s->context, (void*)head2, sizeof(head2)); + } + + // Encode 8x8 macroblocks + { + static const unsigned short fillBits[] = {0x7F, 7}; + int DCY=0, DCU=0, DCV=0; + int bitBuf=0, bitCnt=0; + // comp == 2 is grey+alpha (alpha is ignored) + int ofsG = comp > 2 ? 1 : 0, ofsB = comp > 2 ? 2 : 0; + const unsigned char *dataR = (const unsigned char *)data; + const unsigned char *dataG = dataR + ofsG; + const unsigned char *dataB = dataR + ofsB; + int x, y, pos; + if(subsample) { + for(y = 0; y < height; y += 16) { + for(x = 0; x < width; x += 16) { + float Y[256], U[256], V[256]; + for(row = y, pos = 0; row < y+16; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+16; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+0, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+8, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+128, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y+136, 16, fdtbl_Y, DCY, YDC_HT, YAC_HT); + + // subsample U,V + { + float subU[64], subV[64]; + int yy, xx; + for(yy = 0, pos = 0; yy < 8; ++yy) { + for(xx = 0; xx < 8; ++xx, ++pos) { + int j = yy*32+xx*2; + subU[pos] = (U[j+0] + U[j+1] + U[j+16] + U[j+17]) * 0.25f; + subV[pos] = (V[j+0] + V[j+1] + V[j+16] + V[j+17]) * 0.25f; + } + } + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subU, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = 
stbiw__jpg_processDU(s, &bitBuf, &bitCnt, subV, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } } - } - - DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); - DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); - DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); - } + } + } else { + for(y = 0; y < height; y += 8) { + for(x = 0; x < width; x += 8) { + float Y[64], U[64], V[64]; + for(row = y, pos = 0; row < y+8; ++row) { + // row >= height => use last input row + int clamped_row = (row < height) ? row : height - 1; + int base_p = (stbi__flip_vertically_on_write ? (height-1-clamped_row) : clamped_row)*width*comp; + for(col = x; col < x+8; ++col, ++pos) { + // if col >= width => use pixel from last input column + int p = base_p + ((col < width) ? col : (width-1))*comp; + float r = dataR[p], g = dataG[p], b = dataB[p]; + Y[pos]= +0.29900f*r + 0.58700f*g + 0.11400f*b - 128; + U[pos]= -0.16874f*r - 0.33126f*g + 0.50000f*b; + V[pos]= +0.50000f*r - 0.41869f*g - 0.08131f*b; + } + } + + DCY = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, Y, 8, fdtbl_Y, DCY, YDC_HT, YAC_HT); + DCU = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, U, 8, fdtbl_UV, DCU, UVDC_HT, UVAC_HT); + DCV = stbiw__jpg_processDU(s, &bitBuf, &bitCnt, V, 8, fdtbl_UV, DCV, UVDC_HT, UVAC_HT); + } + } } - } - // Do the bit alignment of the EOI marker - stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); - } + // Do the bit alignment of the EOI marker + stbiw__jpg_writeBits(s, &bitBuf, &bitCnt, fillBits); + } - // EOI - stbiw__putc(s, 0xFF); - stbiw__putc(s, 0xD9); + // EOI + stbiw__putc(s, 0xFF); + stbiw__putc(s, 0xD9); - return 1; + return 1; } -STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, - int quality) +STBIWDEF int stbi_write_jpg_to_func(stbi_write_func *func, void *context, int x, int y, int comp, const void *data, int quality) { - 
stbi__write_context s = {0}; - stbi__start_write_callbacks(&s, func, context); - return stbi_write_jpg_core(&s, x, y, comp, (void *)data, quality); + stbi__write_context s = { 0 }; + stbi__start_write_callbacks(&s, func, context); + return stbi_write_jpg_core(&s, x, y, comp, (void *) data, quality); } + #ifndef STBI_WRITE_NO_STDIO STBIWDEF int stbi_write_jpg(char const *filename, int x, int y, int comp, const void *data, int quality) { - stbi__write_context s = {0}; - if (stbi__start_write_file(&s, filename)) { - int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); - stbi__end_write_file(&s); - return r; - } else - return 0; + stbi__write_context s = { 0 }; + if (stbi__start_write_file(&s,filename)) { + int r = stbi_write_jpg_core(&s, x, y, comp, data, quality); + stbi__end_write_file(&s); + return r; + } else + return 0; } #endif diff --git a/3rdparty/tinyexr/CMakeLists.txt b/3rdparty/tinyexr/CMakeLists.txt index 605b19e5d6..a0487cb4a0 100644 --- a/3rdparty/tinyexr/CMakeLists.txt +++ b/3rdparty/tinyexr/CMakeLists.txt @@ -1,6 +1,6 @@ project(${TINYEXR_LIBRARY}) -# Version at: https://github.com/syoyo/tinyexr/commit/3c8e0cb7b406adc1bf9abe31510b2d72849ca6d2 +# Comes from https://github.com/syoyo/tinyexr set(TINYEXR_MAJOR_VERSION 1 PARENT_SCOPE) set(TINYEXR_MINOR_VERSION 0 PARENT_SCOPE) -set(TINYEXR_PATCH_VERSION 2 PARENT_SCOPE) +set(TINYEXR_PATCH_VERSION 9 PARENT_SCOPE) diff --git a/3rdparty/tinyexr/README.ViSP.md b/3rdparty/tinyexr/README.ViSP.md new file mode 100644 index 0000000000..68504a8b2e --- /dev/null +++ b/3rdparty/tinyexr/README.ViSP.md @@ -0,0 +1,2 @@ +Porting of the tinyexr library: https://github.com/syoyo/tinyexr +tinyexr version is 1.0.9. diff --git a/3rdparty/tinyexr/tinyexr.h b/3rdparty/tinyexr/tinyexr.h index cd6a1ef52a..64ee67f2c6 100644 --- a/3rdparty/tinyexr/tinyexr.h +++ b/3rdparty/tinyexr/tinyexr.h @@ -114,6 +114,11 @@ extern "C" { #define TINYEXR_USE_STB_ZLIB (0) #endif +// Use nanozlib. 
+#ifndef TINYEXR_USE_NANOZLIB +#define TINYEXR_USE_NANOZLIB (0) +#endif + // Disable PIZ compression when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) @@ -381,7 +386,7 @@ extern int IsEXRFromMemory(const unsigned char *memory, size_t size); // error extern int SaveEXRToMemory(const float *data, const int width, const int height, const int components, const int save_as_fp16, - const unsigned char **buffer, const char **err); + unsigned char **buffer, const char **err); // @deprecated { Not recommended, but handy to use. } // Saves single-frame OpenEXR image to a buffer. Assume EXR image contains RGB(A) channels. @@ -607,12 +612,21 @@ extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, #ifndef NOMINMAX #define NOMINMAX #endif -#include // for UTF-8 +#include // for UTF-8 and memory-mapping + +#if !defined(WINAPI_FAMILY) || (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP) +#define TINYEXR_USE_WIN32_MMAP (1) +#endif +#elif defined(__linux__) || defined(__unix__) +#include // for open() +#include // for memory-mapping +#include // for stat +#include // for close() +#define TINYEXR_USE_POSIX_MMAP (1) #endif #include -#include #include #include #include @@ -636,13 +650,15 @@ extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, #include #endif +#else // __cplusplus > 199711L +#define TINYEXR_HAS_CXX11 (0) #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include #endif -#if TINYEXR_USE_MINIZ +#if defined(TINYEXR_USE_MINIZ) && (TINYEXR_USE_MINIZ==1) #include #else // Issue #46. 
Please include your own zlib-compatible API header before @@ -650,6 +666,11 @@ extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, //#include "zlib.h" #endif +#if defined(TINYEXR_USE_NANOZLIB) && (TINYEXR_USE_NANOZLIB==1) +#define NANOZLIB_IMPLEMENTATION +#include "nanozlib.h" +#endif + #if TINYEXR_USE_STB_ZLIB // Since we don't know where a project has stb_image.h and stb_image_write.h // and whether they are in the include path, we don't include them here, and @@ -660,6 +681,7 @@ extern "C" int stbi_zlib_decode_buffer(char *obuffer, int olen, const char *ibuf extern "C" unsigned char *stbi_zlib_compress(unsigned char *data, int data_len, int *out_len, int quality); #endif + #if TINYEXR_USE_ZFP #ifdef __clang__ @@ -675,6 +697,27 @@ extern "C" unsigned char *stbi_zlib_compress(unsigned char *data, int data_len, #endif +// cond: conditional expression +// msg: std::string +// err: std::string* +#define TINYEXR_CHECK_AND_RETURN_MSG(cond, msg, err) do { \ + if (!(cond)) { \ + if (!err) { \ + std::ostringstream ss_e; \ + ss_e << __func__ << "():" << __LINE__ << msg << "\n"; \ + (*err) += ss_e.str(); \ + } \ + return false;\ + } \ + } while(0) + +// no error message. 
+#define TINYEXR_CHECK_AND_RETURN_C(cond, retcode) do { \ + if (!(cond)) { \ + return retcode; \ + } \ + } while(0) + namespace tinyexr { #if __cplusplus > 199711L @@ -737,7 +780,7 @@ static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { } static void swap2(unsigned short *val) { -#ifdef TINYEXR_LITTLE_ENDIAN +#if TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; @@ -796,7 +839,7 @@ static void cpy4(float *dst_val, const float *src_val) { #endif static void swap4(unsigned int *val) { -#ifdef TINYEXR_LITTLE_ENDIAN +#if TINYEXR_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; @@ -811,7 +854,7 @@ static void swap4(unsigned int *val) { } static void swap4(int *val) { -#ifdef TINYEXR_LITTLE_ENDIAN +#if TINYEXR_LITTLE_ENDIAN (void)val; #else int tmp = *val; @@ -826,7 +869,7 @@ static void swap4(int *val) { } static void swap4(float *val) { -#ifdef TINYEXR_LITTLE_ENDIAN +#if TINYEXR_LITTLE_ENDIAN (void)val; #else float tmp = *val; @@ -857,7 +900,7 @@ static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 #endif static void swap8(tinyexr::tinyexr_uint64 *val) { -#ifdef TINYEXR_LITTLE_ENDIAN +#if TINYEXR_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); @@ -1182,6 +1225,7 @@ static bool ReadChannelInfo(std::vector &channels, break; } ChannelInfo info; + info.requested_pixel_type = 0; tinyexr_int64 data_len = static_cast(data.size()) - (p - reinterpret_cast(data.data())); @@ -1313,7 +1357,7 @@ static bool CompressZip(unsigned char *dst, } } -#if TINYEXR_USE_MINIZ +#if defined(TINYEXR_USE_MINIZ) && (TINYEXR_USE_MINIZ==1) // // Compress the data using miniz // @@ -1327,7 +1371,7 @@ static bool CompressZip(unsigned char *dst, } compressedSize = outSize; -#elif TINYEXR_USE_STB_ZLIB +#elif defined(TINYEXR_USE_STB_ZLIB) && (TINYEXR_USE_STB_ZLIB==1) int outSize; unsigned char* ret = stbi_zlib_compress(const_cast(&tmpBuf.at(0)), src_size, &outSize, 8); if (!ret) { @@ -1336,6 +1380,18 @@ static 
bool CompressZip(unsigned char *dst, memcpy(dst, ret, outSize); free(ret); + compressedSize = outSize; +#elif defined(TINYEXR_USE_NANOZLIB) && (TINYEXR_USE_NANOZLIB==1) + uint64_t dstSize = nanoz_compressBound(static_cast(src_size)); + int outSize{0}; + unsigned char *ret = nanoz_compress(&tmpBuf.at(0), src_size, &outSize, /* quality */8); + if (!ret) { + return false; + } + + memcpy(dst, ret, outSize); + free(ret); + compressedSize = outSize; #else uLong outSize = compressBound(static_cast(src_size)); @@ -1368,7 +1424,7 @@ static bool DecompressZip(unsigned char *dst, } std::vector tmpBuf(*uncompressed_size); -#if TINYEXR_USE_MINIZ +#if defined(TINYEXR_USE_MINIZ) && (TINYEXR_USE_MINIZ==1) int ret = mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (MZ_OK != ret) { @@ -1380,6 +1436,17 @@ static bool DecompressZip(unsigned char *dst, if (ret < 0) { return false; } +#elif defined(TINYEXR_USE_NANOZLIB) && (TINYEXR_USE_NANOZLIB==1) + uint64_t dest_size = (*uncompressed_size); + uint64_t uncomp_size{0}; + nanoz_status_t ret = + nanoz_uncompress(src, src_size, dest_size, &tmpBuf.at(0), &uncomp_size); + if (NANOZ_SUCCESS != ret) { + return false; + } + if ((*uncompressed_size) != uncomp_size) { + return false; + } #else int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size); if (Z_OK != ret) { @@ -1548,7 +1615,7 @@ static int rleUncompress(int inLength, int maxLength, const signed char in[], // End of RLE code from OpenEXR ----------------------------------- -static void CompressRle(unsigned char *dst, +static bool CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector tmpBuf(src_size); @@ -1603,7 +1670,7 @@ static void CompressRle(unsigned char *dst, int outSize = rleCompress(static_cast(src_size), reinterpret_cast(&tmpBuf.at(0)), reinterpret_cast(dst)); - assert(outSize > 0); + TINYEXR_CHECK_AND_RETURN_C(outSize > 0, false); compressedSize = 
static_cast(outSize); @@ -1613,6 +1680,8 @@ static void CompressRle(unsigned char *dst, compressedSize = src_size; memcpy(dst, src, src_size); } + + return true; } static bool DecompressRle(unsigned char *dst, @@ -2152,7 +2221,7 @@ struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; -static void hufBuildEncTable( +static bool hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index @@ -2280,7 +2349,7 @@ static void hufBuildEncTable( for (int j = m;; j = hlink[j]) { scode[j]++; - assert(scode[j] <= 58); + TINYEXR_CHECK_AND_RETURN_C(scode[j] <= 58, false); if (hlink[j] == j) { // @@ -2299,7 +2368,7 @@ static void hufBuildEncTable( for (int j = mm;; j = hlink[j]) { scode[j]++; - assert(scode[j] <= 58); + TINYEXR_CHECK_AND_RETURN_C(scode[j] <= 58, false); if (hlink[j] == j) break; } @@ -2313,6 +2382,8 @@ static void hufBuildEncTable( hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); + + return true; } // @@ -2510,7 +2581,7 @@ static bool hufBuildDecTable(const long long *hcode, // i : encoding table unsigned int *p = pl->p; pl->p = new unsigned int[pl->lit]; - for (unsigned int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; + for (unsigned int i = 0; i < pl->lit - 1u; ++i) pl->p[i] = p[i]; delete[] p; } else { @@ -3024,7 +3095,6 @@ static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } - assert(0); return false; #endif @@ -3150,7 +3220,6 @@ static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, #if !TINYEXR_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} - assert(0); return false; #endif @@ -3190,7 +3259,13 @@ static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, ptr += maxNonZero - minNonZero + 1; readLen += maxNonZero - minNonZero + 1; } else { - return false; + // Issue 194 + if ((minNonZero == (BITMAP_SIZE - 1)) && (maxNonZero == 0)) { + // OK. all pixels are zero. And no need to read `bitmap` data. + } else { + // invalid minNonZero/maxNonZero combination. + return false; + } } std::vector lut(USHORT_RANGE); @@ -3201,12 +3276,12 @@ static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, // Huffman decoding // - int length; - if ((readLen + 4) > inLen) { return false; } + int length=0; + // length = *(reinterpret_cast(ptr)); tinyexr::cpy4(&length, reinterpret_cast(ptr)); ptr += sizeof(int); @@ -3386,8 +3461,8 @@ static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, zfp_stream *zfp = NULL; zfp_field *field = NULL; - assert((dst_width % 4) == 0); - assert((dst_num_lines % 4) == 0); + TINYEXR_CHECK_AND_RETURN_C((dst_width % 4) == 0, false); + TINYEXR_CHECK_AND_RETURN_C((dst_num_lines % 4) == 0, false); if ((size_t(dst_width) & 3U) || (size_t(dst_num_lines) & 3U)) { return false; @@ -3408,7 +3483,7 @@ static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines, } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { - assert(0); + return false; } size_t buf_size = zfp_stream_maximum_size(zfp, field); @@ -3452,8 +3527,8 @@ static bool CompressZfp(std::vector *outBuf, zfp_stream *zfp = NULL; zfp_field *field = NULL; - assert((width % 4) == 0); - assert((num_lines % 4) == 0); + TINYEXR_CHECK_AND_RETURN_C((width % 4) == 0, false); + TINYEXR_CHECK_AND_RETURN_C((num_lines % 4) == 0, false); if ((size_t(width) & 3U) || (size_t(num_lines) & 3U)) { return false; @@ -3473,7 +3548,7 @@ static bool CompressZfp(std::vector *outBuf, } else if (param.type == 
TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { zfp_stream_set_accuracy(zfp, param.tolerance); } else { - assert(0); + return false; } size_t buf_size = zfp_stream_maximum_size(zfp, field); @@ -3610,7 +3685,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast( @@ -3639,7 +3714,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const float *line_ptr = reinterpret_cast(&outBuf.at( v * pixel_data_size * static_cast(width) + @@ -3666,11 +3741,10 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else { - assert(0); + return false; } } #else - assert(0 && "PIZ is disabled in this build"); return false; #endif @@ -3682,7 +3756,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, pixel_data_size); unsigned long dstLen = static_cast(outBuf.size()); - assert(dstLen > 0); + TINYEXR_CHECK_AND_RETURN_C(dstLen > 0, false); if (!tinyexr::DecompressZip( reinterpret_cast(&outBuf.at(0)), &dstLen, data_ptr, static_cast(data_len))) { @@ -3749,7 +3823,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const unsigned int *line_ptr = 
reinterpret_cast( @@ -3778,7 +3852,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const float *line_ptr = reinterpret_cast( &outBuf.at(v * pixel_data_size * static_cast(width) + @@ -3805,7 +3879,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else { - assert(0); return false; } } @@ -3883,7 +3956,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast( @@ -3912,7 +3985,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const float *line_ptr = reinterpret_cast( &outBuf.at(v * pixel_data_size * static_cast(width) + @@ -3939,7 +4012,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else { - assert(0); return false; } } @@ -3950,7 +4022,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, if (!tinyexr::FindZFPCompressionParam(&zfp_compression_param, attributes, int(num_attributes), &e)) { // This code path should not be reachable. 
- assert(0); return false; } @@ -3960,7 +4031,7 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, pixel_data_size); unsigned long dstLen = outBuf.size(); - assert(dstLen > 0); + TINYEXR_CHECK_AND_RETURN_C(dstLen > 0, false); tinyexr::DecompressZfp(reinterpret_cast(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast(data_len), @@ -3977,9 +4048,9 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast(num_channels); c++) { - assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); + TINYEXR_CHECK_AND_RETURN_C(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT, false); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { - assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); + TINYEXR_CHECK_AND_RETURN_C(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT, false); for (size_t v = 0; v < static_cast(num_lines); v++) { const float *line_ptr = reinterpret_cast( &outBuf.at(v * pixel_data_size * static_cast(width) + @@ -4005,7 +4076,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, } } } else { - assert(0); return false; } } @@ -4013,7 +4083,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, (void)attributes; (void)num_attributes; (void)num_channels; - assert(0); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { @@ -4074,7 +4143,6 @@ static bool DecodePixelData(/* out */ unsigned char **out_images, outLine[u] = f32.f; } } else { - assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { @@ -4235,7 +4303,9 @@ static unsigned char **AllocateImage(int num_channels, images[c] = reinterpret_cast( static_cast(malloc(sizeof(float) * data_len))); } else { - assert(0); + images[c] = NULL; // just in case. 
+ valid = false; + break; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); @@ -4390,7 +4460,6 @@ static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, return TINYEXR_ERROR_INVALID_DATA; } - assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), sizeof(int)); tile_mode = data[8]; @@ -4777,6 +4846,7 @@ struct OffsetData { int num_y_levels; }; +// -1 = error static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { switch (tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: @@ -4789,13 +4859,15 @@ static int LevelIndex(int lx, int ly, int tile_level_mode, int num_x_levels) { return lx + ly * num_x_levels; default: - assert(false); + return -1; } return 0; } static int LevelSize(int toplevel_size, int level, int tile_rounding_mode) { - assert(level >= 0); + if (level < 0) { + return -1; + } int b = static_cast(1u << static_cast(level)); int level_size = toplevel_size / b; @@ -4816,9 +4888,13 @@ static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, int level_index = LevelIndex(exr_image->level_x, exr_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); int num_y_tiles = int(offset_data.offsets[size_t(level_index)].size()); - assert(num_y_tiles); + if (num_y_tiles < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } int num_x_tiles = int(offset_data.offsets[size_t(level_index)][0].size()); - assert(num_x_tiles); + if (num_x_tiles < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } int num_tiles = num_x_tiles * num_y_tiles; int err_code = TINYEXR_SUCCESS; @@ -4847,7 +4923,7 @@ static int DecodeTiledLevel(EXRImage* exr_image, const EXRHeader* exr_header, } #endif exr_image->tiles = static_cast( - calloc(sizeof(EXRTile), static_cast(num_tiles))); + calloc(static_cast(num_tiles), sizeof(EXRTile))); #if TINYEXR_HAS_CXX11 && (TINYEXR_USE_THREAD > 0) std::vector workers; @@ -5016,10 +5092,24 @@ static int 
DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, return TINYEXR_ERROR_INVALID_DATA; } - int data_width = - exr_header->data_window.max_x - exr_header->data_window.min_x + 1; - int data_height = - exr_header->data_window.max_y - exr_header->data_window.min_y + 1; + tinyexr_int64 data_width = + static_cast(exr_header->data_window.max_x) - static_cast(exr_header->data_window.min_x) + static_cast(1); + tinyexr_int64 data_height = + static_cast(exr_header->data_window.max_y) - static_cast(exr_header->data_window.min_y) + static_cast(1); + + if (data_width <= 0) { + if (err) { + (*err) += "Invalid data window width.\n"; + } + return TINYEXR_ERROR_INVALID_DATA; + } + + if (data_height <= 0) { + if (err) { + (*err) += "Invalid data window height.\n"; + } + return TINYEXR_ERROR_INVALID_DATA; + } // Do not allow too large data_width and data_height. header invalid? { @@ -5099,8 +5189,17 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, } level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level, exr_header->tile_rounding_mode); + if (level_image->width < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } + level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level, exr_header->tile_rounding_mode); + + if (level_image->height < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } + level_image->level_x = level; level_image->level_y = level; @@ -5126,8 +5225,16 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, level_image->width = LevelSize(exr_header->data_window.max_x - exr_header->data_window.min_x + 1, level_x, exr_header->tile_rounding_mode); + if (level_image->width < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } + level_image->height = LevelSize(exr_header->data_window.max_y - exr_header->data_window.min_y + 1, level_y, exr_header->tile_rounding_mode); + if (level_image->height < 1) { + return TINYEXR_ERROR_INVALID_DATA; + } + 
level_image->level_x = level_x; level_image->level_y = level_y; @@ -5161,7 +5268,7 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, bool alloc_success = false; exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, - data_width, data_height, &alloc_success); + int(data_width), int(data_height), &alloc_success); if (!alloc_success) { if (err) { @@ -5261,7 +5368,7 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast(data_len), exr_header->compression_type, exr_header->line_order, - data_width, data_height, data_width, y, line_no, + int(data_width), int(data_height), int(data_width), y, line_no, num_lines, static_cast(pixel_data_size), static_cast( exr_header->num_custom_attributes), @@ -5313,8 +5420,8 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, { exr_image->num_channels = num_channels; - exr_image->width = data_width; - exr_image->height = data_height; + exr_image->width = int(data_width); + exr_image->height = int(data_height); } return TINYEXR_SUCCESS; @@ -5323,8 +5430,12 @@ static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, static bool ReconstructLineOffsets( std::vector *offsets, size_t n, const unsigned char *head, const unsigned char *marker, const size_t size) { - assert(head < marker); - assert(offsets->size() == n); + if (head >= marker) { + return false; + } + if (offsets->size() != n) { + return false; + } for (size_t i = 0; i < n; i++) { size_t offset = static_cast(marker - head); @@ -5420,7 +5531,7 @@ static int CalculateNumXLevels(const EXRHeader* exr_header) { default: - assert(false); + return -1; } return num; @@ -5458,25 +5569,29 @@ static int CalculateNumYLevels(const EXRHeader* exr_header) { default: - assert(false); + return -1; } return num; } -static void CalculateNumTiles(std::vector& numTiles, +static bool 
CalculateNumTiles(std::vector& numTiles, int toplevel_size, int size, int tile_rounding_mode) { for (unsigned i = 0; i < numTiles.size(); i++) { int l = LevelSize(toplevel_size, int(i), tile_rounding_mode); - assert(l <= std::numeric_limits::max() - size + 1); + if (l < 0) { + return false; + } + TINYEXR_CHECK_AND_RETURN_C(l <= std::numeric_limits::max() - size + 1, false); numTiles[i] = (l + size - 1) / size; } + return true; } -static void PrecalculateTileInfo(std::vector& num_x_tiles, +static bool PrecalculateTileInfo(std::vector& num_x_tiles, std::vector& num_y_tiles, const EXRHeader* exr_header) { int min_x = exr_header->data_window.min_x; @@ -5485,20 +5600,35 @@ static void PrecalculateTileInfo(std::vector& num_x_tiles, int max_y = exr_header->data_window.max_y; int num_x_levels = CalculateNumXLevels(exr_header); + + if (num_x_levels < 0) { + return false; + } + int num_y_levels = CalculateNumYLevels(exr_header); + if (num_y_levels < 0) { + return false; + } + num_x_tiles.resize(size_t(num_x_levels)); num_y_tiles.resize(size_t(num_y_levels)); - CalculateNumTiles(num_x_tiles, + if (!CalculateNumTiles(num_x_tiles, max_x - min_x + 1, exr_header->tile_size_x, - exr_header->tile_rounding_mode); + exr_header->tile_rounding_mode)) { + return false; + } - CalculateNumTiles(num_y_tiles, + if (!CalculateNumTiles(num_y_tiles, max_y - min_y + 1, exr_header->tile_size_y, - exr_header->tile_rounding_mode); + exr_header->tile_rounding_mode)) { + return false; + } + + return true; } static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_blocks) { @@ -5510,6 +5640,7 @@ static void InitSingleResolutionOffsets(OffsetData& offset_data, size_t num_bloc } // Return sum of tile blocks. 
+// 0 = error static int InitTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, const std::vector& num_x_tiles, @@ -5520,7 +5651,7 @@ static int InitTileOffsets(OffsetData& offset_data, switch (exr_header->tile_level_mode) { case TINYEXR_TILE_ONE_LEVEL: case TINYEXR_TILE_MIPMAP_LEVELS: - assert(offset_data.num_x_levels == offset_data.num_y_levels); + TINYEXR_CHECK_AND_RETURN_C(offset_data.num_x_levels == offset_data.num_y_levels, 0); offset_data.offsets.resize(size_t(offset_data.num_x_levels)); for (unsigned int l = 0; l < offset_data.offsets.size(); ++l) { @@ -5551,7 +5682,7 @@ static int InitTileOffsets(OffsetData& offset_data, break; default: - assert(false); + return 0; } return num_tile_blocks; } @@ -5619,9 +5750,9 @@ static bool isValidTile(const EXRHeader* exr_header, return false; } -static void ReconstructTileOffsets(OffsetData& offset_data, +static bool ReconstructTileOffsets(OffsetData& offset_data, const EXRHeader* exr_header, - const unsigned char* head, const unsigned char* marker, const size_t /*size*/, + const unsigned char* head, const unsigned char* marker, const size_t size, bool isMultiPartFile, bool isDeep) { int numXLevels = offset_data.num_x_levels; @@ -5630,11 +5761,20 @@ static void ReconstructTileOffsets(OffsetData& offset_data, for (unsigned int dx = 0; dx < offset_data.offsets[l][dy].size(); ++dx) { tinyexr::tinyexr_uint64 tileOffset = tinyexr::tinyexr_uint64(marker - head); + if (isMultiPartFile) { + if ((marker + sizeof(int)) >= (head + size)) { + return false; + } + //int partNumber; marker += sizeof(int); } + if ((marker + 4 * sizeof(int)) >= (head + size)) { + return false; + } + int tileX; memcpy(&tileX, marker, sizeof(int)); tinyexr::swap4(&tileX); @@ -5656,6 +5796,9 @@ static void ReconstructTileOffsets(OffsetData& offset_data, marker += sizeof(int); if (isDeep) { + if ((marker + 2 * sizeof(tinyexr::tinyexr_int64)) >= (head + size)) { + return false; + } tinyexr::tinyexr_int64 packed_offset_table_size; 
memcpy(&packed_offset_table_size, marker, sizeof(tinyexr::tinyexr_int64)); tinyexr::swap8(reinterpret_cast(&packed_offset_table_size)); @@ -5669,24 +5812,55 @@ static void ReconstructTileOffsets(OffsetData& offset_data, // next Int64 is unpacked sample size - skip that too marker += packed_offset_table_size + packed_sample_size + 8; + if (marker >= (head + size)) { + return false; + } + } else { - int dataSize; - memcpy(&dataSize, marker, sizeof(int)); + if ((marker + sizeof(uint32_t)) >= (head + size)) { + return false; + } + + uint32_t dataSize; + memcpy(&dataSize, marker, sizeof(uint32_t)); tinyexr::swap4(&dataSize); - marker += sizeof(int); + marker += sizeof(uint32_t); + marker += dataSize; + + if (marker >= (head + size)) { + return false; + } } if (!isValidTile(exr_header, offset_data, - tileX, tileY, levelX, levelY)) - return; + tileX, tileY, levelX, levelY)) { + return false; + } int level_idx = LevelIndex(levelX, levelY, exr_header->tile_level_mode, numXLevels); + if (level_idx < 0) { + return false; + } + + if (size_t(level_idx) >= offset_data.offsets.size()) { + return false; + } + + if (size_t(tileY) >= offset_data.offsets[size_t(level_idx)].size()) { + return false; + } + + if (size_t(tileX) >= offset_data.offsets[size_t(level_idx)][size_t(tileY)].size()) { + return false; + } + offset_data.offsets[size_t(level_idx)][size_t(tileY)][size_t(tileX)] = tileOffset; } } } + return true; } // marker output is also @@ -5744,8 +5918,12 @@ static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, tinyexr::SetErrorMessage("Invalid data width value", err); return TINYEXR_ERROR_INVALID_DATA; } - int data_width = - exr_header->data_window.max_x - exr_header->data_window.min_x + 1; + tinyexr_int64 data_width = + static_cast(exr_header->data_window.max_x) - static_cast(exr_header->data_window.min_x) + static_cast(1); + if (data_width <= 0) { + tinyexr::SetErrorMessage("Invalid data window width value", err); + return TINYEXR_ERROR_INVALID_DATA; + } 
if (exr_header->data_window.max_y < exr_header->data_window.min_y || exr_header->data_window.max_y - exr_header->data_window.min_y == @@ -5753,8 +5931,13 @@ static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } - int data_height = - exr_header->data_window.max_y - exr_header->data_window.min_y + 1; + tinyexr_int64 data_height = + static_cast(exr_header->data_window.max_y) - static_cast(exr_header->data_window.min_y) + static_cast(1); + + if (data_height <= 0) { + tinyexr::SetErrorMessage("Invalid data window height value", err); + return TINYEXR_ERROR_INVALID_DATA; + } // Do not allow too large data_width and data_height. header invalid? { @@ -5787,7 +5970,10 @@ static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, if (exr_header->tiled) { { std::vector num_x_tiles, num_y_tiles; - PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header); + if (!PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_header)) { + tinyexr::SetErrorMessage("Failed to precalculate tile info.", err); + return TINYEXR_ERROR_INVALID_DATA; + } num_blocks = size_t(InitTileOffsets(offset_data, exr_header, num_x_tiles, num_y_tiles)); if (exr_header->chunk_count > 0) { if (exr_header->chunk_count != static_cast(num_blocks)) { @@ -5800,9 +5986,13 @@ static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header, int ret = ReadOffsets(offset_data, head, marker, size, err); if (ret != TINYEXR_SUCCESS) return ret; if (IsAnyOffsetsAreInvalid(offset_data)) { - ReconstructTileOffsets(offset_data, exr_header, + if (!ReconstructTileOffsets(offset_data, exr_header, head, marker, size, - exr_header->multipart, exr_header->non_image); + exr_header->multipart, exr_header->non_image)) { + + tinyexr::SetErrorMessage("Invalid Tile Offsets data.", err); + return TINYEXR_ERROR_INVALID_DATA; + } } } else if (exr_header->chunk_count > 0) { // Use `chunkCount` 
attribute. @@ -6103,15 +6293,17 @@ int LoadEXRWithLayer(float **out_rgba, int *width, int *height, static_cast(exr_image.height))); if (exr_header.tiled) { + const size_t tile_size_x = static_cast(exr_header.tile_size_x); + const size_t tile_size_y = static_cast(exr_header.tile_size_y); for (int it = 0; it < exr_image.num_tiles; it++) { - for (int j = 0; j < exr_header.tile_size_y; j++) { - for (int i = 0; i < exr_header.tile_size_x; i++) { - const size_t ii = exr_image.tiles[it].offset_x * - static_cast(exr_header.tile_size_x) + - i; - const size_t jj = exr_image.tiles[it].offset_y * - static_cast(exr_header.tile_size_y) + - j; + for (size_t j = 0; j < tile_size_y; j++) { + for (size_t i = 0; i < tile_size_x; i++) { + const size_t ii = + static_cast(exr_image.tiles[it].offset_x) * tile_size_x + + i; + const size_t jj = + static_cast(exr_image.tiles[it].offset_y) * tile_size_y + + j; const size_t idx = ii + jj * static_cast(exr_image.width); // out of region check. @@ -6121,7 +6313,7 @@ int LoadEXRWithLayer(float **out_rgba, int *width, int *height, if (jj >= static_cast(exr_image.height)) { continue; } - const size_t srcIdx = i + j * exr_header.tile_size_x; + const size_t srcIdx = i + j * tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast(src)[chIdx][srcIdx]; @@ -6175,14 +6367,20 @@ int LoadEXRWithLayer(float **out_rgba, int *width, int *height, malloc(4 * sizeof(float) * static_cast(exr_image.width) * static_cast(exr_image.height))); if (exr_header.tiled) { + const size_t tile_size_x = static_cast(exr_header.tile_size_x); + const size_t tile_size_y = static_cast(exr_header.tile_size_y); for (int it = 0; it < exr_image.num_tiles; it++) { - for (int j = 0; j < exr_header.tile_size_y; j++) { - for (int i = 0; i < exr_header.tile_size_x; i++) { + for (size_t j = 0; j < tile_size_y; j++) { + for (size_t i = 0; i < tile_size_x; i++) { const size_t ii = - exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; + 
static_cast(exr_image.tiles[it].offset_x) * + tile_size_x + + i; const size_t jj = - exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; - const size_t idx = ii + jj * exr_image.width; + static_cast(exr_image.tiles[it].offset_y) * + tile_size_y + + j; + const size_t idx = ii + jj * static_cast(exr_image.width); // out of region check. if (ii >= static_cast(exr_image.width)) { @@ -6191,7 +6389,7 @@ int LoadEXRWithLayer(float **out_rgba, int *width, int *height, if (jj >= static_cast(exr_image.height)) { continue; } - const size_t srcIdx = i + j * exr_header.tile_size_x; + const size_t srcIdx = i + j * tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast(src)[idxR][srcIdx]; @@ -6385,14 +6583,20 @@ int LoadEXRFromMemory(float **out_rgba, int *width, int *height, static_cast(exr_image.height))); if (exr_header.tiled) { + const size_t tile_size_x = static_cast(exr_header.tile_size_x); + const size_t tile_size_y = static_cast(exr_header.tile_size_y); for (int it = 0; it < exr_image.num_tiles; it++) { - for (int j = 0; j < exr_header.tile_size_y; j++) { - for (int i = 0; i < exr_header.tile_size_x; i++) { + for (size_t j = 0; j < tile_size_y; j++) { + for (size_t i = 0; i < tile_size_x; i++) { const size_t ii = - exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; + static_cast(exr_image.tiles[it].offset_x) * + tile_size_x + + i; const size_t jj = - exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; - const size_t idx = ii + jj * exr_image.width; + static_cast(exr_image.tiles[it].offset_y) * + tile_size_y + + j; + const size_t idx = ii + jj * static_cast(exr_image.width); // out of region check. 
if (ii >= static_cast(exr_image.width)) { @@ -6401,7 +6605,7 @@ int LoadEXRFromMemory(float **out_rgba, int *width, int *height, if (jj >= static_cast(exr_image.height)) { continue; } - const size_t srcIdx = i + j * exr_header.tile_size_x; + const size_t srcIdx = i + j * tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast(src)[0][srcIdx]; @@ -6453,14 +6657,20 @@ int LoadEXRFromMemory(float **out_rgba, int *width, int *height, static_cast(exr_image.height))); if (exr_header.tiled) { + const size_t tile_size_x = static_cast(exr_header.tile_size_x); + const size_t tile_size_y = static_cast(exr_header.tile_size_y); for (int it = 0; it < exr_image.num_tiles; it++) { - for (int j = 0; j < exr_header.tile_size_y; j++) - for (int i = 0; i < exr_header.tile_size_x; i++) { + for (size_t j = 0; j < tile_size_y; j++) + for (size_t i = 0; i < tile_size_x; i++) { const size_t ii = - exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; + static_cast(exr_image.tiles[it].offset_x) * + tile_size_x + + i; const size_t jj = - exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; - const size_t idx = ii + jj * exr_image.width; + static_cast(exr_image.tiles[it].offset_y) * + tile_size_y + + j; + const size_t idx = ii + jj * static_cast(exr_image.width); // out of region check. if (ii >= static_cast(exr_image.width)) { @@ -6469,7 +6679,7 @@ int LoadEXRFromMemory(float **out_rgba, int *width, int *height, if (jj >= static_cast(exr_image.height)) { continue; } - const size_t srcIdx = i + j * exr_header.tile_size_x; + const size_t srcIdx = i + j * tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast(src)[idxR][srcIdx]; @@ -6514,6 +6724,200 @@ int LoadEXRFromMemory(float **out_rgba, int *width, int *height, return TINYEXR_SUCCESS; } +// Represents a read-only file mapped to an address space in memory. 
+// If no memory-mapping API is available, falls back to allocating a buffer +// with a copy of the file's data. +struct MemoryMappedFile { + unsigned char *data; // To the start of the file's data. + size_t size; // The size of the file in bytes. +#ifdef TINYEXR_USE_WIN32_MMAP + HANDLE windows_file; + HANDLE windows_file_mapping; +#elif defined(TINYEXR_USE_POSIX_MMAP) + int posix_descriptor; +#endif + + // MemoryMappedFile's constructor tries to map memory to a file. + // If this succeeds, valid() will return true and all fields + // are usable; otherwise, valid() will return false. + MemoryMappedFile(const char *filename) { + data = NULL; + size = 0; +#ifdef TINYEXR_USE_WIN32_MMAP + windows_file_mapping = NULL; + windows_file = + CreateFileW(tinyexr::UTF8ToWchar(filename).c_str(), // lpFileName + GENERIC_READ, // dwDesiredAccess + FILE_SHARE_READ, // dwShareMode + NULL, // lpSecurityAttributes + OPEN_EXISTING, // dwCreationDisposition + FILE_ATTRIBUTE_READONLY, // dwFlagsAndAttributes + NULL); // hTemplateFile + if (windows_file == INVALID_HANDLE_VALUE) { + return; + } + + windows_file_mapping = CreateFileMapping(windows_file, // hFile + NULL, // lpFileMappingAttributes + PAGE_READONLY, // flProtect + 0, // dwMaximumSizeHigh + 0, // dwMaximumSizeLow + NULL); // lpName + if (windows_file_mapping == NULL) { + return; + } + + data = reinterpret_cast( + MapViewOfFile(windows_file_mapping, // hFileMappingObject + FILE_MAP_READ, // dwDesiredAccess + 0, // dwFileOffsetHigh + 0, // dwFileOffsetLow + 0)); // dwNumberOfBytesToMap + if (!data) { + return; + } + + LARGE_INTEGER windows_file_size = {}; + if (!GetFileSizeEx(windows_file, &windows_file_size) || + static_cast(windows_file_size.QuadPart) > + std::numeric_limits::max()) { + UnmapViewOfFile(data); + data = NULL; + return; + } + size = static_cast(windows_file_size.QuadPart); +#elif defined(TINYEXR_USE_POSIX_MMAP) + posix_descriptor = open(filename, O_RDONLY); + if (posix_descriptor == -1) { + return; + } + + struct 
stat info; + if (fstat(posix_descriptor, &info) < 0) { + return; + } + // Make sure st_size is in the valid range for a size_t. The second case + // can only fail if a POSIX implementation defines off_t to be a larger + // type than size_t - for instance, compiling with _FILE_OFFSET_BITS=64 + // on a 32-bit system. On current 64-bit systems, this check can never + // fail, so we turn off clang's Wtautological-type-limit-compare warning + // around this code. +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wtautological-type-limit-compare" +#endif + if (info.st_size < 0 || + info.st_size > std::numeric_limits::max()) { + return; + } +#ifdef __clang__ +#pragma clang diagnostic pop +#endif + size = static_cast(info.st_size); + + data = reinterpret_cast( + mmap(0, size, PROT_READ, MAP_SHARED, posix_descriptor, 0)); + if (data == MAP_FAILED) { + data = nullptr; + return; + } +#else + FILE *fp = fopen(filename, "rb"); + if (!fp) { + return; + } + + // Calling fseek(fp, 0, SEEK_END) isn't strictly-conforming C code, but + // since neither the WIN32 nor POSIX APIs are available in this branch, this + // is a reasonable fallback option. + if (fseek(fp, 0, SEEK_END) != 0) { + fclose(fp); + return; + } + const long ftell_result = ftell(fp); + if (ftell_result < 0) { + // Error from ftell + fclose(fp); + return; + } + size = static_cast(ftell_result); + if (fseek(fp, 0, SEEK_SET) != 0) { + fclose(fp); + size = 0; + return; + } + + data = reinterpret_cast(malloc(size)); + if (!data) { + size = 0; + fclose(fp); + return; + } + size_t read_bytes = fread(data, 1, size, fp); + if (read_bytes != size) { + // TODO: Try to read data until reading `size` bytes. + fclose(fp); + size = 0; + data = nullptr; + return; + } + fclose(fp); +#endif + } + + // MemoryMappedFile's destructor closes all its handles. 
+ ~MemoryMappedFile() { +#ifdef TINYEXR_USE_WIN32_MMAP + if (data) { + (void)UnmapViewOfFile(data); + data = NULL; + } + + if (windows_file_mapping != NULL) { + (void)CloseHandle(windows_file_mapping); + } + + if (windows_file != INVALID_HANDLE_VALUE) { + (void)CloseHandle(windows_file); + } +#elif defined(TINYEXR_USE_POSIX_MMAP) + if (data) { + (void)munmap(data, size); + data = NULL; + } + + if (posix_descriptor != -1) { + (void)close(posix_descriptor); + } +#else + if (data) { + (void)free(data); + } + data = NULL; +#endif + } + + // A MemoryMappedFile cannot be copied or moved. + // Only check for this when compiling with C++11 or higher, since deleted + // function definitions were added then. +#if TINYEXR_HAS_CXX11 +#ifdef __clang__ +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Wc++98-compat" +#endif + MemoryMappedFile(const MemoryMappedFile &) = delete; + MemoryMappedFile &operator=(const MemoryMappedFile &) = delete; + MemoryMappedFile(MemoryMappedFile &&other) noexcept = delete; + MemoryMappedFile &operator=(MemoryMappedFile &&other) noexcept = delete; +#ifdef __clang__ +#pragma clang diagnostic pop +#endif +#endif + + // Returns whether this was successfully opened. + bool valid() const { return data; } +}; + int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { @@ -6521,50 +6925,19 @@ int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, return TINYEXR_ERROR_INVALID_ARGUMENT; } - FILE *fp = NULL; -#ifdef _WIN32 -#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. 
- errno_t errcode = - _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); - if (errcode != 0) { - tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); - // TODO(syoyo): return wfopen_s erro code - return TINYEXR_ERROR_CANT_OPEN_FILE; - } -#else - // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. - fp = fopen(filename, "rb"); -#endif -#else - fp = fopen(filename, "rb"); -#endif - if (!fp) { + MemoryMappedFile file(filename); + if (!file.valid()) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } - size_t filesize; - // Compute size - fseek(fp, 0, SEEK_END); - filesize = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - if (filesize < 16) { - tinyexr::SetErrorMessage("File size too short " + std::string(filename), + if (file.size < 16) { + tinyexr::SetErrorMessage("File size too short : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } - std::vector buf(filesize); // @todo { use mmap } - { - size_t ret; - ret = fread(&buf[0], 1, filesize, fp); - assert(ret == filesize); - fclose(fp); - (void)ret; - } - - return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, + return LoadEXRImageFromMemory(exr_image, exr_header, file.data, file.size, err); } @@ -6745,13 +7118,16 @@ static bool EncodePixelData(/* out */ std::vector& out_data, } else if ((compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { -#if TINYEXR_USE_MINIZ +#if defined(TINYEXR_USE_MINIZ) && (TINYEXR_USE_MINIZ==1) std::vector block(mz_compressBound( static_cast(buf.size()))); #elif TINYEXR_USE_STB_ZLIB // there is no compressBound() function, so we use a value that // is grossly overestimated, but should always work std::vector block(256 + 2 * buf.size()); +#elif defined(TINYEXR_USE_NANOZLIB) && (TINYEXR_USE_NANOZLIB == 1) + std::vector block(nanoz_compressBound( + static_cast(buf.size()))); #else std::vector 
block( compressBound(static_cast(buf.size()))); @@ -6780,9 +7156,14 @@ static bool EncodePixelData(/* out */ std::vector& out_data, tinyexr::tinyexr_uint64 outSize = block.size(); - tinyexr::CompressRle(&block.at(0), outSize, + if (!tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast(&buf.at(0)), - static_cast(buf.size())); + static_cast(buf.size()))) { + if (err) { + (*err) += "RLE compresssion failed.\n"; + } + return false; + } // 4 byte: scan line // 4 byte: data size @@ -6799,9 +7180,14 @@ static bool EncodePixelData(/* out */ std::vector& out_data, std::vector block(bufLen); unsigned int outSize = static_cast(block.size()); - CompressPiz(&block.at(0), &outSize, + if (!CompressPiz(&block.at(0), &outSize, reinterpret_cast(&buf.at(0)), - buf.size(), channels, width, num_lines); + buf.size(), channels, width, num_lines)) { + if (err) { + (*err) += "PIZ compresssion failed.\n"; + } + return false; + } // 4 byte: scan line // 4 byte: data size @@ -6855,14 +7241,19 @@ static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_he const void* compression_param, // must be set if zfp compression is enabled std::string* err) { int num_tiles = num_x_tiles * num_y_tiles; - assert(num_tiles == level_image->num_tiles); + if (num_tiles != level_image->num_tiles) { + if (err) { + (*err) += "Invalid number of tiles in argument.\n"; + } + return TINYEXR_ERROR_INVALID_ARGUMENT; + } if ((exr_header->tile_size_x > level_image->width || exr_header->tile_size_y > level_image->height) && level_image->level_x == 0 && level_image->level_y == 0) { if (err) { (*err) += "Failed to encode tile data.\n"; - } - return TINYEXR_ERROR_INVALID_DATA; + } + return TINYEXR_ERROR_INVALID_DATA; } @@ -6925,7 +7316,11 @@ static int EncodeTiledLevel(const EXRImage* level_image, const EXRHeader* exr_he invalid_data = true; continue; } - assert(data_list[data_idx].size() > data_header_size); + if (data_list[data_idx].size() <= data_header_size) { + invalid_data = true; + 
continue; + } + int data_len = static_cast(data_list[data_idx].size() - data_header_size); //tileX, tileY, levelX, levelY // pixel_data_size(int) memcpy(&data_list[data_idx][0], &x_tile, sizeof(int)); @@ -7005,7 +7400,10 @@ static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { - assert(0); + if (err) { + (*err) += "Invalid requested_pixel_type.\n"; + } + return TINYEXR_ERROR_INVALID_DATA; } } } @@ -7050,6 +7448,13 @@ static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, int level_index_from_image = LevelIndex(level_image->level_x, level_image->level_y, exr_header->tile_level_mode, offset_data.num_x_levels); + if (level_index_from_image < 0) { + if (err) { + (*err) += "Invalid tile level mode\n"; + } + return TINYEXR_ERROR_INVALID_DATA; + } + if (level_index_from_image != level_index) { if (err) { (*err) += "Incorrect level ordering in tiled image\n"; @@ -7057,9 +7462,20 @@ static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, return TINYEXR_ERROR_INVALID_DATA; } int num_y_tiles = int(offset_data.offsets[level_index].size()); - assert(num_y_tiles); + if (num_y_tiles <= 0) { + if (err) { + (*err) += "Invalid Y tile size\n"; + } + return TINYEXR_ERROR_INVALID_DATA; + } + int num_x_tiles = int(offset_data.offsets[level_index][0].size()); - assert(num_x_tiles); + if (num_x_tiles <= 0) { + if (err) { + (*err) += "Invalid X tile size\n"; + } + return TINYEXR_ERROR_INVALID_DATA; + } std::string e; int ret = EncodeTiledLevel(level_image, @@ -7090,7 +7506,7 @@ static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, } level_image = level_image->next_level; } - assert(static_cast(block_idx) == num_blocks); + TINYEXR_CHECK_AND_RETURN_C(static_cast(block_idx) == num_blocks, TINYEXR_ERROR_INVALID_DATA); total_size = offset; } else { // scanlines std::vector& offsets = offset_data.offsets[0][0]; 
@@ -7143,7 +7559,10 @@ static int EncodeChunk(const EXRImage* exr_image, const EXRHeader* exr_header, invalid_data = true; continue; // "break" cannot be used with OpenMP } - assert(data_list[i].size() > data_header_size); + if (data_list[i].size() <= data_header_size) { + invalid_data = true; + continue; // "break" cannot be used with OpenMP + } int data_len = static_cast(data_list[i].size() - data_header_size); memcpy(&data_list[i][0], &start_y, sizeof(int)); memcpy(&data_list[i][4], &data_len, sizeof(int)); @@ -7205,21 +7624,24 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, return 0; } #endif -#if !TINYEXR_USE_ZFP if (exr_headers[i]->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { +#if !TINYEXR_USE_ZFP SetErrorMessage("ZFP compression is not supported in this build", err); return 0; - } #else - for (int c = 0; c < exr_header->num_channels; ++c) { - if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) { - SetErrorMessage("Pixel type must be FLOAT for ZFP compression", - err); - return 0; + // All channels must be fp32. 
+ // No fp16 support in ZFP atm(as of 2023 June) + // https://github.com/LLNL/fpzip/issues/2 + for (int c = 0; c < exr_headers[i]->num_channels; ++c) { + if (exr_headers[i]->requested_pixel_types[c] != TINYEXR_PIXELTYPE_FLOAT) { + SetErrorMessage("Pixel type must be FLOAT for ZFP compression", + err); + return 0; + } } - } #endif + } } } @@ -7269,9 +7691,20 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, } else { { std::vector num_x_tiles, num_y_tiles; - PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); - chunk_count[i] = - InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles); + if (!PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i])) { + SetErrorMessage("Failed to precalculate Tile info", + err); + return TINYEXR_ERROR_INVALID_DATA; + } + int ntiles = InitTileOffsets(offset_data[i], exr_headers[i], num_x_tiles, num_y_tiles); + if (ntiles > 0) { + chunk_count[i] = ntiles; + } else { + SetErrorMessage("Failed to compute Tile offsets", + err); + return TINYEXR_ERROR_INVALID_DATA; + + } total_chunk_count += chunk_count[i]; } } @@ -7471,7 +7904,7 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, // Allocating required memory if (total_size == 0) { // something went wrong tinyexr::SetErrorMessage("Output memory size is zero", err); - return 0; + return TINYEXR_ERROR_INVALID_DATA; } (*memory_out) = static_cast(malloc(size_t(total_size))); @@ -7490,7 +7923,11 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, for (size_t j = 0; j < offset_data[i].offsets[level_index].size(); ++j) { size_t num_bytes = sizeof(tinyexr_uint64) * offset_data[i].offsets[level_index][j].size(); sum += num_bytes; - assert(sum <= total_size); + if (sum > total_size) { + tinyexr::SetErrorMessage("Invalid offset bytes in Tiled Part image.", err); + return TINYEXR_ERROR_INVALID_DATA; + } + memcpy(memory_ptr, reinterpret_cast(&offset_data[i].offsets[level_index][j][0]), num_bytes); @@ -7501,7 
+7938,10 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, } else { size_t num_bytes = sizeof(tinyexr::tinyexr_uint64) * static_cast(chunk_count[i]); sum += num_bytes; - assert(sum <= total_size); + if (sum > total_size) { + tinyexr::SetErrorMessage("Invalid offset bytes in Part image.", err); + return TINYEXR_ERROR_INVALID_DATA; + } std::vector& offsets = offset_data[i].offsets[0][0]; memcpy(memory_ptr, reinterpret_cast(&offsets[0]), num_bytes); memory_ptr += num_bytes; @@ -7513,19 +7953,30 @@ static size_t SaveEXRNPartImageToMemory(const EXRImage* exr_images, for (size_t j = 0; j < static_cast(chunk_count[i]); ++j) { if (num_parts > 1) { sum += 4; - assert(sum <= total_size); + if (sum > total_size) { + tinyexr::SetErrorMessage("Buffer overrun in reading Part image chunk data.", err); + return TINYEXR_ERROR_INVALID_DATA; + } unsigned int part_number = i; swap4(&part_number); memcpy(memory_ptr, &part_number, 4); memory_ptr += 4; } sum += data_lists[i][j].size(); - assert(sum <= total_size); + if (sum > total_size) { + tinyexr::SetErrorMessage("Buffer overrun in reading Part image chunk data.", err); + return TINYEXR_ERROR_INVALID_DATA; + } memcpy(memory_ptr, &data_lists[i][j][0], data_lists[i][j].size()); memory_ptr += data_lists[i][j].size(); } } - assert(sum == total_size); + + if (sum != total_size) { + tinyexr::SetErrorMessage("Corrupted Part image chunk data.", err); + return TINYEXR_ERROR_INVALID_DATA; + } + return size_t(total_size); // OK } @@ -7591,6 +8042,7 @@ int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char *mem = NULL; size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err); if (mem_size == 0) { + fclose(fp); return TINYEXR_ERROR_SERIALIZATION_FAILED; } @@ -7660,6 +8112,7 @@ int SaveEXRMultipartImageToFile(const EXRImage* exr_images, unsigned char *mem = NULL; size_t mem_size = SaveEXRMultipartImageToMemory(exr_images, exr_headers, num_parts, &mem, err); if (mem_size == 
0) { + fclose(fp); return TINYEXR_ERROR_SERIALIZATION_FAILED; } @@ -7685,58 +8138,20 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { return TINYEXR_ERROR_INVALID_ARGUMENT; } -#ifdef _WIN32 - FILE *fp = NULL; -#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. - errno_t errcode = - _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); - if (errcode != 0) { - tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), - err); - return TINYEXR_ERROR_CANT_OPEN_FILE; - } -#else - // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. - fp = fopen(filename, "rb"); -#endif - if (!fp) { - tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), - err); - return TINYEXR_ERROR_CANT_OPEN_FILE; - } -#else - FILE *fp = fopen(filename, "rb"); - if (!fp) { - tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename), - err); + MemoryMappedFile file(filename); + if (!file.valid()) { + tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } -#endif - size_t filesize; - // Compute size - fseek(fp, 0, SEEK_END); - filesize = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - if (filesize == 0) { - fclose(fp); + if (file.size == 0) { tinyexr::SetErrorMessage("File size is zero : " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } - std::vector buf(filesize); // @todo { use mmap } - { - size_t ret; - ret = fread(&buf[0], 1, filesize, fp); - assert(ret == filesize); - (void)ret; - } - fclose(fp); - - const char *head = &buf[0]; - const char *marker = &buf[0]; + const char *head = reinterpret_cast(file.data); + const char *marker = reinterpret_cast(file.data); // Header check. 
{ @@ -7771,7 +8186,7 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { std::vector channels; // Read attributes - size_t size = filesize - tinyexr::kEXRVersionSize; + size_t size = file.size - tinyexr::kEXRVersionSize; for (;;) { if (0 == size) { return TINYEXR_ERROR_INVALID_DATA; @@ -7854,11 +8269,11 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { } } - assert(dx >= 0); - assert(dy >= 0); - assert(dw >= 0); - assert(dh >= 0); - assert(num_channels >= 1); + TINYEXR_CHECK_AND_RETURN_C(dx >= 0, TINYEXR_ERROR_INVALID_DATA); + TINYEXR_CHECK_AND_RETURN_C(dy >= 0, TINYEXR_ERROR_INVALID_DATA); + TINYEXR_CHECK_AND_RETURN_C(dw >= 0, TINYEXR_ERROR_INVALID_DATA); + TINYEXR_CHECK_AND_RETURN_C(dh >= 0, TINYEXR_ERROR_INVALID_DATA); + TINYEXR_CHECK_AND_RETURN_C(num_channels >= 1, TINYEXR_ERROR_INVALID_DATA); int data_width = dw - dx + 1; int data_height = dh - dy + 1; @@ -7956,7 +8371,7 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { return false; } - assert(dstLen == pixelOffsetTable.size() * sizeof(int)); + TINYEXR_CHECK_AND_RETURN_C(dstLen == pixelOffsetTable.size() * sizeof(int), TINYEXR_ERROR_INVALID_DATA); for (size_t i = 0; i < static_cast(data_width); i++) { deep_image->offset_table[y][i] = pixelOffsetTable[i]; } @@ -7975,7 +8390,7 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { static_cast(packedSampleDataSize))) { return false; } - assert(dstLen == static_cast(unpackedSampleDataSize)); + TINYEXR_CHECK_AND_RETURN_C(dstLen == static_cast(unpackedSampleDataSize), TINYEXR_ERROR_INVALID_DATA); } } @@ -7994,16 +8409,17 @@ int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) { TINYEXR_PIXELTYPE_FLOAT) { // float channel_offset += 4; } else { - assert(0); + tinyexr::SetErrorMessage("Invalid pixel_type in chnnels.", err); + return TINYEXR_ERROR_INVALID_DATA; } } sampleSize = channel_offset; } - assert(sampleSize 
>= 2); + TINYEXR_CHECK_AND_RETURN_C(sampleSize >= 2, TINYEXR_ERROR_INVALID_DATA); - assert(static_cast( + TINYEXR_CHECK_AND_RETURN_C(static_cast( pixelOffsetTable[static_cast(data_width - 1)] * - sampleSize) == sample_data.size()); + sampleSize) == sample_data.size(), TINYEXR_ERROR_INVALID_DATA); int samples_per_line = static_cast(sample_data.size()) / sampleSize; // @@ -8202,48 +8618,13 @@ int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, return TINYEXR_ERROR_INVALID_ARGUMENT; } - FILE *fp = NULL; -#ifdef _WIN32 -#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. - errno_t errcode = - _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); - if (errcode != 0) { - tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); - return TINYEXR_ERROR_INVALID_FILE; - } -#else - // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. - fp = fopen(filename, "rb"); -#endif -#else - fp = fopen(filename, "rb"); -#endif - if (!fp) { + MemoryMappedFile file(filename); + if (!file.valid()) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } - size_t filesize; - // Compute size - fseek(fp, 0, SEEK_END); - filesize = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - std::vector buf(filesize); // @todo { use mmap } - { - size_t ret; - ret = fread(&buf[0], 1, filesize, fp); - assert(ret == filesize); - fclose(fp); - - if (ret != filesize) { - tinyexr::SetErrorMessage("fread() error on " + std::string(filename), - err); - return TINYEXR_ERROR_INVALID_FILE; - } - } - - return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize, + return ParseEXRHeaderFromMemory(exr_header, exr_version, file.data, file.size, err); } @@ -8335,7 +8716,7 @@ int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers, if (!ConvertHeader(exr_header, infos[i], &warn, &_err)) { // Free malloc-allocated 
memory here. - for (size_t k = 0; k < infos[i].attributes.size(); i++) { + for (size_t k = 0; k < infos[i].attributes.size(); k++) { if (infos[i].attributes[k].value) { free(infos[i].attributes[k].value); } @@ -8369,48 +8750,14 @@ int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers, return TINYEXR_ERROR_INVALID_ARGUMENT; } - FILE *fp = NULL; -#ifdef _WIN32 -#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. - errno_t errcode = - _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); - if (errcode != 0) { - tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); - return TINYEXR_ERROR_INVALID_FILE; - } -#else - // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. - fp = fopen(filename, "rb"); -#endif -#else - fp = fopen(filename, "rb"); -#endif - if (!fp) { + MemoryMappedFile file(filename); + if (!file.valid()) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } - size_t filesize; - // Compute size - fseek(fp, 0, SEEK_END); - filesize = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - std::vector buf(filesize); // @todo { use mmap } - { - size_t ret; - ret = fread(&buf[0], 1, filesize, fp); - assert(ret == filesize); - fclose(fp); - - if (ret != filesize) { - tinyexr::SetErrorMessage("`fread' error. 
file may be corrupted.", err); - return TINYEXR_ERROR_INVALID_FILE; - } - } - return ParseEXRMultipartHeaderFromMemory( - exr_headers, num_headers, exr_version, &buf.at(0), filesize, err); + exr_headers, num_headers, exr_version, file.data, file.size, err); } int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, @@ -8494,16 +8841,10 @@ int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) { return TINYEXR_ERROR_CANT_OPEN_FILE; } - size_t file_size; - // Compute size - fseek(fp, 0, SEEK_END); - file_size = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - if (file_size < tinyexr::kEXRVersionSize) { - return TINYEXR_ERROR_INVALID_FILE; - } - + // Try to read kEXRVersionSize bytes; if the file is shorter than + // kEXRVersionSize, this will produce an error. This avoids a call to + // fseek(fp, 0, SEEK_END), which is not required to be supported by C + // implementations. unsigned char buf[tinyexr::kEXRVersionSize]; size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp); fclose(fp); @@ -8581,7 +8922,10 @@ int LoadEXRMultipartImageFromMemory(EXRImage *exr_images, } else { { std::vector num_x_tiles, num_y_tiles; - tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i]); + if (!tinyexr::PrecalculateTileInfo(num_x_tiles, num_y_tiles, exr_headers[i])) { + tinyexr::SetErrorMessage("Invalid tile info.", err); + return TINYEXR_ERROR_INVALID_DATA; + } int num_blocks = InitTileOffsets(offset_data, exr_headers[i], num_x_tiles, num_y_tiles); if (num_blocks != exr_headers[i]->chunk_count) { tinyexr::SetErrorMessage("Invalid offset table size.", err); @@ -8653,48 +8997,18 @@ int LoadEXRMultipartImageFromFile(EXRImage *exr_images, return TINYEXR_ERROR_INVALID_ARGUMENT; } - FILE *fp = NULL; -#ifdef _WIN32 -#if defined(_MSC_VER) || (defined(MINGW_HAS_SECURE_API) && MINGW_HAS_SECURE_API) // MSVC, MinGW GCC, or Clang. 
- errno_t errcode = - _wfopen_s(&fp, tinyexr::UTF8ToWchar(filename).c_str(), L"rb"); - if (errcode != 0) { - tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); - return TINYEXR_ERROR_CANT_OPEN_FILE; - } -#else - // Unknown compiler or MinGW without MINGW_HAS_SECURE_API. - fp = fopen(filename, "rb"); -#endif -#else - fp = fopen(filename, "rb"); -#endif - if (!fp) { + MemoryMappedFile file(filename); + if (!file.valid()) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } - size_t filesize; - // Compute size - fseek(fp, 0, SEEK_END); - filesize = static_cast(ftell(fp)); - fseek(fp, 0, SEEK_SET); - - std::vector buf(filesize); // @todo { use mmap } - { - size_t ret; - ret = fread(&buf[0], 1, filesize, fp); - assert(ret == filesize); - fclose(fp); - (void)ret; - } - return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts, - &buf.at(0), filesize, err); + file.data, file.size, err); } int SaveEXRToMemory(const float *data, int width, int height, int components, - const int save_as_fp16, const unsigned char **outbuf, const char **err) { + const int save_as_fp16, unsigned char **outbuf, const char **err) { if ((components == 1) || components == 3 || components == 4) { // OK @@ -8871,18 +9185,20 @@ int SaveEXR(const float *data, int width, int height, int components, image.num_channels = components; std::vector images[4]; + const size_t pixel_count = + static_cast(width) * static_cast(height); if (components == 1) { - images[0].resize(static_cast(width * height)); - memcpy(images[0].data(), data, sizeof(float) * size_t(width * height)); + images[0].resize(pixel_count); + memcpy(images[0].data(), data, sizeof(float) * pixel_count); } else { - images[0].resize(static_cast(width * height)); - images[1].resize(static_cast(width * height)); - images[2].resize(static_cast(width * height)); - images[3].resize(static_cast(width * height)); + images[0].resize(pixel_count); + 
images[1].resize(pixel_count); + images[2].resize(pixel_count); + images[3].resize(pixel_count); // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers - for (size_t i = 0; i < static_cast(width * height); i++) { + for (size_t i = 0; i < pixel_count; i++) { images[0][i] = data[static_cast(components) * i + 0]; images[1][i] = data[static_cast(components) * i + 1]; images[2][i] = data[static_cast(components) * i + 2]; @@ -8971,9 +9287,6 @@ int SaveEXR(const float *data, int width, int height, int components, } int ret = SaveEXRImageToFile(&image, &header, outfilename, err); - if (ret != TINYEXR_SUCCESS) { - return ret; - } free(header.channels); free(header.pixel_types); diff --git a/ChangeLog.txt b/ChangeLog.txt index 281b973c9a..9da29cc83e 100644 --- a/ChangeLog.txt +++ b/ChangeLog.txt @@ -40,6 +40,9 @@ ViSP 3.x.x (Version in development) . New scene renderer based on Panda3D. See inheritance diagram for vpPanda3DBaseRenderer class and corresponding tutorial . New docker image for Ubuntu 24.04 in docker folder + . Update apriltag 3rdparty to 3.4.2 release + . Update tinyexr 3rdparty to 1.0.9 release + . Update stb_image 3rdparty to 2.30 version - Applications . 
Migrate eye-to-hand tutorials in apps - Tutorials diff --git a/cmake/PCLTools.cmake b/cmake/PCLTools.cmake index 4d542a1ede..e0b67b9a30 100644 --- a/cmake/PCLTools.cmake +++ b/cmake/PCLTools.cmake @@ -384,6 +384,8 @@ macro(vp_find_pcl pcl_libraries pcl_deps_include_dirs pcl_deps_libraries) mark_as_advanced(TBB_DIR) mark_as_advanced(Tiff_DIR) + mark_as_advanced(tiff_DIR) + mark_as_advanced(CLI11_DIR) mark_as_advanced(synchronization_LOCATION) # Requested for pcl 1.13.1 on windows mark_as_advanced($<$ // std::fabs -#include // numeric_limits -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) + #include -#include -#include -#include -#include -#include +#include +#include #include - -#include -#include -#include +#include +#include #include #include #include -#include #include -#include +#include #include - -#include - -// Exception -#include #include -#include -#include -#include +// Define the object CAD model +// Here we consider 4 black blobs whose centers are located on the corners of a square. 
+#define L 0.06 // To deal with a 12cm by 12cm square int main() { @@ -101,49 +74,43 @@ int main() #endif try { - vpImage I; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - vpRobotAfma6 robot; - robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("zero.pos") ; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; 
std::cout << " Simulation " << std::endl; std::cout << " task : servo a line " << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; int nbline = 4; int nbpoint = 4; @@ -151,12 +118,10 @@ int main() vpTRACE("sets the desired position of the visual feature "); vpPoint pointd[nbpoint]; // position of the fours corners vpPoint pointcd; // position of the center of the square - vpFeaturePoint pd; - double L = 0.05; - pointd[0].setWorldCoordinates(L, -L, 0); - pointd[1].setWorldCoordinates(L, L, 0); - pointd[2].setWorldCoordinates(-L, L, 0); + pointd[0].setWorldCoordinates(+L, -L, 0); + pointd[1].setWorldCoordinates(+L, +L, 0); + pointd[2].setWorldCoordinates(-L, +L, 0); pointd[3].setWorldCoordinates(-L, -L, 0); // The coordinates in the object frame of the point used as a feature ie @@ -164,21 +129,21 @@ int main() pointcd.setWorldCoordinates(0, 0, 0); // The desired homogeneous matrix. - vpHomogeneousMatrix cMod(0, 0, 0.4, 0, 0, vpMath::rad(10)); + vpHomogeneousMatrix cd_M_o(0, 0, 0.4, 0, 0, vpMath::rad(10)); - pointd[0].project(cMod); - pointd[1].project(cMod); - pointd[2].project(cMod); - pointd[3].project(cMod); + pointd[0].project(cd_M_o); + pointd[1].project(cd_M_o); + pointd[2].project(cd_M_o); + pointd[3].project(cd_M_o); - pointcd.project(cMod); + pointcd.project(cd_M_o); - vpFeatureBuilder::create(pd, pointcd); + vpFeaturePoint s_pd; + vpFeatureBuilder::create(s_pd, pointcd); - vpTRACE("Initialization of the tracking"); + // Tracking initialization vpMeLine line[nbline]; vpPoint point[nbpoint]; - int i; vpMe me; me.setRange(10); @@ -188,17 +153,16 @@ int main() me.setSampleStep(10); // Initialize the tracking. Define the four lines to track - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nbline; ++i) { line[i].setMe(&me); line[i].initTracking(I); line[i].track(I); } - // Compute the position of the four corners. 
The goal is to - // compute the pose + // Compute the position of the four corners. The goal is to compute the pose vpImagePoint ip; - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nbline; ++i) { double x = 0, y = 0; if (!vpMeLine::intersection(line[i % nbline], line[(i + 1) % nbline], ip)) { @@ -211,180 +175,139 @@ int main() point[i].set_y(y); } - // Compute the pose cMo + // Compute the pose c_M_o vpPose pose; pose.clearPoint(); - vpHomogeneousMatrix cMo; + vpHomogeneousMatrix c_M_o; - point[0].setWorldCoordinates(L, -L, 0); - point[1].setWorldCoordinates(L, L, 0); - point[2].setWorldCoordinates(-L, L, 0); + point[0].setWorldCoordinates(+L, -L, 0); + point[1].setWorldCoordinates(+L, +L, 0); + point[2].setWorldCoordinates(-L, +L, 0); point[3].setWorldCoordinates(-L, -L, 0); - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nbline; ++i) { pose.addPoint(point[i]); // and added to the pose computation point list } // Pose by Dementhon or Lagrange provides an initialization of the non linear virtual visual-servoing pose estimation - pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, cMo); - - vpTRACE("sets the current position of the visual feature "); + pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, c_M_o); - // The first features are the position in the camera frame x and y of the - // square center + // The first features are the position in the camera frame x and y of the square center vpPoint pointc; // The current position of the center of the square double xc = (point[0].get_x() + point[2].get_x()) / 2; double yc = (point[0].get_y() + point[2].get_y()) / 2; pointc.set_x(xc); pointc.set_y(yc); - vpFeaturePoint p; - pointc.project(cMo); - vpFeatureBuilder::create(p, pointc); + + // Sets the current position of the visual feature + vpFeaturePoint s_p; + pointc.project(c_M_o); + vpFeatureBuilder::create(s_p, pointc); // The second feature is the depth of the current square center relative // to the depth of the desired square center. 
- vpFeatureDepth logZ; - logZ.build(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z() / pointcd.get_Z())); + vpFeatureDepth s_logZ; + s_logZ.build(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z() / pointcd.get_Z())); // The last three features are the rotations thetau between the current // pose and the desired pose. - vpHomogeneousMatrix cdMc; - cdMc = cMod * cMo.inverse(); - vpFeatureThetaU tu(vpFeatureThetaU::cdRc); - tu.build(cdMc); - - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + vpHomogeneousMatrix cd_M_c; + cd_M_c = cd_M_o * c_M_o.inverse(); + vpFeatureThetaU s_tu(vpFeatureThetaU::cdRc); + s_tu.build(cd_M_c); + + // Define the task + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE); - vpTRACE("\t we want to see a point on a point.."); - std::cout << std::endl; - task.addFeature(p, pd); - task.addFeature(logZ); - task.addFeature(tu); + // - we want to see a point on a point + task.addFeature(s_p, s_pd); + task.addFeature(s_logZ); + task.addFeature(s_tu); - vpTRACE("\t set the gain"); - task.setLambda(0.2); + // - set the gain + vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 + task.setLambda(lambda); - vpTRACE("Display task information "); + // - display task information "); task.print(); robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - vpImage Ic; - double lambda_av = 0.05; - double alpha = 0.05; - double beta = 3; - bool quit = false; - while (!quit) { - std::cout << "---------------------------------------------" << iter << std::endl; - try { - rs.acquire(I); - vpDisplay::display(I); + while (!quit) { + rs.acquire(I); + vpDisplay::display(I); - 
pose.clearPoint(); + pose.clearPoint(); - // Track the lines and find the current position of the corners - for (i = 0; i < nbline; i++) { - line[i].track(I); + // Track the lines and find the current position of the corners + for (int i = 0; i < nbline; ++i) { + line[i].track(I); - line[i].display(I, vpColor::green); + line[i].display(I, vpColor::green); - double x = 0, y = 0; + double x = 0, y = 0; - if (!vpMeLine::intersection(line[i % nbline], line[(i + 1) % nbline], ip)) { - return EXIT_FAILURE; - } + if (!vpMeLine::intersection(line[i % nbline], line[(i + 1) % nbline], ip)) { + return EXIT_FAILURE; + } - vpPixelMeterConversion::convertPoint(cam, ip, x, y); + vpPixelMeterConversion::convertPoint(cam, ip, x, y); - point[i].set_x(x); - point[i].set_y(y); + point[i].set_x(x); + point[i].set_y(y); - pose.addPoint(point[i]); - } + pose.addPoint(point[i]); + } - // Compute the pose - pose.computePose(vpPose::VIRTUAL_VS, cMo); - - // Update the two first features x and y (position of the square - // center) - xc = (point[0].get_x() + point[2].get_x()) / 2; - yc = (point[0].get_y() + point[2].get_y()) / 2; - pointc.set_x(xc); - pointc.set_y(yc); - pointc.project(cMo); - vpFeatureBuilder::create(p, pointc); - // Print the current and the desired position of the center of the - // square Print the desired position of the four corners - p.display(cam, I, vpColor::green); - pd.display(cam, I, vpColor::red); - for (i = 0; i < nbpoint; i++) - pointd[i].display(I, cam, vpColor::red); - - // Update the second feature - logZ.build(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z() / pointcd.get_Z())); - - // Update the last three features - cdMc = cMod * cMo.inverse(); - tu.build(cdMc); - - // Adaptive gain - double gain; - { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } + // Compute the pose + pose.computePose(vpPose::VIRTUAL_VS, c_M_o); + 
+ // Update the two first features x and y (position of the square center) + xc = (point[0].get_x() + point[2].get_x()) / 2; + yc = (point[0].get_y() + point[2].get_y()) / 2; + pointc.set_x(xc); + pointc.set_y(yc); + pointc.project(c_M_o); + vpFeatureBuilder::create(s_p, pointc); + // Print the current and the desired position of the center of the + // square Print the desired position of the four corners + s_p.display(cam, I, vpColor::green); + s_pd.display(cam, I, vpColor::red); + for (int i = 0; i < nbpoint; ++i) { + pointd[i].display(I, cam, vpColor::red); + } - task.setLambda(gain); + // Update the second feature + s_logZ.build(pointc.get_x(), pointc.get_y(), pointc.get_Z(), log(pointc.get_Z() / pointcd.get_Z())); - v = task.computeControlLaw(); + // Update the last three features + cd_M_c = cd_M_o * c_M_o.inverse(); + s_tu.build(cd_M_c); - std::cout << v.sumSquare() << std::endl; - if (iter == 0) - vpDisplay::getClick(I); - if (v.sumSquare() > 0.5) { - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - vpDisplay::getClick(I); - } + vpColVector v_c = task.computeControlLaw(); - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); - if (vpDisplay::getClick(I, false)) { - quit = true; - } - vpDisplay::flush(I); - } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - exit(1); + vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); + if (vpDisplay::getClick(I, false)) { + quit = true; } - - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; + vpDisplay::flush(I); } - vpTRACE("Display task information "); + // Display task information task.print(); + return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6AprilTagIBVS.cpp b/example/servo-afma6/servoAfma6AprilTagIBVS.cpp index af8c1f6535..196730dd9d 100644 --- a/example/servo-afma6/servoAfma6AprilTagIBVS.cpp +++ b/example/servo-afma6/servoAfma6AprilTagIBVS.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,8 +31,8 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ + /*! \example servoAfma6AprilTagIBVS.cpp @@ -48,16 +47,17 @@ The target is an AprilTag that is by default 12cm large. To print your own tag, see https://visp-doc.inria.fr/doxygen/visp-daily/tutorial-detection-apriltag.html You can specify the size of your tag using --tag_size command line option. 
- */ #include -#include #include + +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6) + +#include #include -#include -#include +#include #include #include #include @@ -67,16 +67,15 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) - #ifdef ENABLE_VISP_NAMESPACE using namespace VISP_NAMESPACE_NAME; #endif -void display_point_trajectory(const vpImage &I, const std::vector &vip, +void display_point_trajectory(const vpImage &I, + const std::vector &vip, std::vector *traj_vip) { - for (size_t i = 0; i < vip.size(); i++) { + for (size_t i = 0; i < vip.size(); ++i) { if (traj_vip[i].size()) { // Add the point only if distance with the previous > 1 pixel if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { @@ -87,8 +86,8 @@ void display_point_trajectory(const vpImage &I, const std::vector traj_vip[i].push_back(vip[i]); } } - for (size_t i = 0; i < vip.size(); i++) { - for (size_t j = 1; j < traj_vip[i].size(); j++) { + for (size_t i = 0; i < vip.size(); ++i) { + for (size_t j = 1; j < traj_vip[i].size(); ++j) { vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2); } } @@ -97,17 +96,19 @@ void display_point_trajectory(const vpImage &I, const std::vector int main(int argc, char **argv) { double opt_tagSize = 0.120; - bool display_tag = true; int opt_quad_decimate = 2; bool opt_verbose = false; bool opt_plot = false; bool opt_adaptive_gain = false; bool opt_task_sequencing = false; - double convergence_threshold = 0.00005; + double opt_convergence_threshold = 0.00005; - for (int i = 1; i < argc; i++) { - if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { + bool display_tag = true; + + for (int i = 1; i < argc; ++i) { + if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) { opt_tagSize = std::stod(argv[i + 1]); + ++i; } else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; @@ 
-115,29 +116,41 @@ int main(int argc, char **argv) else if (std::string(argv[i]) == "--plot") { opt_plot = true; } - else if (std::string(argv[i]) == "--adaptive_gain") { + else if (std::string(argv[i]) == "--adaptive-gain") { opt_adaptive_gain = true; } - else if (std::string(argv[i]) == "--task_sequencing") { + else if (std::string(argv[i]) == "--task-sequencing") { opt_task_sequencing = true; } - else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + else if ((std::string(argv[i]) == "--quad-decimate") && (i + 1 < argc)) { opt_quad_decimate = std::stoi(argv[i + 1]); + ++i; } else if (std::string(argv[i]) == "--no-convergence-threshold") { - convergence_threshold = 0.; + opt_convergence_threshold = 0.; } - else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) { std::cout - << argv[0] << " --tag_size ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] + << " [--tag-size ]" + << " [--quad-decimate ]" + << " [--adaptive-gain]" + << " [--plot]" + << " [--task-sequencing]" + << " [--no-convergence-threshold]" + << " [--verbose]" + << " [--help] [-h]" + << std::endl;; return EXIT_SUCCESS; } } vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); try { std::cout << "WARNING: This example will move the robot! " @@ -145,81 +158,125 @@ int main(int argc, char **argv) << "Press Enter to continue..." 
<< std::endl; std::cin.ignore(); - /* - * Move to a safe position - */ - vpColVector q(6, 0); - q[0] = 0.; - q[1] = 0.; - q[2] = 0.; - q[3] = vpMath::rad(0.); - q[4] = vpMath::rad(0.); - q[5] = vpMath::rad(0.); - - std::cout << "Move to joint position: " << q.t() << std::endl; - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - robot.setPosition(vpRobot::JOINT_STATE, q); - vpRealSense2 rs; rs2::config config; - unsigned int width = 640, height = 480; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); - // Get camera intrinsics - vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); - std::cout << "cam:\n" << cam << "\n"; + // Warm up camera + vpImage I; + for (size_t i = 0; i < 10; ++i) { + rs.acquire(I); + } - vpImage I(height, width); + // Get camera intrinsics + vpCameraParameters cam; + robot.getCameraParameters(cam, I); + std::cout << "cam:\n" << cam << std::endl; -#if defined(VISP_HAVE_X11) - vpDisplayX dc(I, 10, 10, "Color image"); -#elif defined(VISP_HAVE_GDI) - vpDisplayGDI dc(I, 10, 10, "Color image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image"); vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11; vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS; - // vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS; + 
//vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS; vpDetectorAprilTag detector(tagFamily); detector.setAprilTagPoseEstimationMethod(poseEstimationMethod); detector.setDisplayTag(display_tag); detector.setAprilTagQuadDecimate(opt_quad_decimate); + detector.setZAlignedWithCameraAxis(true); + + // Tag frame for pose estimation is the following + // - When using + // detector.setZAlignedWithCameraAxis(false); + // detector.detect(); + // we consider the tag frame (o) such as z_o axis is not aligned with camera frame + // (meaning z_o axis is facing the camera) + // + // 3 y 2 + // | + // o--x + // + // 0 1 + // + // In that configuration, it is more difficult to set a desired camera pose c_M_o. + // To ease this step we can introduce an extra transformation matrix o'_M_o to align the axis + // with the camera frame: + // + // o'--x + // | + // y + // + // where + // | 1 0 0 0 | + // o'_M_o = | 0 -1 0 0 | + // | 0 0 -1 0 | + // | 0 0 0 1 | + // + // Defining the desired camera pose in frame o' then becomes easier.
 + // + // - When using rather + // detector.setZAlignedWithCameraAxis(true); + // detector.detect(); + // we consider the tag frame (o) such as z_o axis is aligned with camera frame + // + // 3 2 + // + // o--x + // | + // 0 y 1 + // + // In that configuration, it is easier to define a desired camera pose c_M_o since all the axes + // (camera frame and tag frame are aligned) // Servo - vpHomogeneousMatrix cdMc, cMo, oMo; + vpHomogeneousMatrix cd_M_c, c_M_o, o_M_o; // Desired pose used to compute the desired features - vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); - + vpHomogeneousMatrix cd_M_o(vpTranslationVector(0, 0, opt_tagSize * 3.5), // 3.5 times tag width along camera z axis + vpThetaUVector(vpMath::rad(10), vpMath::rad(3), vpMath::rad(5))); + if (!detector.isZAlignedWithCameraAxis()) { + vpHomogeneousMatrix oprim_M_o = { 1, 0, 0, 0, + 0, -1, 0, 0, + 0, 0, -1, 0, + 0, 0, 0, 1 }; + cd_M_o *= oprim_M_o; + } // Create visual features - std::vector p(4), pd(4); // We use 4 points // Define 4 3D points corresponding to the CAD model of the Apriltag std::vector point(4); - point[0].setWorldCoordinates(-opt_tagSize / 2., -opt_tagSize / 2., 0); - point[1].setWorldCoordinates(opt_tagSize / 2., -opt_tagSize / 2., 0); - point[2].setWorldCoordinates(opt_tagSize / 2., opt_tagSize / 2., 0); - point[3].setWorldCoordinates(-opt_tagSize / 2., opt_tagSize / 2., 0); + if (detector.isZAlignedWithCameraAxis()) { + point[0].setWorldCoordinates(-opt_tagSize / 2., +opt_tagSize / 2., 0); + point[1].setWorldCoordinates(+opt_tagSize / 2., +opt_tagSize / 2., 0); + point[2].setWorldCoordinates(+opt_tagSize / 2., -opt_tagSize / 2., 0); + point[3].setWorldCoordinates(-opt_tagSize / 2., -opt_tagSize / 2., 0); + } + else { + point[0].setWorldCoordinates(-opt_tagSize / 2., -opt_tagSize / 2., 0); + 
point[1].setWorldCoordinates(+opt_tagSize / 2., -opt_tagSize / 2., 0); + point[2].setWorldCoordinates(+opt_tagSize / 2., +opt_tagSize / 2., 0); + point[3].setWorldCoordinates(-opt_tagSize / 2., +opt_tagSize / 2., 0); + } vpServo task; // Add the 4 visual feature points - for (size_t i = 0; i < p.size(); i++) { - task.addFeature(p[i], pd[i]); + for (size_t i = 0; i < s_point.size(); ++i) { + task.addFeature(s_point[i], s_point_d[i]); } task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT); + // Set the gain if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); } else { - task.setLambda(0.5); + task.setLambda(0.2); } vpPlot *plotter = nullptr; @@ -265,8 +322,8 @@ int main(int argc, char **argv) vpDisplay::display(I); - std::vector cMo_vec; - detector.detect(I, opt_tagSize, cam, cMo_vec); + std::vector c_M_o_vec; + detector.detect(I, opt_tagSize, cam, c_M_o_vec); { std::stringstream ss; @@ -276,35 +333,35 @@ int main(int argc, char **argv) vpColVector v_c(6); - // Only one tag is detected - if (cMo_vec.size() == 1) { - cMo = cMo_vec[0]; + // Ensure that only one tag is detected during servoing + if (c_M_o_vec.size() == 1) { + c_M_o = c_M_o_vec[0]; static bool first_time = true; if (first_time) { // Introduce security wrt tag positioning in order to avoid PI rotation - std::vector v_oMo(2), v_cdMc(2); - v_oMo[1].build(0, 0, 0, 0, 0, M_PI); - for (size_t i = 0; i < 2; i++) { - v_cdMc[i] = cdMo * v_oMo[i] * cMo.inverse(); + std::vector v_o_M_o(2), v_cd_M_c(2); + v_o_M_o[1].build(0, 0, 0, 0, 0, M_PI); + for (size_t i = 0; i < 2; ++i) { + v_cd_M_c[i] = cd_M_o * v_o_M_o[i] * c_M_o.inverse(); } - if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { - oMo = v_oMo[0]; + if (std::fabs(v_cd_M_c[0].getThetaUVector().getTheta()) < std::fabs(v_cd_M_c[1].getThetaUVector().getTheta())) { + o_M_o = v_o_M_o[0]; } 
else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; - oMo = v_oMo[1]; // Introduce PI rotation + o_M_o = v_o_M_o[1]; // Introduce PI rotation } // Compute the desired position of the features from the desired pose - for (size_t i = 0; i < point.size(); i++) { - vpColVector cP, p_; - point[i].changeFrame(cdMo * oMo, cP); - point[i].projection(cP, p_); - - pd[i].set_x(p_[0]); - pd[i].set_y(p_[1]); - pd[i].set_Z(cP[2]); + for (size_t i = 0; i < point.size(); ++i) { + vpColVector cP, p; + point[i].changeFrame(cd_M_o * o_M_o, cP); + point[i].projection(cP, p); + + s_point_d[i].set_x(p[0]); + s_point_d[i].set_y(p[1]); + s_point_d[i].set_Z(cP[2]); } } @@ -312,14 +369,14 @@ int main(int argc, char **argv) std::vector corners = detector.getPolygon(0); // Update visual features - for (size_t i = 0; i < corners.size(); i++) { + for (size_t i = 0; i < corners.size(); ++i) { // Update the point feature from the tag corners location - vpFeatureBuilder::create(p[i], cam, corners[i]); + vpFeatureBuilder::create(s_point[i], cam, corners[i]); // Set the feature Z coordinate from the pose - vpColVector cP; - point[i].changeFrame(cMo, cP); + vpColVector c_P; + point[i].changeFrame(c_M_o, c_P); - p[i].set_Z(cP[2]); + s_point[i].set_Z(c_P[2]); } if (opt_task_sequencing) { @@ -337,14 +394,14 @@ int main(int argc, char **argv) // Display the current and desired feature points in the image display vpServoDisplay::display(task, cam, I); - for (size_t i = 0; i < corners.size(); i++) { + for (size_t i = 0; i < corners.size(); ++i) { std::stringstream ss; ss << i; // Display current point indexes vpDisplay::displayText(I, corners[i] + vpImagePoint(15, 15), ss.str(), vpColor::red); // Display desired point indexes vpImagePoint ip; - vpMeterPixelConversion::convertPoint(cam, pd[i].get_x(), pd[i].get_y(), ip); + vpMeterPixelConversion::convertPoint(cam, s_point_d[i].get_x(), s_point_d[i].get_y(), ip); vpDisplay::displayText(I, ip + vpImagePoint(15, 15), 
ss.str(), vpColor::red); } if (first_time) { @@ -371,16 +428,15 @@ int main(int argc, char **argv) if (opt_verbose) std::cout << "error: " << error << std::endl; - if (error < convergence_threshold) { + if (error < opt_convergence_threshold) { has_converged = true; - std::cout << "Servo task has converged" - << "\n"; + std::cout << "Servo task has converged" << std::endl; vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red); } if (first_time) { first_time = false; } - } // end if (cMo_vec.size() == 1) + } // end if (c_M_o_vec.size() == 1) else { v_c = 0; } @@ -408,7 +464,6 @@ int main(int argc, char **argv) case vpMouseButton::button3: final_quit = true; - v_c = 0; break; default: @@ -449,10 +504,6 @@ int main(int argc, char **argv) robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; } - catch (const std::exception &e) { - std::cout << "ur_rtde exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } return EXIT_SUCCESS; } diff --git a/example/servo-afma6/servoAfma6AprilTagPBVS.cpp b/example/servo-afma6/servoAfma6AprilTagPBVS.cpp index 7481d7a968..04705ffb63 100644 --- a/example/servo-afma6/servoAfma6AprilTagPBVS.cpp +++ b/example/servo-afma6/servoAfma6AprilTagPBVS.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,8 +29,7 @@ * * Description: * Data acquisition with RealSense RGB-D sensor and Franka robot. - * -*****************************************************************************/ + */ /*! 
\example servoAfma6AprilTagPBVS.cpp @@ -50,11 +48,13 @@ #include -#include #include + +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6) + +#include #include -#include -#include +#include #include #include #include @@ -64,16 +64,15 @@ #include #include -#if defined(VISP_HAVE_REALSENSE2) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)) && defined(VISP_HAVE_AFMA6) - #ifdef ENABLE_VISP_NAMESPACE using namespace VISP_NAMESPACE_NAME; #endif -void display_point_trajectory(const vpImage &I, const std::vector &vip, +void display_point_trajectory(const vpImage &I, + const std::vector &vip, std::vector *traj_vip) { - for (size_t i = 0; i < vip.size(); i++) { + for (size_t i = 0; i < vip.size(); ++i) { if (traj_vip[i].size()) { // Add the point only if distance with the previous > 1 pixel if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) { @@ -84,8 +83,8 @@ void display_point_trajectory(const vpImage &I, const std::vector traj_vip[i].push_back(vip[i]); } } - for (size_t i = 0; i < vip.size(); i++) { - for (size_t j = 1; j < traj_vip[i].size(); j++) { + for (size_t i = 0; i < vip.size(); ++i) { + for (size_t j = 1; j < traj_vip[i].size(); ++j) { vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2); } } @@ -94,18 +93,20 @@ void display_point_trajectory(const vpImage &I, const std::vector int main(int argc, char **argv) { double opt_tagSize = 0.120; - bool display_tag = true; int opt_quad_decimate = 2; bool opt_verbose = false; bool opt_plot = false; bool opt_adaptive_gain = false; bool opt_task_sequencing = false; - double convergence_threshold_t = 0.0005; // Value in [m] - double convergence_threshold_tu = 0.5; // Value in [deg] + double opt_convergence_threshold_t = 0.0005; // Value in [m] + double opt_convergence_threshold_tu = 0.5; // Value in [deg] - for (int i = 1; i < argc; i++) { - if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) { + bool display_tag = true; + + for (int i = 
1; i < argc; ++i) { + if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) { opt_tagSize = std::stod(argv[i + 1]); + ++i; } else if (std::string(argv[i]) == "--verbose") { opt_verbose = true; @@ -113,30 +114,42 @@ int main(int argc, char **argv) else if (std::string(argv[i]) == "--plot") { opt_plot = true; } - else if (std::string(argv[i]) == "--adaptive_gain") { + else if (std::string(argv[i]) == "--adaptive-gain") { opt_adaptive_gain = true; } - else if (std::string(argv[i]) == "--task_sequencing") { + else if (std::string(argv[i]) == "--task-sequencing") { opt_task_sequencing = true; } - else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) { + else if ((std::string(argv[i]) == "--quad-decimate") && (i + 1 < argc)) { opt_quad_decimate = std::stoi(argv[i + 1]); + ++i; } else if (std::string(argv[i]) == "--no-convergence-threshold") { - convergence_threshold_t = 0.; - convergence_threshold_tu = 0.; + opt_convergence_threshold_t = 0.; + opt_convergence_threshold_tu = 0.; } - else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") { + else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) { std::cout - << argv[0] << " [--tag_size ] " - << "[--quad_decimate ] [--adaptive_gain] [--plot] [--task_sequencing] [--no-convergence-threshold] [--verbose] [--help] [-h]" - << "\n"; + << argv[0] + << " [--tag-size ]" + << " [--quad-decimate ]" + << " [--adaptive-gain]" + << " [--plot]" + << " [--task-sequencing]" + << " [--no-convergence-threshold]" + << " [--verbose]" + << " [--help] [-h]" + << std::endl;; return EXIT_SUCCESS; } } vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); try { std::cout << "WARNING: This example will move the robot! 
" @@ -144,40 +157,26 @@ int main(int argc, char **argv) << "Press Enter to continue..." << std::endl; std::cin.ignore(); - /* - * Move to a safe position - */ - vpColVector q(6, 0); - q[0] = 0.; - q[1] = 0.; - q[2] = 0.; - q[3] = vpMath::rad(0.); - q[4] = vpMath::rad(0.); - q[5] = vpMath::rad(0.); - std::cout << "Move to joint position: " << q.t() << std::endl; - robot.setRobotState(vpRobot::STATE_POSITION_CONTROL); - robot.setPosition(vpRobot::JOINT_STATE, q); - vpRealSense2 rs; rs2::config config; - unsigned int width = 640, height = 480; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); - // Get camera intrinsics - vpCameraParameters cam = - rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithDistortion); - std::cout << "cam:\n" << cam << "\n"; + // Warm up camera + vpImage I; + for (size_t i = 0; i < 10; ++i) { + rs.acquire(I); + } - vpImage I(height, width); + // Get camera intrinsics + vpCameraParameters cam; + robot.getCameraParameters(cam, I); + std::cout << "cam:\n" << cam << std::endl; -#if defined(VISP_HAVE_X11) - vpDisplayX dc(I, 10, 10, "Color image"); -#elif defined(VISP_HAVE_GDI) - vpDisplayGDI dc(I, 10, 10, "Color image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image"); vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11; vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS; @@ -186,29 +185,84 @@ int main(int argc, char **argv) 
 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod); detector.setDisplayTag(display_tag); detector.setAprilTagQuadDecimate(opt_quad_decimate); + detector.setZAlignedWithCameraAxis(true); + + // Tag frame for pose estimation is the following + // - When using + // detector.setZAlignedWithCameraAxis(false); + // detector.detect(); + // we consider the tag frame (o) such as z_o axis is not aligned with camera frame + // (meaning z_o axis is facing the camera) + // + // 3 y 2 + // | + // o--x + // + // 0 1 + // + // In that configuration, it is more difficult to set a desired camera pose c_M_o. + // To ease this step we can introduce an extra transformation matrix o'_M_o to align the axis + // with the camera frame: + // + // o'--x + // | + // y + // + // where + // | 1 0 0 0 | + // o'_M_o = | 0 -1 0 0 | + // | 0 0 -1 0 | + // | 0 0 0 1 | + // + // Defining the desired camera pose in frame o' then becomes easier. + // + // - When using rather + // detector.setZAlignedWithCameraAxis(true); + // detector.detect(); + // we consider the tag frame (o) such as z_o axis is aligned with camera frame + // + // 3 2 + // + // o--x + // | + // 0 y 1 + // + // In that configuration, it is easier to define a desired camera pose c_M_o since all the axes + // (camera frame and tag frame are aligned) // Servo - vpHomogeneousMatrix cdMc, cMo, oMo; + vpHomogeneousMatrix cd_M_c, c_M_o, o_M_o; // Desired pose to reach - vpHomogeneousMatrix cdMo(vpTranslationVector(0, 0, opt_tagSize * 3), // 3 times tag with along camera z axis - vpRotationMatrix({ 1, 0, 0, 0, -1, 0, 0, 0, -1 })); + vpHomogeneousMatrix cd_M_o(vpTranslationVector(0, 0, opt_tagSize * 3.5), // 3.5 times tag width along camera z axis + vpThetaUVector(vpMath::rad(10), vpMath::rad(3), vpMath::rad(5))); + if (!detector.isZAlignedWithCameraAxis()) { + vpHomogeneousMatrix oprim_M_o = { 1, 0, 0, 0, + 0, -1, 0, 0, + 0, 0, -1, 0, + 0, 0, 0, 1 }; + cd_M_o *= oprim_M_o; + } + + cd_M_c = cd_M_o * c_M_o.inverse(); - cdMc = cdMo * 
cMo.inverse(); - vpFeatureTranslation t(vpFeatureTranslation::cdMc); - vpFeatureThetaU tu(vpFeatureThetaU::cdRc); - t.build(cdMc); - tu.build(cdMc); + // Create current visual features + vpFeatureTranslation s_t(vpFeatureTranslation::cdMc); + vpFeatureThetaU s_tu(vpFeatureThetaU::cdRc); + s_t.build(cd_M_c); + s_tu.build(cd_M_c); - vpFeatureTranslation td(vpFeatureTranslation::cdMc); - vpFeatureThetaU tud(vpFeatureThetaU::cdRc); + // Create desired visual features + vpFeatureTranslation s_t_d(vpFeatureTranslation::cdMc); + vpFeatureThetaU s_tu_d(vpFeatureThetaU::cdRc); vpServo task; - task.addFeature(t, td); - task.addFeature(tu, tud); + task.addFeature(s_t, s_t_d); + task.addFeature(s_tu, s_tu_d); task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT); + // Set the gain if (opt_adaptive_gain) { vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 task.setLambda(lambda); @@ -258,8 +312,8 @@ int main(int argc, char **argv) vpDisplay::display(I); - std::vector cMo_vec; - detector.detect(I, opt_tagSize, cam, cMo_vec); + std::vector c_M_o_vec; + detector.detect(I, opt_tagSize, cam, c_M_o_vec); std::stringstream ss; ss << "Left click to " << (send_velocities ? 
"stop the robot" : "servo the robot") << ", right click to quit."; @@ -268,30 +322,30 @@ int main(int argc, char **argv) vpColVector v_c(6); // Only one tag is detected - if (cMo_vec.size() == 1) { - cMo = cMo_vec[0]; + if (c_M_o_vec.size() == 1) { + c_M_o = c_M_o_vec[0]; static bool first_time = true; if (first_time) { // Introduce security wrt tag positioning in order to avoid PI rotation - std::vector v_oMo(2), v_cdMc(2); - v_oMo[1].build(0, 0, 0, 0, 0, M_PI); - for (size_t i = 0; i < 2; i++) { - v_cdMc[i] = cdMo * v_oMo[i] * cMo.inverse(); + std::vector v_o_M_o(2), v_cd_M_c(2); + v_o_M_o[1].build(0, 0, 0, 0, 0, M_PI); + for (size_t i = 0; i < 2; ++i) { + v_cd_M_c[i] = cd_M_o * v_o_M_o[i] * c_M_o.inverse(); } - if (std::fabs(v_cdMc[0].getThetaUVector().getTheta()) < std::fabs(v_cdMc[1].getThetaUVector().getTheta())) { - oMo = v_oMo[0]; + if (std::fabs(v_cd_M_c[0].getThetaUVector().getTheta()) < std::fabs(v_cd_M_c[1].getThetaUVector().getTheta())) { + o_M_o = v_o_M_o[0]; } else { std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl; - oMo = v_oMo[1]; // Introduce PI rotation + o_M_o = v_o_M_o[1]; // Introduce PI rotation } } - // Update visual features - cdMc = cdMo * oMo * cMo.inverse(); - t.build(cdMc); - tu.build(cdMc); + // Update current visual features + cd_M_c = cd_M_o * o_M_o * c_M_o.inverse(); + s_t.build(cd_M_c); + s_tu.build(cd_M_c); if (opt_task_sequencing) { if (!servo_started) { @@ -307,8 +361,8 @@ int main(int argc, char **argv) } // Display desired and current pose features - vpDisplay::displayFrame(I, cdMo * oMo, cam, opt_tagSize / 1.5, vpColor::yellow, 2); - vpDisplay::displayFrame(I, cMo, cam, opt_tagSize / 2, vpColor::none, 3); + vpDisplay::displayFrame(I, cd_M_o * o_M_o, cam, opt_tagSize / 1.5, vpColor::yellow, 2); + vpDisplay::displayFrame(I, c_M_o, cam, opt_tagSize / 2, vpColor::none, 3); // Get tag corners std::vector vip = detector.getPolygon(0); // Get the tag cog corresponding to the projection of the 
tag frame in the image @@ -329,8 +383,8 @@ int main(int argc, char **argv) std::cout << "v_c: " << v_c.t() << std::endl; } - vpTranslationVector cd_t_c = cdMc.getTranslationVector(); - vpThetaUVector cd_tu_c = cdMc.getThetaUVector(); + vpTranslationVector cd_t_c = cd_M_c.getTranslationVector(); + vpThetaUVector cd_tu_c = cd_M_c.getThetaUVector(); double error_tr = sqrt(cd_t_c.sumSquare()); double error_tu = vpMath::deg(sqrt(cd_tu_c.sumSquare())); @@ -344,7 +398,7 @@ int main(int argc, char **argv) if (opt_verbose) std::cout << "error translation: " << error_tr << " ; error rotation: " << error_tu << std::endl; - if (error_tr < convergence_threshold_t && error_tu < convergence_threshold_tu) { + if ((error_tr < opt_convergence_threshold_t) && (error_tu < opt_convergence_threshold_tu)) { has_converged = true; std::cout << "Servo task has converged" << std::endl; ; @@ -354,7 +408,7 @@ int main(int argc, char **argv) if (first_time) { first_time = false; } - } // end if (cMo_vec.size() == 1) + } // end if (c_M_o_vec.size() == 1) else { v_c = 0; } @@ -380,7 +434,6 @@ int main(int argc, char **argv) case vpMouseButton::button3: final_quit = true; - v_c = 0; break; default: @@ -422,10 +475,6 @@ int main(int argc, char **argv) robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; } - catch (const std::exception &e) { - std::cout << "ur_rtde exception: " << e.what() << std::endl; - return EXIT_FAILURE; - } return EXIT_SUCCESS; } diff --git a/example/servo-afma6/servoAfma6Cylinder2DCamVelocity.cpp b/example/servo-afma6/servoAfma6Cylinder2DCamVelocity.cpp index f795460c20..c25c7d35af 100644 --- a/example/servo-afma6/servoAfma6Cylinder2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6Cylinder2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. 
All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,68 +31,98 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! - \file servoAfma6Cylinder2DCamVelocity.cpp - \example servoAfma6Cylinder2DCamVelocity.cpp Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. Visual features are the two lines corresponding to the edges of a cylinder. - */ -#include // std::fabs -#include // numeric_limits -#include +#include + #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_AFMA6) + #include #include #include #include #include #include - #include #include #include #include #include #include -#include - #include - - // Exception -#include #include +#include -int main() -{ #ifdef ENABLE_VISP_NAMESPACE - using namespace VISP_NAMESPACE_NAME; +using namespace VISP_NAMESPACE_NAME; #endif +/** + * To run this example: + * - Put a 8 cm diameter gray cylinder on the table + * - orientate the cylinder along 11:00 and 17:00 (10 or 15 deg) + * - Launch: Afma6_move zero + * - Launch ./servoAfma6Cylinder2DCamVelocity + * - initialize image processing on the right line first, then the left line + */ +int main(int argc, char **argv) +{ + bool opt_verbose = false; + bool opt_adaptive_gain = false; + + for (int i = 1; i < argc; ++i) { + if (std::string(argv[i]) == "--verbose") { + opt_verbose = true; + } + else if (std::string(argv[i]) == "--adaptive-gain") { + opt_adaptive_gain = true; + } + else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) { + std::cout + << argv[0] + << " [--adaptive-gain]" + 
 << " [--verbose]" + << " [--help] [-h]" + << std::endl; + return EXIT_SUCCESS; + } + } + + vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + try { - vpImage I; + std::cout << "WARNING: This example will move the robot! " + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." << std::endl; + std::cin.ignore(); vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera + vpImage I; for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } @@ -108,20 +137,8 @@ int main() vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - - std::cout << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << " Test program for vpServo " << std::endl; - std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; - std::cout << " Simulation " << std::endl; - std::cout << " task : servo a point " << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - - int i; - int nbline = 2; - vpMeLine line[nbline]; + int nblines = 2; + std::vector line(nblines); vpMe me; me.setRange(10); @@ -131,137 +148,165 @@ 
int main() me.setSampleStep(10); // Initialize the tracking of the two edges of the cylinder - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nblines; ++i) { line[i].setDisplay(vpMeSite::RANGE_RESULT); line[i].setMe(&me); line[i].initTracking(I); line[i].track(I); + vpDisplay::flush(I); } - vpRobotAfma6 robot; - robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("zero.pos"); - + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); + std::cout << "cam:\n" << cam << std::endl; - vpTRACE("sets the current position of the visual feature "); - vpFeatureLine p[nbline]; - for (i = 0; i < nbline; i++) { - vpFeatureBuilder::create(p[i], cam, line[i]); + // Sets the current position of the visual feature "); + std::vector s_line(nblines); + for (int i = 0; i < nblines; ++i) { + vpFeatureBuilder::create(s_line[i], cam, line[i]); } - vpTRACE("sets the desired position of the visual feature "); - vpCylinder cyld(0, 1, 0, 0, 0, 0, 0.04); + // Sets the desired position of the visual feature "); + vpCylinder cylinder(0, 1, 0, 0, 0, 0, 0.04); - vpHomogeneousMatrix cMo(0, 0, 0.4, 0, 0, vpMath::rad(0)); + vpHomogeneousMatrix c_M_o(0, 0, 0.4, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0)); - cyld.project(cMo); + cylinder.project(c_M_o); - vpFeatureLine pd[nbline]; - vpFeatureBuilder::create(pd[0], cyld, vpCylinder::line1); - vpFeatureBuilder::create(pd[1], cyld, vpCylinder::line2); + std::vector s_line_d(nblines); + vpFeatureBuilder::create(s_line_d[0], cylinder, vpCylinder::line1); + vpFeatureBuilder::create(s_line_d[1], cylinder, vpCylinder::line2); - // Those lines are needed to keep the conventions define in vpMeLine - // (Those in vpLine are less restrictive) Another way to have the - // coordinates of the desired features is to learn them before executing - // the program. 
- pd[0].setRhoTheta(-fabs(pd[0].getRho()), 0); - pd[1].setRhoTheta(-fabs(pd[1].getRho()), M_PI); + // { + // std::cout << "Desired features: " << std::endl; + // std::cout << " - line 1: rho: " << s_line_d[0].getRho() << " theta: " << vpMath::deg(s_line_d[0].getTheta()) << "deg" << std::endl; + // std::cout << " - line 2: rho: " << s_line_d[1].getRho() << " theta: " << vpMath::deg(ss_line_d_d[1].getTheta()) << "deg" << std::endl; + // } - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + // Next 2 lines are needed to keep the conventions defined in vpMeLine + s_line_d[0].setRhoTheta(+fabs(s_line_d[0].getRho()), 0); + s_line_d[1].setRhoTheta(+fabs(s_line_d[1].getRho()), M_PI); + // { + // std::cout << "Modified desired features: " << std::endl; + // std::cout << " - line 1: rho: " << s_line_d[0].getRho() << " theta: " << vpMath::deg(s_line_d[0].getTheta()) << "deg" << std::endl; + // std::cout << " - line 2: rho: " << s_s_line_dd[1].getRho() << " theta: " << vpMath::deg(s_line_d[1].getTheta()) << "deg" << std::endl; + // } + + // Define the task + vpServo task; + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); - - vpTRACE("\t we want to see a two lines on two lines.."); - std::cout << std::endl; - for (i = 0; i < nbline; i++) { - task.addFeature(p[i], pd[i]); + // - we want to see a two lines on two lines + for (int i = 0; i < nblines; ++i) { + task.addFeature(s_line[i], s_line_d[i]); } - vpTRACE("\t set the gain"); - task.setLambda(0.2); + // Set the gain + if (opt_adaptive_gain) { + vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 + task.setLambda(lambda); + } + else { + task.setLambda(0.5); + } - vpTRACE("Display task information "); + // Display task information task.print(); - 
robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - vpImage Ic; - double lambda_av = 0.05; - double alpha = 0.2; - double beta = 3; - bool quit = false; - while (!quit) { - std::cout << "---------------------------------------------" << iter << std::endl; - - try { - rs.acquire(I); - vpDisplay::display(I); + vpColVector v_c(6); + bool final_quit = false; + bool send_velocities = false; - // Track the two edges and update the features - for (i = 0; i < nbline; i++) { - line[i].track(I); - line[i].display(I, vpColor::red); + while (!final_quit) { + double t_start = vpTime::measureTimeMs(); - vpFeatureBuilder::create(p[i], cam, line[i]); - vpTRACE("%f %f ", line[i].getRho(), line[i].getTheta()); + rs.acquire(I); + vpDisplay::display(I); - p[i].display(cam, I, vpColor::red); - pd[i].display(cam, I, vpColor::green); - } + std::stringstream ss; + ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit."; + vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red); + // Track the two edges and update the features + for (int i = 0; i < nblines; ++i) { + line[i].track(I); + line[i].display(I, vpColor::red); - // Adaptative gain - double gain; - { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } - task.setLambda(gain); + vpFeatureBuilder::create(s_line[i], cam, line[i]); + //std::cout << "line " << i << " rho: " << s[i].getRho() << " theta: " << vpMath::deg(s[i].getTheta()) << " deg" << std::endl; + + s_line[i].display(cam, I, vpColor::red); + s_line_d[i].display(cam, I, vpColor::green); + } + + v_c = task.computeControlLaw(); + + if (opt_verbose) { + std::cout << "v: " << v_c.t() << std::endl; + std::cout << "\t\t || s - s* || = " << task.getError().sumSquare() << std::endl;; + } + + if (!send_velocities) { + v_c = 0; + } - v = 
task.computeControlLaw(); + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); - if (iter == 0) { - vpDisplay::getClick(I); + ss.str(""); + ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms"; + vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red); + vpDisplay::flush(I); + + vpMouseButton::vpMouseButtonType button; + if (vpDisplay::getClick(I, button, false)) { + switch (button) { + case vpMouseButton::button1: + send_velocities = !send_velocities; + break; + + case vpMouseButton::button3: + final_quit = true; + break; + + default: + break; } - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + } + } + + std::cout << "Stop the robot " << std::endl; + robot.setRobotState(vpRobot::STATE_STOP); + + if (!final_quit) { + while (!final_quit) { + rs.acquire(I); + vpDisplay::display(I); + + vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red); + vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red); - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); if (vpDisplay::getClick(I, false)) { - quit = true; + final_quit = true; } + vpDisplay::flush(I); } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - return EXIT_FAILURE; - } - - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; } - - vpTRACE("Display task information "); task.print(); - return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "ViSP exception: " << e.what() << std::endl; + std::cout << "Stop the robot " << std::endl; + robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; } + + return EXIT_SUCCESS; } #else diff --git a/example/servo-afma6/servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp b/example/servo-afma6/servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp index 171e24f705..b60fb403a0 100644 --- a/example/servo-afma6/servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp +++ b/example/servo-afma6/servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,13 +31,10 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! - \file servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp - \example servoAfma6Cylinder2DCamVelocitySecondaryTask.cpp Example of eye-in-hand control law. We control here a real robot, @@ -49,55 +45,80 @@ This example illustrates in one hand a classical visual servoing with a cylinder. And in the other hand it illustrates the behaviour of the robot when adding a secondary task. 
- */ -#include // std::fabs -#include // numeric_limits -#include +#include + #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_AFMA6) + #include #include #include #include #include #include - #include #include #include #include #include #include -#include - #include - -// Exception -#include #include +#include -int main() -{ #ifdef ENABLE_VISP_NAMESPACE - using namespace VISP_NAMESPACE_NAME; +using namespace VISP_NAMESPACE_NAME; #endif +int main(int argc, char **argv) +{ + bool opt_verbose = false; + bool opt_adaptive_gain = false; + + for (int i = 1; i < argc; ++i) { + if (std::string(argv[i]) == "--verbose") { + opt_verbose = true; + } + else if (std::string(argv[i]) == "--adaptive-gain") { + opt_adaptive_gain = true; + } + else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) { + std::cout + << argv[0] + << " [--adaptive-gain]" + << " [--verbose]" + << " [--help] [-h]" + << std::endl;; + return EXIT_SUCCESS; + } + } + + vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + try { - vpImage I; + std::cout << "WARNING: This example will move the robot! " + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." 
<< std::endl; + std::cin.ignore(); vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera + vpImage I; for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } @@ -109,260 +130,277 @@ int main() #elif defined(VISP_HAVE_GTK) vpDisplayGTK display(I, 100, 100, "Current image"); #endif - vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - - std::cout << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << " Test program for vpServo " << std::endl; - std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; - std::cout << " Simulation " << std::endl; - std::cout << " task : servo a point " << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - - int i; - int nbline = 2; - vpMeLine line[nbline]; + int nblines = 2; + std::vector line(nblines); vpMe me; - me.setRange(20); + me.setRange(10); me.setPointsToTrack(100); me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD); me.setThreshold(15); me.setSampleStep(10); // Initialize the tracking of the two edges of the cylinder - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nblines; ++i) { line[i].setDisplay(vpMeSite::RANGE_RESULT); line[i].setMe(&me); line[i].initTracking(I); line[i].track(I); + vpDisplay::flush(I); } - vpRobotAfma6 robot; - robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, 
vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("zero.pos"); - + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); + std::cout << "cam:\n" << cam << std::endl; - vpTRACE("sets the current position of the visual feature "); - vpFeatureLine p[nbline]; - for (i = 0; i < nbline; i++) - vpFeatureBuilder::create(p[i], cam, line[i]); + // Sets the current position of the visual feature "); + std::vector s_line(nblines); + for (int i = 0; i < nblines; ++i) { + vpFeatureBuilder::create(s_line[i], cam, line[i]); + } - vpTRACE("sets the desired position of the visual feature "); - vpCylinder cyld(0, 1, 0, 0, 0, 0, 0.04); + // Sets the desired position of the visual feature "); + vpCylinder cylinder(0, 1, 0, 0, 0, 0, 0.04); - vpHomogeneousMatrix cMo(0, 0, 0.5, 0, 0, vpMath::rad(0)); + vpHomogeneousMatrix c_M_o(0, 0, 0.4, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0)); - cyld.project(cMo); + cylinder.project(c_M_o); - vpFeatureLine pd[nbline]; - vpFeatureBuilder::create(pd[0], cyld, vpCylinder::line1); - vpFeatureBuilder::create(pd[1], cyld, vpCylinder::line2); + std::vector s_line_d(nblines); + vpFeatureBuilder::create(s_line_d[0], cylinder, vpCylinder::line1); + vpFeatureBuilder::create(s_line_d[1], cylinder, vpCylinder::line2); - // Those lines are needed to keep the conventions define in vpMeLine - // (Those in vpLine are less restrictive) Another way to have the - // coordinates of the desired features is to learn them before executing - // the program. 
- pd[0].setRhoTheta(-fabs(pd[0].getRho()), 0); - pd[1].setRhoTheta(-fabs(pd[1].getRho()), M_PI); + { + std::cout << "Desired features: " << std::endl; + std::cout << " - line 1: rho: " << s_line_d[0].getRho() << " theta: " << vpMath::deg(s_line_d[0].getTheta()) << "deg" << std::endl; + std::cout << " - line 2: rho: " << s_line_d[1].getRho() << " theta: " << vpMath::deg(s_line_d[1].getTheta()) << "deg" << std::endl; + } - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + // { + // std::cout << "Desired features: " << std::endl; + // std::cout << " - line 1: rho: " << s_d[0].getRho() << " theta: " << vpMath::deg(s_d[0].getTheta()) << "deg" << std::endl; + // std::cout << " - line 2: rho: " << s_d[1].getRho() << " theta: " << vpMath::deg(s_d[1].getTheta()) << "deg" << std::endl; + // } + + // Next 2 lines are needed to keep the conventions defined in vpMeLine + s_line_d[0].setRhoTheta(+fabs(s_line_d[0].getRho()), 0); + s_line_d[1].setRhoTheta(+fabs(s_line_d[1].getRho()), M_PI); + // { + // std::cout << "Modified desired features: " << std::endl; + // std::cout << " - line 1: rho: " << s_d[0].getRho() << " theta: " << vpMath::deg(s_d[0].getTheta()) << "deg" << std::endl; + // std::cout << " - line 2: rho: " << s_d[1].getRho() << " theta: " << vpMath::deg(s_d[1].getTheta()) << "deg" << std::endl; + // } + + // Define the task + vpServo task; + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); + // - we want to see a two lines on two lines + for (int i = 0; i < nblines; ++i) { + task.addFeature(s_line[i], s_line_d[i]); + } - vpTRACE("\t we want to see a point on a point.."); - std::cout << std::endl; - for (i = 0; i < nbline; i++) - task.addFeature(p[i], pd[i]); - - vpTRACE("\t set the gain"); - task.setLambda(0.3); + // Set the gain 
+ if (opt_adaptive_gain) { + vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 + task.setLambda(lambda); + } + else { + task.setLambda(0.5); + } - vpTRACE("Display task information "); + // Display task information task.print(); - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - vpImage Ic; - double lambda_av = 0.05; - double alpha = 0.02; - double beta = 3; - double erreur = 1; - bool quit = false; + vpColVector v_c(6); + bool final_quit = false; + bool send_velocities = false; + double task_error = 1.; // First loop to reach the convergence position - while ((erreur > 0.00001) && (!quit)) { - std::cout << "---------------------------------------------" << iter << std::endl; + while ((task_error > 0.00001) && (!final_quit)) { + double t_start = vpTime::measureTimeMs(); - try { - rs.acquire(I); - vpDisplay::display(I); + rs.acquire(I); + vpDisplay::display(I); - // Track the two edges and update the features - for (i = 0; i < nbline; i++) { - line[i].track(I); - line[i].display(I, vpColor::red); + std::stringstream ss; + ss << "Left click to " << (send_velocities ? 
"stop the robot" : "servo the robot") << ", right click to quit."; + vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red); - vpFeatureBuilder::create(p[i], cam, line[i]); + // Track the two edges and update the features + for (int i = 0; i < nblines; ++i) { + line[i].track(I); + line[i].display(I, vpColor::red); - p[i].display(cam, I, vpColor::red); - pd[i].display(cam, I, vpColor::green); - } + vpFeatureBuilder::create(s_line[i], cam, line[i]); + //std::cout << "line " << i << " rho: " << s[i].getRho() << " theta: " << vpMath::deg(s[i].getTheta()) << " deg" << std::endl; - // Adaptative gain - double gain; - { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } - task.setLambda(gain); + s_line[i].display(cam, I, vpColor::red); + s_line_d[i].display(cam, I, vpColor::green); + } - v = task.computeControlLaw(); + v_c = task.computeControlLaw(); + task_error = task.getError().sumSquare(); - if (iter == 0) { - vpDisplay::getClick(I); - } + if (opt_verbose) { + std::cout << "v: " << v_c.t() << std::endl; + std::cout << "\t\t || s - s* || = " << task_error << std::endl;; + } + + if (!send_velocities) { + v_c = 0; + } - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); - if (vpDisplay::getClick(I, false)) { - quit = true; + vpDisplay::flush(I); + + ss.str(""); + ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms"; + vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red); + vpDisplay::flush(I); + + vpMouseButton::vpMouseButtonType button; + if (vpDisplay::getClick(I, button, false)) { + switch (button) { + case vpMouseButton::button1: + send_velocities = !send_velocities; + break; + + case vpMouseButton::button3: + final_quit = true; + break; + + default: + break; } - vpDisplay::flush(I); - } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - return EXIT_FAILURE; } - - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - erreur = (task.getError()).sumSquare(); - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; } - /**********************************************************************************************/ - - // Second loop is to compute the control while taking into account the - // secondary task. - vpColVector e1(6); - e1 = 0; - vpColVector e2(6); - e2 = 0; - vpColVector proj_e1; - vpColVector proj_e2; - iter = 0; - double rapport = 0; - double vitesse = 0.02; + // Second loop is to compute the control while taking into account the secondary task. + vpColVector e1(6, 0); + vpColVector e2(6, 0); + vpColVector proj_e1, proj_e2; + unsigned long iter = 0; + double secondary_task_speed = 0.02; // 2cm/s unsigned int tempo = 1200; - quit = false; - while (!quit) { - std::cout << "---------------------------------------------" << iter << std::endl; - try { - rs.acquire(I); - vpDisplay::display(I); + while (!final_quit) { + double t_start = vpTime::measureTimeMs(); - // Track the two edges and update the features - for (i = 0; i < nbline; i++) { - line[i].track(I); - line[i].display(I, vpColor::red); + rs.acquire(I); + vpDisplay::display(I); - vpFeatureBuilder::create(p[i], cam, line[i]); + std::stringstream ss; + ss << "Left click to " << (send_velocities ? 
"stop the robot" : "servo the robot") << ", right click to quit."; + vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red); - p[i].display(cam, I, vpColor::red); - pd[i].display(cam, I, vpColor::green); - } + // Track the two edges and update the features + for (int i = 0; i < nblines; ++i) { + line[i].track(I); + line[i].display(I, vpColor::red); - v = task.computeControlLaw(); - - // Compute the new control law corresponding to the secondary task - if (iter % tempo < 400 /*&& iter%tempo >= 0*/) { - e2 = 0; - e1[0] = fabs(vitesse); - proj_e1 = task.secondaryTask(e1); - rapport = vitesse / proj_e1[0]; - proj_e1 *= rapport; - v += proj_e1; - if (iter == 199) - iter += 200; // This line is needed to make on ly an half turn - // during the first cycle - } + vpFeatureBuilder::create(s_line[i], cam, line[i]); + //std::cout << "line " << i << " rho: " << s[i].getRho() << " theta: " << vpMath::deg(s[i].getTheta()) << " deg" << std::endl; - if (iter % tempo < 600 && iter % tempo >= 400) { - e1 = 0; - e2[1] = fabs(vitesse); - proj_e2 = task.secondaryTask(e2); - rapport = vitesse / proj_e2[1]; - proj_e2 *= rapport; - v += proj_e2; - } + s_line[i].display(cam, I, vpColor::red); + s_line_d[i].display(cam, I, vpColor::green); + } - if (iter % tempo < 1000 && iter % tempo >= 600) { - e2 = 0; - e1[0] = -fabs(vitesse); - proj_e1 = task.secondaryTask(e1); - rapport = -vitesse / proj_e1[0]; - proj_e1 *= rapport; - v += proj_e1; - } + v_c = task.computeControlLaw(); + + // Compute the new control law corresponding to the secondary task + if (iter % tempo < 400 /*&& iter%tempo >= 0*/) { + e2 = 0; + e1[0] = fabs(secondary_task_speed); + proj_e1 = task.secondaryTask(e1); + double frac = secondary_task_speed / proj_e1[0]; + proj_e1 *= frac; + v_c += proj_e1; + if (iter == 199) + iter += 200; // This line is needed to make only an half turn // during the first cycle + } - if (iter % tempo < 1200 && iter % tempo >= 1000) { - e1 = 0; - e2[1] = -fabs(vitesse); - proj_e2 = 
task.secondaryTask(e2); - rapport = -vitesse / proj_e2[1]; - proj_e2 *= rapport; - v += proj_e2; - } + if (iter % tempo < 600 && iter % tempo >= 400) { + e1 = 0; + e2[1] = fabs(secondary_task_speed); + proj_e2 = task.secondaryTask(e2); + double frac = secondary_task_speed / proj_e2[1]; + proj_e2 *= frac; + v_c += proj_e2; + } - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + if (iter % tempo < 1000 && iter % tempo >= 600) { + e2 = 0; + e1[0] = -fabs(secondary_task_speed); + proj_e1 = task.secondaryTask(e1); + double frac = -secondary_task_speed / proj_e1[0]; + proj_e1 *= frac; + v_c += proj_e1; + } - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); - if (vpDisplay::getClick(I, false)) { - quit = true; - } - vpDisplay::flush(I); + if (iter % tempo < 1200 && iter % tempo >= 1000) { + e1 = 0; + e2[1] = -fabs(secondary_task_speed); + proj_e2 = task.secondaryTask(e2); + double frac = -secondary_task_speed / proj_e2[1]; + proj_e2 *= frac; + v_c += proj_e2; } - catch (...) { - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - return EXIT_FAILURE; + + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); + + ss.str(""); + ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms"; + vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red); + vpDisplay::displayText(I, 60, 20, "Secondary task started", vpColor::red); + vpDisplay::flush(I); + + vpMouseButton::vpMouseButtonType button; + if (vpDisplay::getClick(I, button, false)) { + final_quit = true; } - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); iter++; } - vpTRACE("Display task information "); + std::cout << "Stop the robot " << std::endl; + robot.setRobotState(vpRobot::STATE_STOP); + + if (!final_quit) { + while (!final_quit) { + rs.acquire(I); + vpDisplay::display(I); + + vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red); + vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red); + + if 
(vpDisplay::getClick(I, false)) { + final_quit = true; + } + + vpDisplay::flush(I); + } + } task.print(); - return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "ViSP exception: " << e.what() << std::endl; + std::cout << "Stop the robot " << std::endl; + robot.setRobotState(vpRobot::STATE_STOP); return EXIT_FAILURE; } + + return EXIT_SUCCESS; } #else diff --git a/example/servo-afma6/servoAfma6Ellipse2DCamVelocity.cpp b/example/servo-afma6/servoAfma6Ellipse2DCamVelocity.cpp index f417fd8b37..1a9e31d048 100644 --- a/example/servo-afma6/servoAfma6Ellipse2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6Ellipse2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,25 +31,15 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! \file servoAfma6Ellipse2DCamVelocity.cpp + \example servoAfma6Ellipse2DCamVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. The used visual feature is a circle. - -*/ - -/*! - \example servoAfma6Ellipse2DCamVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in the camera frame. The used visual feature is a circle. 
- */ #include // std::fabs @@ -93,9 +82,10 @@ int main() vpImage I; vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera @@ -114,14 +104,12 @@ int main() vpDisplay::display(I); vpDisplay::flush(I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; std::cout << " Simulation " << std::endl; std::cout << " task : servo a point " << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; vpDot dot; @@ -136,12 +124,11 @@ int main() dot.track(I); - vpCameraParameters cam; - vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // Update camera parameters + // Get camera intrinsics + vpCameraParameters cam; robot.getCameraParameters(cam, I); vpTRACE("sets the current position of the visual feature "); diff --git a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp index 5deed9add6..0d32040fcc 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DArtVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source 
Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,62 +31,38 @@ * tests the control law * eye-in-hand control * velocity computed in the articular frame - * -*****************************************************************************/ + */ + /*! \file servoAfma6FourPoints2DArtVelocity.cpp + \example servoAfma6FourPoints2DArtVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in articular. Visual features are the image coordinates of 4 vdot points. - -*/ - -/*! - \example servoAfma6FourPoints2DArtVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in articular. Visual features are the image coordinates of 4 vdot points. - */ -#include #include -#include -#include -#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) -#include +#include #include -#include -#include +#include +#include +#include #include #include #include #include - -// Exception -#include #include -#include +// Define the object CAD model +// Here we consider 4 black blobs whose centers are located on the corners of a square. 
+#define L 0.06 // To deal with a 12cm by 12cm square -// Define the square CAD model -// Square dimension -#define L 0.075 // Distance between the camera and the square at the desired // position after visual servoing convergence #define D 0.5 @@ -104,13 +79,12 @@ int main() // - the 6 measured joint velocities (m/s, rad/s) // - the 6 measured joint positions (m, rad) // - the 8 values of s - s* - std::string username; + // Get the user login name - vpIoTools::getUserName(username); + std::string username = vpIoTools::getUserName(); // Create a log filename to save velocities... - std::string logdirname; - logdirname = "/tmp/" + username; + std::string logdirname = "/tmp/" + username; // Test if the output path exist. If no try to create it if (vpIoTools::checkDirectory(logdirname) == false) { @@ -124,117 +98,108 @@ int main() return EXIT_FAILURE; } } - std::string logfilename; - logfilename = logdirname + "/log.dat"; + std::string logfilename = logdirname + "/log.dat"; // Open the log file name std::ofstream flog(logfilename.c_str()); try { - vpServo task; - - vpImage I; - int i; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); 
-#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - vpDot dot[4]; - vpImagePoint cog; + std::vector dot(4); std::cout << "Click on the 4 dots clockwise starting from upper/left dot..." << std::endl; - - for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { dot[i].initTracking(I); - cog = dot[i].getCog(); + vpImagePoint cog = dot[i].getCog(); vpDisplay::displayCross(I, cog, 10, vpColor::blue); vpDisplay::flush(I); } vpRobotAfma6 robot; - robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; - vpCameraParameters cam; + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); - // Update camera parameters + // Get camera intrinsics + vpCameraParameters cam; robot.getCameraParameters(cam, I); // Sets the current position of the visual feature - vpFeaturePoint p[4]; - for (i = 0; i < 4; i++) - vpFeatureBuilder::create(p[i], cam, dot[i]); // retrieve x,y and Z of the vpPoint structure - - // sets the desired position of the visual feature - vpFeaturePoint pd[4]; - - pd[0].build(-L, -L, D); - pd[1].build(L, -L, D); - pd[2].build(L, L, D); - pd[3].build(-L, L, D); - - // We want to see a 
point on a point - std::cout << std::endl; - for (i = 0; i < 4; i++) { - task.addFeature(p[i], pd[i]); + std::vector s(4); + for (size_t i = 0; i < s.size(); ++i) { + vpFeatureBuilder::create(s[i], cam, dot[i]); // retrieve x,y of the vpFeaturePoint structure } - // Set the proportional gain - task.setLambda(0.2); + // Sets the desired position of the visual feature + vpFeaturePoint s_d[4]; - // Display task information - task.print(); + s_d[0].build(-L/D, -L/D, D); + s_d[1].build(+L/D, -L/D, D); + s_d[2].build(+L/D, +L/D, D); + s_d[3].build(-L/D, +L/D, D); // Define the task // - we want an eye-in-hand control law - // - articular velocity are computed + // - joint velocity are computed + // - Interaction matrix is computed with the current visual features + vpServo task; task.setServo(vpServo::EYEINHAND_L_cVe_eJe); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); - task.print(); - vpVelocityTwistMatrix cVe; - robot.get_cVe(cVe); - task.set_cVe(cVe); - task.print(); + // We want to see a point on a point + for (size_t i = 0; i < s.size(); ++i) { + task.addFeature(s[i], s_d[i]); + } + + // Set task proportional gain + task.setLambda(0.4); + + // Set the camera to end-effector velocity twist matrix transformation + vpVelocityTwistMatrix c_V_e; + robot.get_cVe(c_V_e); + task.set_cVe(c_V_e); // Set the Jacobian (expressed in the end-effector frame) - vpMatrix eJe; - robot.get_eJe(eJe); - task.set_eJe(eJe); + vpMatrix e_J_e; + robot.get_eJe(e_J_e); + task.set_eJe(e_J_e); + + // Display task information task.print(); // Initialise the velocity control of the robot robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush; + bool quit = false; while (!quit) { // Acquire a new image from the camera @@ -243,64 +208,46 @@ int main() // Display this image vpDisplay::display(I); - try { - // For each point... 
- for (i = 0; i < 4; i++) { - // Achieve the tracking of the dot in the image - dot[i].track(I); - // Get the dot cog - cog = dot[i].getCog(); - // Display a green cross at the center of gravity position in the - // image - vpDisplay::displayCross(I, cog, 10, vpColor::green); - } - } - catch (...) { - flog.close(); // Close the log file - vpTRACE("Error detected while tracking visual features"); - robot.stopMotion(); - return EXIT_FAILURE; + // For each point... + for (size_t i = 0; i < dot.size(); ++i) { + // Achieve the tracking of the dot in the image + dot[i].track(I); + // Update the point feature from the dot location + vpFeatureBuilder::create(s[i], cam, dot[i]); } - // Update the point feature from the dot location - for (i = 0; i < 4; i++) - vpFeatureBuilder::create(p[i], cam, dot[i]); + // Get the Jacobian of the robot + robot.get_eJe(e_J_e); - // Get the jacobian of the robot - robot.get_eJe(eJe); // Update this jacobian in the task structure. It will be used to - // compute the velocity skew (as an articular velocity) qdot = -lambda * - // L^+ * cVe * eJe * (s-s*) - task.set_eJe(eJe); + task.set_eJe(e_J_e); - vpColVector v; - // Compute the visual servoing skew vector - v = task.computeControlLaw(); + // Compute the visual servoing skew vector (as a joint velocity) + // qdot = -lambda * L^+ * cVe * eJe * (s-s*) + vpColVector qdot = task.computeControlLaw(); // Display the current and desired feature points in the image display vpServoDisplay::display(task, cam, I); // Apply the computed joint velocities to the robot - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + robot.setVelocity(vpRobot::JOINT_STATE, qdot); // Save velocities applied to the robot in the log file - // v[0], v[1], v[2] correspond to joint translation velocities in m/s - // v[3], v[4], v[5] correspond to joint rotation velocities in rad/s - flog << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5] << " "; + // qdot[0], qdot[1], qdot[2] correspond to 
joint translation velocities in m/s + // qdot[3], qdot[4], qdot[5] correspond to joint rotation velocities in rad/s + flog << qdot[0] << " " << qdot[1] << " " << qdot[2] << " " << qdot[3] << " " << qdot[4] << " " << qdot[5] << " "; // Get the measured joint velocities of the robot - vpColVector qvel; - robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel); + vpColVector qdot_mes; + robot.getVelocity(vpRobot::JOINT_STATE, qdot_mes); // Save measured joint velocities of the robot in the log file: - // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation - // velocities in m/s - // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation - // velocities in rad/s - flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " " << qvel[3] << " " << qvel[4] << " " << qvel[5] << " "; + // - qdot_mes[0], qdot_mes[1], qdot_mes[2] correspond to measured joint translation velocities in m/s + // - qdot_mes[3], qdot_mes[4], qdot_mes[5] correspond to measured joint rotation velocities in rad/s + flog << qdot_mes[0] << " " << qdot_mes[1] << " " << qdot_mes[2] << " " << qdot_mes[3] << " " << qdot_mes[4] << " " << qdot_mes[5] << " "; // Get the measured joint positions of the robot vpColVector q; - robot.getPosition(vpRobot::ARTICULAR_FRAME, q); + robot.getPosition(vpRobot::JOINT_STATE, q); // Save measured joint positions of the robot in the log file // - q[0], q[1], q[2] correspond to measured joint translation // positions in m @@ -321,14 +268,18 @@ int main() vpDisplay::flush(I); } - vpTRACE("Display task information "); + // Close the log file + flog.close(); + + // Display task information task.print(); - flog.close(); // Close the log file + return EXIT_SUCCESS; } catch (const vpException &e) { - flog.close(); // Close the log file - std::cout << "Test failed with exception: " << e << std::endl; + // Close the log file + flog.close(); + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git 
a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp index 07ff6d1f0e..5a642b9c67 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,52 +31,29 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! \file servoAfma6FourPoints2DCamVelocityLs_cur.cpp + \example servoAfma6FourPoints2DCamVelocityLs_cur.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. Visual features are the image coordinates of 4 vpDot2 points. The interaction matrix is computed using the current visual features. - -*/ - -/*! - \example servoAfma6FourPoints2DCamVelocityLs_cur.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in the camera frame. Visual features are the image coordinates of 4 vpDot2 - points. The interaction matrix is computed using the current visual - features. 
- */ -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) -#include -#include +#include #include -#include -#include -#include -#include -#include +#include +#include +#include #include #include #include @@ -85,61 +61,48 @@ #include #include -// Exception -#include - -#define L 0.06 // to deal with a 12cm by 12cm square +// Define the object CAD model +// Here we consider 4 black blobs whose centers are located on the corners of a square. +#define L 0.06 // To deal with a 12cm by 12cm square #ifdef ENABLE_VISP_NAMESPACE using namespace VISP_NAMESPACE_NAME; #endif /*! - - Compute the pose \e cMo from the 3D coordinates of the points \e point and + Compute the pose \e c_M_o from the 3D coordinates of the points \e point and their corresponding 2D coordinates \e dot. The pose is computed using a Lowe non linear method. \param point : 3D coordinates of the points. - \param dot : 2D coordinates of the points. - - \param ndot : Number of points or dots used for the pose estimation. - \param cam : Intrinsic camera parameters. - - \param cMo : Homogeneous matrix in output describing the transformation + \param c_M_o : Homogeneous matrix in output describing the transformation between the camera and object frame. - \param init : Indicates if the we have to estimate an initial pose with Lagrange or Dementhon methods. 
- */ -void compute_pose(vpPoint point[], vpDot2 dot[], int ndot, vpCameraParameters cam, vpHomogeneousMatrix &cMo, bool init) +void compute_pose(std::vector &point, const std::vector &dot, const vpCameraParameters &cam, + vpHomogeneousMatrix &c_M_o, bool init) { - vpRotationMatrix cRo; + vpRotationMatrix c_R_o; vpPose pose; vpImagePoint cog; - for (int i = 0; i < ndot; i++) { + for (size_t i = 0; i < point.size(); ++i) { double x = 0, y = 0; - cog = dot[i].getCog(); - vpPixelMeterConversion::convertPoint(cam, cog, x, - y); // pixel to meter conversion - // std::cout << "point cam: " << i << x << " " << y << std::endl; - point[i].set_x(x); // projection perspective p + vpPixelMeterConversion::convertPoint(cam, cog, x, y); // Pixel to meter conversion + point[i].set_x(x); // Perspective projection point[i].set_y(y); pose.addPoint(point[i]); - // std::cout << "point " << i << std::endl; - // point[i].print(); } if (init == true) { - pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, cMo); + pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, c_M_o); } else { // init = false; use of the previous pose to initialise VIRTUAL_VS - pose.computePose(vpPose::VIRTUAL_VS, cMo); + pose.computePose(vpPose::VIRTUAL_VS, c_M_o); } } @@ -151,15 +114,14 @@ int main() // - the 6 measured joint velocities (m/s, rad/s) // - the 6 measured joint positions (m, rad) // - the 8 values of s - s* - // - the 6 values of the pose cMo (tx,ty,tz, rx,ry,rz) with translation + // - the 6 values of the pose c_M_o (tx,ty,tz, rx,ry,rz) with translation // in meters and rotations in radians - std::string username; + // Get the user login name - vpIoTools::getUserName(username); + std::string username = vpIoTools::getUserName(); // Create a log filename to save velocities... - std::string logdirname; - logdirname = "/tmp/" + username; + std::string logdirname = "/tmp/" + username; // Test if the output path exist. 
If no try to create it if (vpIoTools::checkDirectory(logdirname) == false) { @@ -173,41 +135,32 @@ int main() return EXIT_FAILURE; } } - std::string logfilename; - logfilename = logdirname + "/log.dat"; + std::string logfilename = logdirname + "/log.dat"; // Open the log file name std::ofstream flog(logfilename.c_str()); try { - vpServo task; - - vpImage I; - int i; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; @@ -215,78 +168,77 @@ int main() std::cout << " Interaction matrix computed with the current features " << std::endl; std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - 
vpDot2 dot[4]; - vpImagePoint cog; + std::vector dot(4); std::cout << "Click on the 4 dots clockwise starting from upper/left dot..." << std::endl; - for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { dot[i].initTracking(I); - cog = dot[i].getCog(); + vpImagePoint cog = dot[i].getCog(); vpDisplay::displayCross(I, cog, 10, vpColor::blue); vpDisplay::flush(I); } - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; // Load the end-effector to camera frame transformation obtained // using a camera intrinsic model with distortion robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); // Sets the current position of the visual feature - vpFeaturePoint p[4]; - for (i = 0; i < 4; i++) - vpFeatureBuilder::create(p[i], cam, dot[i]); // retrieve x,y of the vpFeaturePoint structure + std::vector s(4); + for (size_t i = 0; i < s.size(); ++i) { + vpFeatureBuilder::create(s[i], cam, dot[i]); // retrieve x,y of the vpFeaturePoint structure + } // Set the position of the square target in a frame which origin is // centered in the middle of the square - vpPoint point[4]; + std::vector point(4); point[0].setWorldCoordinates(-L, -L, 0); - point[1].setWorldCoordinates(L, -L, 0); - point[2].setWorldCoordinates(L, L, 0); - point[3].setWorldCoordinates(-L, L, 0); + point[1].setWorldCoordinates(+L, -L, 0); + point[2].setWorldCoordinates(+L, +L, 0); + point[3].setWorldCoordinates(-L, +L, 0); // Initialise a desired pose to compute s*, the desired 2D point features - vpHomogeneousMatrix cMo; - vpTranslationVector cto(0, 0, 0.5); // tz = 0.7 meter - vpRxyzVector cro(vpMath::rad(0), vpMath::rad(0), - vpMath::rad(0)); // No rotations - vpRotationMatrix cRo(cro); // Build 
the rotation matrix - cMo.build(cto, cRo); // Build the homogeneous matrix + vpHomogeneousMatrix c_M_o; + vpTranslationVector c_t_o(0, 0, 0.5); // tz = 0.5 meter + vpRxyzVector c_r_o(vpMath::rad(0), vpMath::rad(0), vpMath::rad(0)); // No rotations + vpRotationMatrix c_R_o(c_r_o); // Build the rotation matrix + c_M_o.build(c_t_o, c_R_o); // Build the homogeneous matrix // Sets the desired position of the 2D visual feature - vpFeaturePoint pd[4]; + std::vector s_d(4); // Compute the desired position of the features from the desired pose - for (int i = 0; i < 4; i++) { + for (size_t i = 0; i < s_d.size(); ++i) { vpColVector cP, p; - point[i].changeFrame(cMo, cP); + point[i].changeFrame(c_M_o, cP); point[i].projection(cP, p); - pd[i].set_x(p[0]); - pd[i].set_y(p[1]); - pd[i].set_Z(cP[2]); + s_d[i].set_x(p[0]); + s_d[i].set_y(p[1]); + s_d[i].set_Z(cP[2]); } // Define the task // - we want an eye-in-hand control law // - robot is controlled in the camera frame // - Interaction matrix is computed with the current visual features + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE); // We want to see a point on a point - std::cout << std::endl; - for (i = 0; i < 4; i++) - task.addFeature(p[i], pd[i]); + for (size_t i = 0; i < s.size(); ++i) { + task.addFeature(s[i], s_d[i]); + } // Set the proportional gain - task.setLambda(0.1); + task.setLambda(0.3); // Display task information task.print(); @@ -307,39 +259,30 @@ int main() vpDisplay::display(I); // For each point... - for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { // Achieve the tracking of the dot in the image dot[i].track(I); - // Get the dot cog - cog = dot[i].getCog(); - // Display a green cross at the center of gravity position in the - // image - vpDisplay::displayCross(I, cog, 10, vpColor::green); } // At first iteration, we initialise non linear pose estimation with a linear approach. 
// For the other iterations, non linear pose estimation is initialized with the pose estimated at previous iteration of the loop - compute_pose(point, dot, 4, cam, cMo, init_pose_from_linear_method); + compute_pose(point, dot, cam, c_M_o, init_pose_from_linear_method); if (init_pose_from_linear_method) { init_pose_from_linear_method = false; } - for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { // Update the point feature from the dot location - vpFeatureBuilder::create(p[i], cam, dot[i]); + vpFeatureBuilder::create(s[i], cam, dot[i]); // Set the feature Z coordinate from the pose vpColVector cP; - point[i].changeFrame(cMo, cP); + point[i].changeFrame(c_M_o, cP); - p[i].set_Z(cP[2]); + s[i].set_Z(cP[2]); } - // Printing on stdout concerning task information - // task.print() ; - - vpColVector v; // Compute the visual servoing skew vector - v = task.computeControlLaw(); + vpColVector v = task.computeControlLaw(); // Display the current and desired feature points in the image display vpServoDisplay::display(task, cam, I); @@ -377,10 +320,10 @@ int main() // expressed in meters in the camera frame flog << (task.getError()).t() << " "; // s-s* for points - // Save the current cMo pose: translations in meters, rotations (rx, ry, + // Save the current c_M_o pose: translations in meters, rotations (rx, ry, // rz) in radians - flog << cto[0] << " " << cto[1] << " " << cto[2] << " " // translation - << cro[0] << " " << cro[1] << " " << cro[2] << std::endl; // rot + flog << c_t_o[0] << " " << c_t_o[1] << " " << c_t_o[2] << " " // translation + << c_r_o[0] << " " << c_r_o[1] << " " << c_r_o[2] << std::endl; // rot vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); if (vpDisplay::getClick(I, false)) { @@ -391,7 +334,8 @@ int main() vpDisplay::flush(I); } - flog.close(); // Close the log file + // Close the log file + flog.close(); // Display task information task.print(); @@ -399,9 +343,10 @@ int main() return EXIT_SUCCESS; } catch (const 
vpException &e) { - flog.close(); // Close the log file + // Close the log file + flog.close(); - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur_integrator.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur_integrator.cpp new file mode 100644 index 0000000000..f79758e659 --- /dev/null +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_cur_integrator.cpp @@ -0,0 +1,384 @@ +/* + * ViSP, open source Visual Servoing Platform software. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. + * + * This software is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * See the file LICENSE.txt at the root directory of this source + * distribution for additional information about the GNU GPL. + * + * For using ViSP with software that can not be combined with the GNU + * GPL, please contact Inria about acquiring a ViSP Professional + * Edition License. + * + * See https://visp.inria.fr for more information. + * + * This software was developed at: + * Inria Rennes - Bretagne Atlantique + * Campus Universitaire de Beaulieu + * 35042 Rennes Cedex + * France + * + * If you have questions regarding the use of this file, please contact + * Inria at visp@inria.fr + * + * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE + * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + * + * Description: + * tests the control law + * eye-in-hand control + * velocity computed in the camera frame + * compensate target motion by integrator + */ + +/*! 
+ \file servoAfma6FourPoints2DCamVelocityLs_cur_integrator.cpp + \example servoAfma6FourPoints2DCamVelocityLs_cur_integrator.cpp + + \brief Example of eye-in-hand control law. We control here a real robot, the + Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is + computed in the camera frame. Visual features are the image coordinates of + 4 vpDot2 points. The interaction matrix is computed using the current visual + features. Here we compensate target motion by using an integrator. +*/ + +#include +#include + +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// Define the object CAD model +// Here we consider 4 black blobs whose centers are located on the corners of a square. +#define L 0.06 // To deal with a 12cm by 12cm square + +#ifdef ENABLE_VISP_NAMESPACE +using namespace VISP_NAMESPACE_NAME; +#endif + +/*! + Compute the pose \e c_M_o from the 3D coordinates of the points \e point and + their corresponding 2D coordinates \e dot. The pose is computed using a Lowe + non linear method. + + \param point : 3D coordinates of the points. + \param dot : 2D coordinates of the points. + \param cam : Intrinsic camera parameters. + \param c_M_o : Homogeneous matrix in output describing the transformation + between the camera and object frame. + \param init : Indicates if we have to estimate an initial pose with + Lagrange or Dementhon methods. 
+*/ +void compute_pose(std::vector &point, const std::vector &dot, const vpCameraParameters &cam, + vpHomogeneousMatrix &c_M_o, bool init) +{ + vpRotationMatrix c_R_o; + vpPose pose; + vpImagePoint cog; + + for (size_t i = 0; i < point.size(); ++i) { + double x = 0, y = 0; + cog = dot[i].getCog(); + vpPixelMeterConversion::convertPoint(cam, cog, x, y); // Pixel to meter conversion + point[i].set_x(x); // Perspective projection + point[i].set_y(y); + pose.addPoint(point[i]); + } + + if (init == true) { + pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, c_M_o); + } + else { // init = false; use of the previous pose to initialise VIRTUAL_VS + pose.computePose(vpPose::VIRTUAL_VS, c_M_o); + } +} + +int main() +{ + double opt_mu = 0.02; // Integrator gain + double opt_target_tracking_min_error = 0.0001; // Min task error to trigger integrator for target motion compensation + + // Log file creation in /tmp/$USERNAME/log.dat + // This file contains by line: + // - the 6 computed camera velocities (m/s, rad/s) to achieve the task + // - the 6 measured joint velocities (m/s, rad/s) + // - the 6 measured joint positions (m, rad) + // - the 8 values of s - s* + // - the 6 values of the pose c_M_o (tx,ty,tz, rx,ry,rz) with translation + // in meters and rotations in radians + + // Get the user login name + std::string username = vpIoTools::getUserName(); + + // Create a log filename to save velocities... + std::string logdirname = "/tmp/" + username; + + // Test if the output path exist. If no try to create it + if (vpIoTools::checkDirectory(logdirname) == false) { + try { + // Create the dirname + vpIoTools::makeDirectory(logdirname); + } + catch (...) 
{ + std::cerr << std::endl << "ERROR:" << std::endl; + std::cerr << " Cannot create " << logdirname << std::endl; + return EXIT_FAILURE; + } + } + std::string logfilename = logdirname + "/log.dat"; + + // Open the log file name + std::ofstream flog(logfilename.c_str()); + + try { + vpRealSense2 rs; + rs2::config config; + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); + rs.open(config); + + vpImage I; + + // Warm up camera + for (size_t i = 0; i < 10; ++i) { + rs.acquire(I); + } + + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); + + vpDisplay::display(I); + vpDisplay::flush(I); + + std::cout << "-------------------------------------------------------" << std::endl; + std::cout << " Test program for vpServo " << std::endl; + std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; + std::cout << " Use of the Afma6 robot " << std::endl; + std::cout << " Interaction matrix computed with the current features " << std::endl; + std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; + std::cout << "-------------------------------------------------------" << std::endl; + + std::vector dot(4); + + std::cout << "Click on the 4 dots clockwise starting from upper/left dot..." 
<< std::endl; + for (size_t i = 0; i < dot.size(); ++i) { + dot[i].initTracking(I); + vpImagePoint cog = dot[i].getCog(); + vpDisplay::displayCross(I, cog, 10, vpColor::blue); + vpDisplay::flush(I); + } + + vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + + // Get camera intrinsics + vpCameraParameters cam; + robot.getCameraParameters(cam, I); + + // Sets the current position of the visual feature + std::vector s(4); + for (size_t i = 0; i < s.size(); ++i) { + vpFeatureBuilder::create(s[i], cam, dot[i]); // retrieve x,y of the vpFeaturePoint structure + } + + // Set the position of the square target in a frame which origin is + // centered in the middle of the square + std::vector point(4); + point[0].setWorldCoordinates(-L, -L, 0); + point[1].setWorldCoordinates(+L, -L, 0); + point[2].setWorldCoordinates(+L, +L, 0); + point[3].setWorldCoordinates(-L, +L, 0); + + // Initialise a desired pose to compute s*, the desired 2D point features + vpHomogeneousMatrix c_M_o; + vpTranslationVector c_t_o(0, 0, 0.5); // tz = 0.5 meter + vpRxyzVector c_r_o(vpMath::rad(0), vpMath::rad(-20), vpMath::rad(0)); // ry = -20 deg rotation around camera y axis + vpRotationMatrix c_R_o(c_r_o); // Build the rotation matrix + c_M_o.build(c_t_o, c_R_o); // Build the homogeneous matrix + + // Sets the desired position of the 2D visual feature + std::vector s_d(4); + // Compute the desired position of the features from the desired pose + for (size_t i = 0; i < s_d.size(); ++i) { + vpColVector cP, p; + point[i].changeFrame(c_M_o, cP); + point[i].projection(cP, p); + + s_d[i].set_x(p[0]); + s_d[i].set_y(p[1]); + s_d[i].set_Z(cP[2]); + } + + // Define the task + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame + // - Interaction 
 matrix is computed with the current visual features + vpServo task; + task.setServo(vpServo::EYEINHAND_CAMERA); + task.setInteractionMatrixType(vpServo::CURRENT, vpServo::PSEUDO_INVERSE); + + // We want to see a point on a point + for (size_t i = 0; i < s.size(); ++i) { + task.addFeature(s[i], s_d[i]); + } + + // - set the task adaptive gain + vpAdaptiveGain lambda_adaptive; + lambda_adaptive.initStandard(1.7, 0.3, 1.5); // lambda(0)=1.7, lambda(oo)=0.3 and lambda'(0)=1.5 + task.setLambda(lambda_adaptive); + + // Display task information + task.print(); + + // Initialise the velocity control of the robot + robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + + std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush; + + bool init_pose_from_linear_method = true; + bool start_integrator = false; + double norm_error = 1.; + vpColVector sum_dedt(task.getDimension()); + + bool quit = false; + while (!quit) { + // Acquire a new image from the camera + rs.acquire(I); + + // Display this image + vpDisplay::display(I); + + // For each point... + for (size_t i = 0; i < dot.size(); ++i) { + // Achieve the tracking of the dot in the image + dot[i].track(I); + } + + // At first iteration, we initialise non linear pose estimation with a linear approach. 
+ // For the other iterations, non linear pose estimation is initialized with the pose estimated at previous iteration of the loop + compute_pose(point, dot, cam, c_M_o, init_pose_from_linear_method); + if (init_pose_from_linear_method) { + init_pose_from_linear_method = false; + } + + for (size_t i = 0; i < dot.size(); ++i) { + // Update the point feature from the dot location + vpFeatureBuilder::create(s[i], cam, dot[i]); + // Set the feature Z coordinate from the pose + vpColVector cP; + point[i].changeFrame(c_M_o, cP); + + s[i].set_Z(cP[2]); + } + + // Compute the visual servoing skew vector + vpColVector v = task.computeControlLaw(); + + // Get task error + norm_error = task.getError().sumSquare(); + if (norm_error < opt_target_tracking_min_error) { + start_integrator = true; + } + + if (start_integrator) { + sum_dedt += task.getError(); + vpMatrix J1p = task.getTaskJacobianPseudoInverse(); + v -= opt_mu * J1p * sum_dedt; + } + // Display the current and desired feature points in the image display + vpServoDisplay::display(task, cam, I); + + // Apply the computed camera velocities to the robot + robot.setVelocity(vpRobot::CAMERA_FRAME, v); + + // Save velocities applied to the robot in the log file + // v[0], v[1], v[2] correspond to camera translation velocities in m/s + // v[3], v[4], v[5] correspond to camera rotation velocities in rad/s + flog << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5] << " "; + + // Get the measured joint velocities of the robot + vpColVector qvel; + robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel); + // Save measured joint velocities of the robot in the log file: + // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation + // velocities in m/s + // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation + // velocities in rad/s + flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " " << qvel[3] << " " << qvel[4] << " " << qvel[5] << " "; + + // Get the measured joint 
positions of the robot + vpColVector q; + robot.getPosition(vpRobot::ARTICULAR_FRAME, q); + // Save measured joint positions of the robot in the log file + // - q[0], q[1], q[2] correspond to measured joint translation + // positions in m + // - q[3], q[4], q[5] correspond to measured joint rotation + // positions in rad + flog << q[0] << " " << q[1] << " " << q[2] << " " << q[3] << " " << q[4] << " " << q[5] << " "; + + // Save feature error (s-s*) for the 4 feature points. For each feature + // point, we have 2 errors (along x and y axis). This error is + // expressed in meters in the camera frame + flog << (task.getError()).t() << " "; // s-s* for points + + // Save the current c_M_o pose: translations in meters, rotations (rx, ry, + // rz) in radians + flog << c_t_o[0] << " " << c_t_o[1] << " " << c_t_o[2] << " " // translation + << c_r_o[0] << " " << c_r_o[1] << " " << c_r_o[2] << std::endl; // rotation + + vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); + if (start_integrator) { + vpDisplay::displayText(I, 35, 20, "Integrator enabled", vpColor::red); + } + if (vpDisplay::getClick(I, false)) { + quit = true; + } + + // Flush the display + vpDisplay::flush(I); + } + + // Close the log file + flog.close(); + + // Display task information + task.print(); + + return EXIT_SUCCESS; + } + catch (const vpException &e) { + // Close the log file + flog.close(); + + std::cout << "Visual servo failed with exception: " << e << std::endl; + return EXIT_FAILURE; + } +} + +#else +int main() +{ + std::cout << "You do not have an afma6 robot connected to your computer..." 
<< std::endl; + return EXIT_SUCCESS; +} + +#endif diff --git a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp index f12f0d2ac4..8143fc34b1 100644 --- a/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp +++ b/example/servo-afma6/servoAfma6FourPoints2DCamVelocityLs_des.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,62 +31,38 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! \file servoAfma6FourPoints2DCamVelocityLs_des.cpp + \example servoAfma6FourPoints2DCamVelocityLs_des.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. Visual features are the image coordinates of 4 vpDot2 points. The interaction matrix is computed using the desired visual features. - -*/ - -/*! - \example servoAfma6FourPoints2DCamVelocityLs_des.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in the camera frame. Visual features are the image coordinates of 4 vpDot2 - points. The interaction matrix is computed using the desired visual - features. 
- */ -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) -#include -#include +#include #include -#include -#include -#include -#include -#include +#include +#include +#include #include #include #include #include #include -// Exception -#include - -#define L 0.06 // to deal with a 12cm by 12cm square +// Define the object CAD model +// Here we consider 4 black blobs whose centers are located on the corners of a square. +#define L 0.06 // To deal with a 12cm by 12cm square int main() { @@ -96,18 +71,17 @@ int main() #endif // Log file creation in /tmp/$USERNAME/log.dat - // This file contains by line: + // This file contains by line: // - the 6 computed camera velocities (m/s, rad/s) to achieve the task // - the 6 measured camera velocities (m/s, rad/s) // - the 6 measured joint positions (m, rad) // - the 8 values of s - s* - std::string username; + // Get the user login name - vpIoTools::getUserName(username); + std::string username = vpIoTools::getUserName(); // Create a log filename to save velocities... - std::string logdirname; - logdirname = "/tmp/" + username; + std::string logdirname = "/tmp/" + username; // Test if the output path exist.
If no try to create it if (vpIoTools::checkDirectory(logdirname) == false) { @@ -121,41 +95,32 @@ int main() return EXIT_FAILURE; } } - std::string logfilename; - logfilename = logdirname + "/log.dat"; + std::string logfilename = logdirname + "/log.dat"; // Open the log file name std::ofstream flog(logfilename.c_str()); try { - vpServo task; - - vpImage I; - int i; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; @@ -164,76 +129,74 @@ int main() std::cout << " task : servo 4 points on a square with dimension " << L << " meters" << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - vpDot2 dot[4]; - vpImagePoint cog; + std::vector dot(4); std::cout << "Click on the 4 
dots clockwise starting from upper/left dot..." << std::endl; - for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { dot[i].initTracking(I); - cog = dot[i].getCog(); + vpImagePoint cog = dot[i].getCog(); vpDisplay::displayCross(I, cog, 10, vpColor::blue); vpDisplay::flush(I); } vpRobotAfma6 robot; - vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; // Load the end-effector to camera frame transformation obtained // using a camera intrinsic model with distortion - robot.init(vpAfma6::TOOL_CCMOP, projModel); + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); // Sets the current position of the visual feature - vpFeaturePoint p[4]; - for (i = 0; i < 4; i++) - vpFeatureBuilder::create(p[i], cam, dot[i]); // retrieve x,y of the vpFeaturePoint structure + std::vector s(4); + for (size_t i = 0; i < s.size(); ++i) { + vpFeatureBuilder::create(s[i], cam, dot[i]); // Retrieve x,y of the vpFeaturePoint structure + } // Set the position of the square target in a frame which origin is // centered in the middle of the square vpPoint point[4]; point[0].setWorldCoordinates(-L, -L, 0); - point[1].setWorldCoordinates(L, -L, 0); - point[2].setWorldCoordinates(L, L, 0); - point[3].setWorldCoordinates(-L, L, 0); + point[1].setWorldCoordinates(+L, -L, 0); + point[2].setWorldCoordinates(+L, +L, 0); + point[3].setWorldCoordinates(-L, +L, 0); // Initialise a desired pose to compute s*, the desired 2D point features - vpHomogeneousMatrix cMo; - vpTranslationVector cto(0, 0, 0.7); // tz = 0.7 meter - vpRxyzVector cro(vpMath::rad(0), vpMath::rad(0), - vpMath::rad(0)); // No rotations - vpRotationMatrix cRo(cro); // Build the rotation matrix - cMo.build(cto, cRo); // Build the homogeneous matrix - - // sets the desired position of the 2D visual feature - vpFeaturePoint pd[4]; + vpHomogeneousMatrix 
c_M_o; + vpTranslationVector c_t_o(0, 0, 0.5); // tz = 0.5 meter + vpRxyzVector c_r_o(vpMath::rad(0), vpMath::rad(0), vpMath::rad(0)); // No rotations + vpRotationMatrix c_R_o(c_r_o); // Build the rotation matrix + c_M_o.build(c_t_o, c_R_o); // Build the homogeneous matrix + + // Sets the desired position of the 2D visual feature + std::vector s_d(4); // Compute the desired position of the features from the desired pose - for (int i = 0; i < 4; i++) { + for (size_t i = 0; i < s_d.size(); ++i) { vpColVector cP, p; - point[i].changeFrame(cMo, cP); + point[i].changeFrame(c_M_o, cP); point[i].projection(cP, p); - pd[i].set_x(p[0]); - pd[i].set_y(p[1]); - pd[i].set_Z(cP[2]); + s_d[i].set_x(p[0]); + s_d[i].set_y(p[1]); + s_d[i].set_Z(cP[2]); } // Define the task // - we want an eye-in-hand control law // - robot is controlled in the camera frame // - Interaction matrix is computed with the desired visual features + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); // We want to see a point on a point - std::cout << std::endl; - for (i = 0; i < 4; i++) - task.addFeature(p[i], pd[i]); + for (size_t i = 0; i < s.size(); ++i) { + task.addFeature(s[i], s_d[i]); + } // Set the proportional gain task.setLambda(0.4); @@ -255,26 +218,15 @@ int main() vpDisplay::display(I); // For each point... 
- for (i = 0; i < 4; i++) { + for (size_t i = 0; i < dot.size(); ++i) { // Achieve the tracking of the dot in the image dot[i].track(I); - // Get the dot cog - cog = dot[i].getCog(); - // Display a green cross at the center of gravity position in the - // image - vpDisplay::displayCross(I, cog, 10, vpColor::green); + // Update the point feature from the dot location + vpFeatureBuilder::create(s[i], cam, dot[i]); } - // Printing on stdout concerning task information - // task.print() ; - - // Update the point feature from the dot location - for (i = 0; i < 4; i++) - vpFeatureBuilder::create(p[i], cam, dot[i]); - - vpColVector v; // Compute the visual servoing skew vector - v = task.computeControlLaw(); + vpColVector v = task.computeControlLaw(); // Display the current and desired feature points in the image display vpServoDisplay::display(task, cam, I); @@ -320,7 +272,8 @@ int main() vpDisplay::flush(I); } - flog.close(); // Close the log file + // Close the log file + flog.close(); // Display task information task.print(); @@ -328,8 +281,9 @@ int main() return EXIT_SUCCESS; } catch (const vpException &e) { - flog.close(); // Close the log file - std::cout << "Test failed with exception: " << e << std::endl; + // Close the log file + flog.close(); + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6Line2DCamVelocity.cpp b/example/servo-afma6/servoAfma6Line2DCamVelocity.cpp index b59498161e..59fe708ba3 100644 --- a/example/servo-afma6/servoAfma6Line2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6Line2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. 
* * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,49 +31,33 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! \file servoAfma6Line2DCamVelocity.cpp + \example servoAfma6Line2DCamVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. The visual feature is a line. */ -/*! - \example servoAfma6Line2DCamVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in the camera frame. The visual feature is a line. -*/ - -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6) +#include #include #include #include +#include +#include +#include #include #include #include #include - -#include - -// Exception -#include #include int main() @@ -82,148 +65,162 @@ int main() #ifdef ENABLE_VISP_NAMESPACE using namespace VISP_NAMESPACE_NAME; #endif + vpRobotAfma6 robot; + vpCameraParameters::vpCameraParametersProjType projModel = vpCameraParameters::perspectiveProjWithDistortion; + + // Load the end-effector to camera frame transformation obtained + // using a camera intrinsic model with distortion + robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel); try { - vpImage I; + std::cout << "WARNING: This example will move the robot! " + << "Please make sure to have the user stop button at hand!" << std::endl + << "Press Enter to continue..." 
<< std::endl; + std::cin.ignore(); vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera + vpImage I; for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + // Get camera intrinsics + vpCameraParameters cam; + robot.getCameraParameters(cam, I); + std::cout << "cam:\n" << cam << std::endl; + + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - - std::cout << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << " Test program for vpServo " << std::endl; - std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; - std::cout << " Simulation " << std::endl; - std::cout << " task : servo a line " << std::endl; - std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - - vpMeLine line; - vpMe me; me.setRange(10); me.setPointsToTrack(100); me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD); - me.setThreshold(50); + me.setThreshold(10); me.setSampleStep(10); - line.setDisplay(vpMeSite::RANGE_RESULT); + vpMeLine line; + 
line.setDisplay(vpMeSite::RANGE_RESULT); line.setMe(&me); // Initialize the tracking. Define the line to track. line.initTracking(I); line.track(I); + vpDisplay::flush(I); - vpRobotAfma6 robot; - robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("pos-init.pos"); + // Sets the current position of the visual feature + vpFeatureLine s_line; + vpFeatureBuilder::create(s_line, cam, line); - vpCameraParameters cam; - // Update camera parameters - robot.getCameraParameters(cam, I); + // Sets the desired position of the visual feature + vpLine line_d; + line_d.setWorldCoordinates(1, 0, 0, 0, 0, 0, 1, 0); + vpHomogeneousMatrix c_M_o(0, 0, 0.3, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0)); + line_d.project(c_M_o); - vpTRACE("sets the current position of the visual feature "); - vpFeatureLine p; - vpFeatureBuilder::create(p, cam, line); + vpFeatureLine s_line_d; + vpFeatureBuilder::create(s_line_d, line_d); - vpTRACE("sets the desired position of the visual feature "); - vpLine lined; - lined.setWorldCoordinates(1, 0, 0, 0, 0, 0, 1, 0); - vpHomogeneousMatrix cMo(0, 0, 0.3, 0, 0, vpMath::rad(0)); - lined.project(cMo); - lined.setRho(-fabs(lined.getRho())); - lined.setTheta(0); + // Define the task + vpServo task; + // - We want an eye-in-hand control law + // - Robot is controlled in the camera frame + task.setServo(vpServo::EYEINHAND_CAMERA); + // - We want to see a line on a line + task.addFeature(s_line, s_line_d); + // - Set the gain + task.setLambda(0.5); + // - Display task information + task.print(); - vpFeatureLine pd; - vpFeatureBuilder::create(pd, lined); + robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); - task.setServo(vpServo::EYEINHAND_CAMERA); + bool final_quit = false; + bool send_velocities = false; - vpTRACE("\t we want to see a point on a point.."); 
- std::cout << std::endl; - task.addFeature(p, pd); + while (!final_quit) { + double t_start = vpTime::measureTimeMs(); + rs.acquire(I); + vpDisplay::display(I); - vpTRACE("\t set the gain"); - task.setLambda(0.2); + // Track the line + line.track(I); + line.display(I, vpColor::red); - vpTRACE("Display task information "); - task.print(); + // Update the current line feature + vpFeatureBuilder::create(s_line, cam, line); - robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); + // Display the current and the desired features + s_line.display(cam, I, vpColor::red); + s_line_d.display(cam, I, vpColor::green); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - bool quit = false; - while (!quit) { - std::cout << "---------------------------------------------" << iter << std::endl; + vpColVector v = task.computeControlLaw(); - try { - rs.acquire(I); - vpDisplay::display(I); + if (!send_velocities) { + v = 0; + } - // Track the line - line.track(I); - line.display(I, vpColor::red); + // Send camera frame velocities to the robot + robot.setVelocity(vpRobot::CAMERA_FRAME, v); + + { + std::stringstream ss; + ss << "Left click to " << (send_velocities ? 
"stop the robot" : "servo the robot") << ", right click to quit."; + vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red); + ss.clear(); + ss.str(""); + ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms"; + vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red); + } - // Update the current line feature - vpFeatureBuilder::create(p, cam, line); + vpDisplay::flush(I); - // displqy the current and the desired features - p.display(cam, I, vpColor::red); - pd.display(cam, I, vpColor::green); + vpMouseButton::vpMouseButtonType button; + if (vpDisplay::getClick(I, button, false)) { + switch (button) { + case vpMouseButton::button1: + send_velocities = !send_velocities; + break; - v = task.computeControlLaw(); + case vpMouseButton::button3: + final_quit = true; + break; - if (iter == 0) { - vpDisplay::getClick(I); + default: + break; } - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + } + } + + std::cout << "Stop the robot " << std::endl; + robot.setRobotState(vpRobot::STATE_STOP); + + if (!final_quit) { + while (!final_quit) { + rs.acquire(I); + vpDisplay::display(I); + + vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red); - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); if (vpDisplay::getClick(I, false)) { - quit = true; + final_quit = true; } vpDisplay::flush(I); } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - return EXIT_FAILURE; - } - - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; } - vpTRACE("Display task information "); + // Display task information task.print(); return EXIT_SUCCESS; } diff --git a/example/servo-afma6/servoAfma6MegaposePBVS.cpp b/example/servo-afma6/servoAfma6MegaposePBVS.cpp index 4016c14888..307c4df74e 100644 --- a/example/servo-afma6/servoAfma6MegaposePBVS.cpp +++ b/example/servo-afma6/servoAfma6MegaposePBVS.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -30,8 +29,7 @@ * * Description: * Pose-based visual servoing using MegaPose, on an Afma6 platform. - * - *****************************************************************************/ + */ /*! \example servoAfma6MegaposePBVS.cpp @@ -184,7 +182,7 @@ int main(int argc, const char *argv[]) // Get camera intrinsics vpCameraParameters cam = rs.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion); - std::cout << "cam:\n" << cam << "\n"; + std::cout << "cam:\n" << cam << std::endl; // Initialize Megapose std::shared_ptr megapose; try { diff --git a/example/servo-afma6/servoAfma6Point2DArtVelocity.cpp b/example/servo-afma6/servoAfma6Point2DArtVelocity.cpp index 7e1d516f0f..d2684518a5 100644 --- a/example/servo-afma6/servoAfma6Point2DArtVelocity.cpp +++ b/example/servo-afma6/servoAfma6Point2DArtVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. 
- * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,60 +31,34 @@ * tests the control law * eye-in-hand control * velocity computed in articular - * -*****************************************************************************/ + */ /*! \file servoAfma6Point2DArtVelocity.cpp + \example servoAfma6Point2DArtVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in articular. The visual feature is the center of gravity of a point. - */ -/*! - \example servoAfma6Point2DArtVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in articular. The visual feature is the center of gravity of a point. - -*/ - -#include #include -#include -#include -#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) -#include +#include #include -#include -#include +#include +#include +#include #include #include #include #include - -// Exception -#include #include -#include - int main() { #ifdef ENABLE_VISP_NAMESPACE @@ -98,13 +71,12 @@ int main() // - the 6 measured joint velocities (m/s, rad/s) // - the 6 measured joint positions (m, rad) // - the 2 values of s - s* - std::string username; + // Get the user login name - vpIoTools::getUserName(username); + std::string username = vpIoTools::getUserName(); // Create a log filename to save velocities... 
- std::string logdirname; - logdirname = "/tmp/" + username; + std::string logdirname = "/tmp/" + username; // Test if the output path exist. If no try to create it if (vpIoTools::checkDirectory(logdirname) == false) { @@ -118,110 +90,96 @@ int main() return EXIT_FAILURE; } } - std::string logfilename; - logfilename = logdirname + "/log.dat"; + std::string logfilename = logdirname + "/log.dat"; // Open the log file name std::ofstream flog(logfilename.c_str()); try { - vpServo task; - - vpImage I; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - // exit(1) ; - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; std::cout << " Use of the Afma6 robot " << std::endl; std::cout << " task : servo a point " << std::endl; std::cout << "-------------------------------------------------------" << 
std::endl; - std::cout << std::endl; vpDot dot; - vpImagePoint cog; + dot.setGraphics(true); std::cout << "Click on a dot..." << std::endl; dot.initTracking(I); + // Get the dot cog - cog = dot.getCog(); + vpImagePoint cog = dot.getCog(); vpDisplay::displayCross(I, cog, 10, vpColor::blue); vpDisplay::flush(I); vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); - vpTRACE("sets the current position of the visual feature "); - vpFeaturePoint p; - vpFeatureBuilder::create(p, cam, dot); // retrieve x,y and Z of the vpPoint structure + // Sets the current position of the visual feature + vpFeaturePoint s; + // Update visual feature from camera parameters and blob center of gravity + vpFeatureBuilder::create(s, cam, dot); - p.set_Z(1); - vpTRACE("sets the desired position of the visual feature "); - vpFeaturePoint pd; - pd.build(0, 0, 1); + // Sets the desired position of the visual feature + vpFeaturePoint s_d; + s_d.build(0, 0, 1); // Here we consider the center of the image (x=y=0 and Z=1 meter) - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t articular velocity are computed"); + // Define the task + // - we want an eye-in-hand control law + // - joint velocities are computed + vpServo task; task.setServo(vpServo::EYEINHAND_L_cVe_eJe); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); - vpTRACE("Set the position of the end-effector frame in the camera frame"); - vpHomogeneousMatrix cMe; - // robot.get_cMe(cMe) ; + // - set the camera to end-effector velocity twist matrix transformation + vpVelocityTwistMatrix c_V_e; + robot.get_cVe(c_V_e); + task.set_cVe(c_V_e); - vpVelocityTwistMatrix cVe; - robot.get_cVe(cVe); - std::cout << cVe << std::endl; - task.set_cVe(cVe); + // - set the Jacobian (expressed in the end-effector 
frame) + vpMatrix e_J_e; + robot.get_eJe(e_J_e); + task.set_eJe(e_J_e); - // vpDisplay::getClick(I) ; - vpTRACE("Set the Jacobian (expressed in the end-effector frame)"); - vpMatrix eJe; - robot.get_eJe(eJe); - task.set_eJe(eJe); + // - we want to see a point on a point + task.addFeature(s, s_d); - vpTRACE("\t we want to see a point on a point.."); - std::cout << std::endl; - task.addFeature(p, pd); + // - set the gain + task.setLambda(0.4); - vpTRACE("\t set the gain"); - task.setLambda(0.8); - - vpTRACE("Display task information "); + // Display task information task.print(); robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); std::cout << "\nHit CTRL-C to stop the loop...\n" << std::flush; + bool quit = false; while (!quit) { // Acquire a new image from the camera @@ -233,52 +191,41 @@ int main() // Achieve the tracking of the dot in the image dot.track(I); - // Get the dot cog - cog = dot.getCog(); - - // Display a green cross at the center of gravity position in the image - vpDisplay::displayCross(I, cog, 10, vpColor::green); - // Update the point feature from the dot location - vpFeatureBuilder::create(p, cam, dot); + vpFeatureBuilder::create(s, cam, dot); - // Get the jacobian of the robot - robot.get_eJe(eJe); - // Update this jacobian in the task structure. It will be used to - // compute the velocity skew (as an articular velocity) qdot = -lambda * - // L^+ * cVe * eJe * (s-s*) - task.set_eJe(eJe); + // Get the Jacobian of the robot + robot.get_eJe(e_J_e); - // std::cout << (vpMatrix)cVe*eJe << std::endl ; + // Update this jacobian in the task structure. 
It will be used to compute the control law + task.set_eJe(e_J_e); - vpColVector v; - // Compute the visual servoing skew vector - v = task.computeControlLaw(); + // Compute the visual servoing skew vector (as a joint velocity) + // qdot = -lambda * L^+ * cVe * eJe * (s-s*) + vpColVector qdot = task.computeControlLaw(); // Display the current and desired feature points in the image display vpServoDisplay::display(task, cam, I); // Apply the computed joint velocities to the robot - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + robot.setVelocity(vpRobot::JOINT_STATE, qdot); // Save velocities applied to the robot in the log file - // v[0], v[1], v[2] correspond to joint translation velocities in m/s - // v[3], v[4], v[5] correspond to joint rotation velocities in rad/s - flog << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5] << " "; + // qdot[0], qdot[1], qdot[2] correspond to joint translation velocities in m/s + // qdot[3], qdot[4], qdot[5] correspond to joint rotation velocities in rad/s + flog << qdot[0] << " " << qdot[1] << " " << qdot[2] << " " << qdot[3] << " " << qdot[4] << " " << qdot[5] << " "; // Get the measured joint velocities of the robot - vpColVector qvel; - robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel); + vpColVector qdot_mes; + robot.getVelocity(vpRobot::JOINT_STATE, qdot_mes); // Save measured joint velocities of the robot in the log file: - // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation - // velocities in m/s - // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation - // velocities in rad/s - flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " " << qvel[3] << " " << qvel[4] << " " << qvel[5] << " "; + // - qdot_mes[0], qdot_mes[1], qdot_mes[2] correspond to measured joint translation velocities in m/s + // - qdot_mes[3], qdot_mes[4], qdot_mes[5] correspond to measured joint rotation velocities in rad/s + flog << qdot_mes[0] << " " << qdot_mes[1] << " " << qdot_mes[2] << " " << qdot_mes[3]
<< " " << qdot_mes[4] << " " << qdot_mes[5] << " "; // Get the measured joint positions of the robot vpColVector q; - robot.getPosition(vpRobot::ARTICULAR_FRAME, q); + robot.getPosition(vpRobot::JOINT_STATE, q); // Save measured joint positions of the robot in the log file // - q[0], q[1], q[2] correspond to measured joint translation // positions in m @@ -286,7 +233,7 @@ int main() // positions in rad flog << q[0] << " " << q[1] << " " << q[2] << " " << q[3] << " " << q[4] << " " << q[5] << " "; - // Save feature error (s-s*) for the feature point. For this feature + // Save feature error (s-s*) for the 4 feature points. For each feature // point, we have 2 errors (along x and y axis). This error is // expressed in meters in the camera frame flog << (task.getError()).t() << std::endl; @@ -295,21 +242,22 @@ int main() if (vpDisplay::getClick(I, false)) { quit = true; } + // Flush the display vpDisplay::flush(I); - - // vpTRACE("\t\t || s - s* || = %f ", ( task.getError() - // ).sumSquare()) ; } - flog.close(); // Close the log file + // Close the log file + flog.close(); - vpTRACE("Display task information "); + // Display task information task.print(); + return EXIT_SUCCESS; } catch (const vpException &e) { - flog.close(); // Close the log file - std::cout << "Test failed with exception: " << e << std::endl; + // Close the log file + flog.close(); + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6Point2DCamVelocity.cpp b/example/servo-afma6/servoAfma6Point2DCamVelocity.cpp index ce0f876330..a073de700c 100644 --- a/example/servo-afma6/servoAfma6Point2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6Point2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. 
+ * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,52 +31,35 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! \file servoAfma6Point2DCamVelocity.cpp + \example servoAfma6Point2DCamVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in camera frame. The visual feature is the center of gravity of a point. - -*/ - -/*! - \example servoAfma6Point2DCamVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in camera frame. The visual feature is the center of gravity of a point. 
- */ -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) + #include -#include #include -#include -#include -#include -#include -#include -#include +#include #include +#include +#include #include #include #include #include + int main() { #ifdef ENABLE_VISP_NAMESPACE @@ -86,17 +68,16 @@ int main() // Log file creation in /tmp/$USERNAME/log.dat // This file contains by line: - // - the 6 computed cam velocities (m/s, rad/s) to achieve the task + // - the 6 computed joint velocities (m/s, rad/s) to achieve the task // - the 6 measured joint velocities (m/s, rad/s) // - the 6 measured joint positions (m, rad) // - the 2 values of s - s* - std::string username; + // Get the user login name - vpIoTools::getUserName(username); + std::string username = vpIoTools::getUserName(); // Create a log filename to save velocities... - std::string logdirname; - logdirname = "/tmp/" + username; + std::string logdirname = "/tmp/" + username; // Test if the output path exist. 
If no try to create it if (vpIoTools::checkDirectory(logdirname) == false) { @@ -110,75 +91,77 @@ int main() return EXIT_FAILURE; } } - std::string logfilename; - logfilename = logdirname + "/log.dat"; + std::string logfilename = logdirname + "/log.dat"; // Open the log file name std::ofstream flog(logfilename.c_str()); try { - vpServo task; - - vpImage I; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); + std::cout << "-------------------------------------------------------" << std::endl; + std::cout << " Test program for vpServo " << std::endl; + std::cout << " Eye-in-hand task control, velocity computed in the joint space" << std::endl; + std::cout << " Use of the Afma6 robot " << std::endl; + std::cout << " task : servo a point " << std::endl; + std::cout << "-------------------------------------------------------" << std::endl; + vpDot dot; - vpImagePoint cog; + dot.setGraphics(true); std::cout << "Click on a dot..." 
<< std::endl; dot.initTracking(I); - cog = dot.getCog(); + + // Get the dot cog + vpImagePoint cog = dot.getCog(); vpDisplay::displayCross(I, cog, 10, vpColor::blue); vpDisplay::flush(I); vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); - // sets the current position of the visual feature - vpFeaturePoint p; - // retrieve x,y and Z of the vpPoint structure - vpFeatureBuilder::create(p, cam, dot); + // Sets the current position of the visual feature + vpFeaturePoint s; + // Update visual feature from camera parameters and blob center of gravity + vpFeatureBuilder::create(s, cam, dot); - // sets the desired position of the visual feature - vpFeaturePoint pd; - pd.build(0, 0, 1); + // Sets the desired position of the visual feature + vpFeaturePoint s_d; + s_d.build(0, 0, 1); // Here we consider the center of the image (x=y=0 and Z=1 meter) - // define the task + // Define the task // - we want an eye-in-hand control law // - robot is controlled in the camera frame + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); // - we want to see a point on a point - task.addFeature(p, pd); + task.addFeature(s, s_d); // - set the constant gain - task.setLambda(0.8); + task.setLambda(0.4); // Display task information task.print(); @@ -199,43 +182,34 @@ int main() // Achieve the tracking of the dot in the image dot.track(I); - // Get the dot cog - cog = dot.getCog(); - - // Display a green cross at the center of gravity position in the image - vpDisplay::displayCross(I, cog, 10, vpColor::green); - // Update the point feature from the dot location - vpFeatureBuilder::create(p, cam, dot); + vpFeatureBuilder::create(s, cam, dot); - vpColVector v; // Compute the visual servoing skew vector - v = task.computeControlLaw(); + vpColVector v_c = task.computeControlLaw(); // Display the current and 
desired feature points in the image display vpServoDisplay::display(task, cam, I); - // Apply the computed joint velocities to the robot - robot.setVelocity(vpRobot::CAMERA_FRAME, v); + // Apply the computed cartesian camera velocities to the robot + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); // Save velocities applied to the robot in the log file // v[0], v[1], v[2] correspond to camera translation velocities in m/s // v[3], v[4], v[5] correspond to camera rotation velocities in rad/s - flog << v[0] << " " << v[1] << " " << v[2] << " " << v[3] << " " << v[4] << " " << v[5] << " "; + flog << v_c[0] << " " << v_c[1] << " " << v_c[2] << " " << v_c[3] << " " << v_c[4] << " " << v_c[5] << " "; // Get the measured joint velocities of the robot - vpColVector qvel; - robot.getVelocity(vpRobot::ARTICULAR_FRAME, qvel); + vpColVector qdot_mes; + robot.getVelocity(vpRobot::JOINT_STATE, qdot_mes); // Save measured joint velocities of the robot in the log file: - // - qvel[0], qvel[1], qvel[2] correspond to measured joint translation - // velocities in m/s - // - qvel[3], qvel[4], qvel[5] correspond to measured joint rotation - // velocities in rad/s - flog << qvel[0] << " " << qvel[1] << " " << qvel[2] << " " << qvel[3] << " " << qvel[4] << " " << qvel[5] << " "; + // - qdot_mes[0], qdot_mes[1], qdot_mes[2] correspond to measured joint translation velocities in m/s + // - qdot_mes[3], qdot_mes[4], qdot_mes[5] correspond to measured joint rotation velocities in rad/s + flog << qdot_mes[0] << " " << qdot_mes[1] << " " << qdot_mes[2] << " " << qdot_mes[3] << " " << qdot_mes[4] << " " << qdot_mes[5] << " "; // Get the measured joint positions of the robot vpColVector q; - robot.getPosition(vpRobot::ARTICULAR_FRAME, q); + robot.getPosition(vpRobot::JOINT_STATE, q); // Save measured joint positions of the robot in the log file // - q[0], q[1], q[2] correspond to measured joint translation // positions in m @@ -257,15 +231,17 @@ int main() vpDisplay::flush(I); } - flog.close(); 
// Close the log file + // Close the log file + flog.close(); // Display task information task.print(); return EXIT_SUCCESS; } catch (const vpException &e) { - flog.close(); // Close the log file - std::cout << "Test failed with exception: " << e << std::endl; + // Close the log file + flog.close(); + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6Points2DCamVelocityEyeToHand.cpp b/example/servo-afma6/servoAfma6Points2DCamVelocityEyeToHand.cpp index e648fbc99f..19e54c4c7d 100644 --- a/example/servo-afma6/servoAfma6Points2DCamVelocityEyeToHand.cpp +++ b/example/servo-afma6/servoAfma6Points2DCamVelocityEyeToHand.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,49 +31,29 @@ * tests the control law * eye-to-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ + /*! \file servoAfma6Points2DCamVelocityEyeToHand.cpp + \example servoAfma6Points2DCamVelocityEyeToHand.cpp \brief Example of a eye-to-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The robot is controlled in the camera frame. - -*/ - -/*! - \example servoAfma6Points2DCamVelocityEyeToHand.cpp - - Example of a eye-to-hand control law. We control here a real robot, the - Afma6 robot (cartesian robot, with 6 degrees of freedom). The robot is - controlled in the camera frame. 
- */ -#include // std::fabs -#include // numeric_limits -#include -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include +#if defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) + #include -#include #include -#include -#include -#include -#include -#include +#include #include -#include #include +#include +#include #include #include #include @@ -107,50 +86,39 @@ int main() } } } - vpServo task; - - vpCameraParameters cam; - vpImage I; - int i; vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera + vpImage I; for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 100, 100, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-to-hand task control" << std::endl; std::cout << " Simulation " << std::endl; std::cout << " task : servo a point " << std::endl; std::cout << 
"-------------------------------------------------------" << std::endl; - std::cout << std::endl; int nbPoint = 7; vpDot dot[nbPoint]; vpImagePoint cog; - for (i = 0; i < nbPoint; i++) { + for (int i = 0; i < nbPoint; ++i) { dot[i].initTracking(I); dot[i].setGraphics(true); dot[i].track(I); @@ -172,13 +140,14 @@ int main() vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // Update camera parameters + // Get camera intrinsics + vpCameraParameters cam; robot.getCameraParameters(cam, I); - vpHomogeneousMatrix cMo, cdMo; + vpHomogeneousMatrix c_M_o, cd_M_o; vpPose pose; pose.clearPoint(); - for (i = 0; i < nbPoint; i++) { + for (int i = 0; i < nbPoint; ++i) { cog = dot[i].getCog(); double x = 0, y = 0; vpPixelMeterConversion::convertPoint(cam, cog, x, y); @@ -189,121 +158,96 @@ int main() // compute the initial pose using Dementhon method followed by a non // linear minimization method - pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, cMo); - - std::cout << cMo << std::endl; - cMo.print(); - - /*------------------------------------------------------------------ - -- Learning the desired position - -- or reading the desired position - ------------------------------------------------------------------ - */ - std::cout << " Learning 0/1 " << std::endl; - std::string name = "cdMo.dat"; + pose.computePose(vpPose::DEMENTHON_LAGRANGE_VIRTUAL_VS, c_M_o); + + std::cout << "c_M_o: \n" << c_M_o << std::endl; + + /* + * Learning or reading the desired position + */ + std::cout << "Learning (0/1)? 
" << std::endl; + std::string name = "cd_M_o.dat"; int learning; std::cin >> learning; if (learning == 1) { // save the object position - vpTRACE("Save the location of the object in a file cdMo.dat"); + std::cout << "Save the location of the object in a file cd_M_o.dat" << std::endl; std::ofstream f(name.c_str()); - cMo.save(f); + c_M_o.save(f); f.close(); exit(1); } { - vpTRACE("Loading desired location from cdMo.dat"); - std::ifstream f("cdMo.dat"); - cdMo.load(f); + std::cout << "Loading desired location from cd_M_o.dat" << std::endl; + std::ifstream f("cd_M_o.dat"); + cd_M_o.load(f); f.close(); } - vpFeaturePoint p[nbPoint], pd[nbPoint]; + vpFeaturePoint s[nbPoint], s_d[nbPoint]; - // set the desired position of the point by forward projection using - // the pose cdMo - for (i = 0; i < nbPoint; i++) { + // Set the desired position of the point by forward projection using the pose cd_M_o + for (int i = 0; i < nbPoint; ++i) { vpColVector cP, p; - point[i].changeFrame(cdMo, cP); + point[i].changeFrame(cd_M_o, cP); point[i].projection(cP, p); - pd[i].set_x(p[0]); - pd[i].set_y(p[1]); + s_d[i].set_x(p[0]); + s_d[i].set_y(p[1]); } - //------------------------------------------------------------------ - - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + // Define the task + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame + vpServo task; task.setServo(vpServo::EYETOHAND_L_cVe_eJe); task.setInteractionMatrixType(vpServo::CURRENT); - for (i = 0; i < nbPoint; i++) { - task.addFeature(p[i], pd[i]); + // - we want to see a point on a point + for (int i = 0; i < nbPoint; ++i) { + task.addFeature(s[i], s_d[i]); } - vpTRACE("Display task information "); + // - display task information task.print(); - //------------------------------------------------------------------ - double convergence_threshold = 0.00; // 025 ; - vpDisplay::getClick(I); - 
//------------------------------------------------------------- double error = 1; unsigned int iter = 0; - vpTRACE("\t loop"); robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - vpColVector v; // computed robot velocity // position of the object in the effector frame - vpHomogeneousMatrix oMcamrobot; - oMcamrobot[0][3] = -0.05; + vpHomogeneousMatrix o_M_camrobot; + o_M_camrobot[0][3] = -0.05; - vpImage Ic; int it = 0; - double lambda_av = 0.1; - double alpha = 1; // 1 ; - double beta = 3; // 3 ; - - std::cout << "alpha 0.7" << std::endl; - std::cin >> alpha; - std::cout << "beta 5" << std::endl; - std::cin >> beta; std::list Lcog; - vpImagePoint ip; bool quit = false; while ((error > convergence_threshold) && (!quit)) { std::cout << "---------------------------------------------" << iter++ << std::endl; rs.acquire(I); vpDisplay::display(I); - ip.set_i(265); - ip.set_j(150); - vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green); - ip.set_i(280); - ip.set_j(150); - vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green); + try { - for (i = 0; i < nbPoint; i++) { + for (int i = 0; i < nbPoint; ++i) { dot[i].track(I); Lcog.push_back(dot[i].getCog()); } } catch (...) 
{ - vpTRACE("Error detected while tracking visual features"); + std::cout << "Error detected while tracking visual features" << std::endl; robot.stopMotion(); - exit(1); + return EXIT_FAILURE; } // compute the initial pose using a non linear minimization method pose.clearPoint(); - for (i = 0; i < nbPoint; i++) { + for (int i = 0; i < nbPoint; ++i) { double x = 0, y = 0; cog = dot[i].getCog(); vpPixelMeterConversion::convertPoint(cam, cog, x, y); @@ -311,78 +255,67 @@ int main() point[i].set_y(y); vpColVector cP; - point[i].changeFrame(cdMo, cP); + point[i].changeFrame(cd_M_o, cP); - p[i].set_x(x); - p[i].set_y(y); - p[i].set_Z(cP[2]); + s[i].set_x(x); + s[i].set_y(y); + s[i].set_Z(cP[2]); pose.addPoint(point[i]); - point[i].display(I, cMo, cam, vpColor::green); - point[i].display(I, cdMo, cam, vpColor::blue); + point[i].display(I, c_M_o, cam, vpColor::green); + point[i].display(I, cd_M_o, cam, vpColor::blue); } - pose.computePose(vpPose::LOWE, cMo); - - //! set up the Jacobian - vpHomogeneousMatrix cMe, camrobotMe; - robot.get_cMe(camrobotMe); - cMe = cMo * oMcamrobot * camrobotMe; - - task.set_cVe(cMe); - - vpMatrix eJe; - robot.get_eJe(eJe); - task.set_eJe(eJe); - - // Compute the adaptative gain (speed up the convergence) - double gain; - if (iter > 2) { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } - else - gain = lambda_av; - if (SAVE == 1) - gain = gain / 5; + pose.computePose(vpPose::LOWE, c_M_o); + + // - set the camera to end-effector velocity twist matrix transformation + vpHomogeneousMatrix c_M_e, camrobot_M_e; + robot.get_cMe(camrobot_M_e); + c_M_e = c_M_o * o_M_camrobot * camrobot_M_e; + + task.set_cVe(c_M_e); + + // - set the Jacobian (expressed in the end-effector frame) + vpMatrix e_J_e; + robot.get_eJe(e_J_e); + task.set_eJe(e_J_e); - vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, (task.getError()).sumSquare(), gain); - 
task.setLambda(gain); + // - set the task adaptive gain + vpAdaptiveGain lambda_adaptive; + lambda_adaptive.initStandard(1.7, 0.3, 1.5); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30 + task.setLambda(lambda_adaptive); - v = task.computeControlLaw(); + vpColVector qdot = task.computeControlLaw(); - // display points trajectory + // Display points trajectory for (std::list::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog) { vpDisplay::displayPoint(I, *it_cog, vpColor::red); } + + // Display task visual features feature vpServoDisplay::display(task, cam, I); - robot.setVelocity(vpRobot::ARTICULAR_FRAME, v); + + // Apply joint velocity to the robot + robot.setVelocity(vpRobot::JOINT_STATE, qdot); error = (task.getError()).sumSquare(); std::cout << "|| s - s* || = " << error << std::endl; if (error > 7) { - vpTRACE("Error detected while tracking visual features"); + std::cout << "Error detected while tracking visual features" << std::endl; robot.stopMotion(); return EXIT_FAILURE; } - // display the pose - // pose.display(I,cMo,cam, 0.04, vpColor::red) ; - // display the pose - // pose.display(I,cdMo,cam, 0.04, vpColor::blue) ; if ((SAVE == 1) && (iter % 3 == 0)) { - + vpImage Ic; vpDisplay::getImage(I, Ic); std::stringstream ss; ss << logdirname; ss << "/image."; ss << std::setfill('0') << std::setw(4); ss << it++; - ss << ".ppm"; + ss << ".png"; vpImageIo::write(Ic, ss.str()); } @@ -393,13 +326,11 @@ int main() vpDisplay::flush(I); } - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - vpDisplay::getClick(I); + return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6Segment2DCamVelocity.cpp b/example/servo-afma6/servoAfma6Segment2DCamVelocity.cpp index a2b38c7947..e0fcb743ef 100644 --- 
a/example/servo-afma6/servoAfma6Segment2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6Segment2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,11 +31,7 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * - * Authors: - * Filip Novotny - * -*****************************************************************************/ + */ /*! \example servoAfma6Segment2DCamVelocity.cpp @@ -44,7 +39,6 @@ Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in camera frame. The visual feature is the segment between two points. 
- */ #include @@ -117,9 +111,10 @@ int main() vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); // Warm up camera @@ -144,11 +139,11 @@ int main() vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); + // Get camera intrinsics vpCameraParameters cam; - - // Update camera parameters robot.getCameraParameters(cam, I); - std::cout << "define the initial segment" << std::endl; + + std::cout << "Define the initial segment" << std::endl; for (std::vector::iterator i = dot.begin(); i != dot.end(); ++i) { std::cout << "Click on a dot..." 
<< std::endl; @@ -230,7 +225,8 @@ int main() vpDisplay::flush(I); } - flog.close(); // Close the log file + // Close the log file + flog.close(); // Display task information task.print(); @@ -238,8 +234,9 @@ int main() return EXIT_SUCCESS; } catch (const vpException &e) { - flog.close(); // Close the log file - std::cout << "Test failed with exception: " << e << std::endl; + // Close the log file + flog.close(); + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6SquareLines2DCamVelocity.cpp b/example/servo-afma6/servoAfma6SquareLines2DCamVelocity.cpp index d0bc247c77..1b4e2ce931 100644 --- a/example/servo-afma6/servoAfma6SquareLines2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6SquareLines2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,8 +31,7 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! 
\file servoAfma6SquareLines2DCamVelocity.cpp @@ -54,33 +52,22 @@ */ -#include // std::fabs -#include // numeric_limits -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6) +#include #include #include #include +#include +#include +#include #include #include #include #include - -#include - -// Exception -#include #include int main() @@ -90,46 +77,36 @@ int main() #endif try { - vpImage I; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; std::cout << " Simulation " << std::endl; std::cout << " task : servo 
a line " << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - int i; - int nbline = 4; + int nb_lines = 4; - vpMeLine line[nbline]; + std::vector line(nb_lines); vpMe me; me.setRange(10); @@ -139,7 +116,7 @@ int main() me.setSampleStep(10); // Initialize the tracking. Define the four lines to track. - for (i = 0; i < nbline; i++) { + for (int i = 0; i < nb_lines; ++i) { line[i].setDisplay(vpMeSite::RANGE_RESULT); line[i].setMe(&me); @@ -149,146 +126,104 @@ int main() vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("zero.pos"); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); - vpTRACE("sets the current position of the visual feature "); - vpFeatureLine p[nbline]; - for (i = 0; i < nbline; i++) - vpFeatureBuilder::create(p[i], cam, line[i]); + // Sets the current position of the visual feature + std::vector s_line(nb_lines); + for (int i = 0; i < nb_lines; ++i) + vpFeatureBuilder::create(s_line[i], cam, line[i]); - vpTRACE("sets the desired position of the visual feature "); - vpLine lined[nbline]; - lined[0].setWorldCoordinates(1, 0, 0, 0.05, 0, 0, 1, 0); - lined[1].setWorldCoordinates(0, 1, 0, 0.05, 0, 0, 1, 0); - lined[2].setWorldCoordinates(1, 0, 0, -0.05, 0, 0, 1, 0); - lined[3].setWorldCoordinates(0, 1, 0, -0.05, 0, 0, 1, 0); + // Sets the desired position of the visual feature + std::vector line_d(nb_lines); + line_d[0].setWorldCoordinates(1, 0, 0, 0.05, 0, 0, 1, 0); + line_d[1].setWorldCoordinates(0, 1, 0, 0.05, 0, 0, 1, 0); + line_d[2].setWorldCoordinates(1, 0, 0, -0.05, 0, 0, 1, 0); + line_d[3].setWorldCoordinates(0, 1, 0, -0.05, 0, 0, 1, 0); - vpHomogeneousMatrix cMo(0, 0, 0.5, 0, 0, vpMath::rad(0)); + vpHomogeneousMatrix c_M_o(0, 0, 0.5, 0, 0, vpMath::rad(0)); - lined[0].project(cMo); - lined[1].project(cMo); - 
lined[2].project(cMo); - lined[3].project(cMo); + line_d[0].project(c_M_o); + line_d[1].project(c_M_o); + line_d[2].project(c_M_o); + line_d[3].project(c_M_o); // Those lines are needed to keep the conventions define in vpMeLine // (Those in vpLine are less restrictive) Another way to have the // coordinates of the desired features is to learn them before executing // the program. - lined[0].setRho(-fabs(lined[0].getRho())); - lined[0].setTheta(0); - lined[1].setRho(-fabs(lined[1].getRho())); - lined[1].setTheta(M_PI / 2); - lined[2].setRho(-fabs(lined[2].getRho())); - lined[2].setTheta(M_PI); - lined[3].setRho(-fabs(lined[3].getRho())); - lined[3].setTheta(-M_PI / 2); - - vpFeatureLine pd[nbline]; - - vpFeatureBuilder::create(pd[0], lined[0]); - vpFeatureBuilder::create(pd[1], lined[1]); - vpFeatureBuilder::create(pd[2], lined[2]); - vpFeatureBuilder::create(pd[3], lined[3]); - - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + line_d[0].setRho(-fabs(line_d[0].getRho())); + line_d[0].setTheta(0); + line_d[1].setRho(-fabs(line_d[1].getRho())); + line_d[1].setTheta(M_PI / 2); + line_d[2].setRho(-fabs(line_d[2].getRho())); + line_d[2].setTheta(M_PI); + line_d[3].setRho(-fabs(line_d[3].getRho())); + line_d[3].setTheta(-M_PI / 2); + + std::vector s_line_d(nb_lines); + + vpFeatureBuilder::create(s_line_d[0], line_d[0]); + vpFeatureBuilder::create(s_line_d[1], line_d[1]); + vpFeatureBuilder::create(s_line_d[2], line_d[2]); + vpFeatureBuilder::create(s_line_d[3], line_d[3]); + + // Define the task + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE); - vpTRACE("\t we want to see a point on a point.."); - std::cout << std::endl; - for (i = 0; i < nbline; i++) - task.addFeature(p[i], pd[i]); + // - we want to see 4 lines 
on 4 lines + for (int i = 0; i < nb_lines; ++i) { + task.addFeature(s_line[i], s_line_d[i]); + } - vpTRACE("\t set the gain"); + // - set the gain task.setLambda(0.2); - vpTRACE("Display task information "); + // - display task information task.print(); robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - vpImage Ic; - double lambda_av = 0.05; - double alpha = 0.05; - double beta = 3; - bool quit = false; while (!quit) { - std::cout << "---------------------------------------------" << iter << std::endl; - - try { - rs.acquire(I); - vpDisplay::display(I); - - // Track the lines and update the features - for (i = 0; i < nbline; i++) { - line[i].track(I); - line[i].display(I, vpColor::red); - - vpFeatureBuilder::create(p[i], cam, line[i]); - - p[i].display(cam, I, vpColor::red); - pd[i].display(cam, I, vpColor::green); - } - - double gain; - { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } - - task.setLambda(gain); - - v = task.computeControlLaw(); - - if (iter == 0) { - vpDisplay::getClick(I); - } - if (v.sumSquare() > 0.5) { - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - vpDisplay::getClick(I); - } - - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); - if (vpDisplay::getClick(I, false)) { - quit = true; - } - - vpDisplay::flush(I); + rs.acquire(I); + vpDisplay::display(I); + + // Track the lines and update the features + for (int i = 0; i < nb_lines; ++i) { + line[i].track(I); + line[i].display(I, vpColor::red); + + vpFeatureBuilder::create(s_line[i], cam, line[i]); + + s_line[i].display(cam, I, vpColor::red); + s_line_d[i].display(cam, I, vpColor::green); } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - exit(1); + + vpColVector v_c = task.computeControlLaw(); + + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); + + vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); + if (vpDisplay::getClick(I, false)) { + quit = true; } - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; + vpDisplay::flush(I); } - vpTRACE("Display task information "); + // Display task information task.print(); return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/example/servo-afma6/servoAfma6TwoLines2DCamVelocity.cpp b/example/servo-afma6/servoAfma6TwoLines2DCamVelocity.cpp index 73bdd66dae..417347a04f 100644 --- a/example/servo-afma6/servoAfma6TwoLines2DCamVelocity.cpp +++ b/example/servo-afma6/servoAfma6TwoLines2DCamVelocity.cpp @@ -1,7 +1,6 @@ -/**************************************************************************** - * +/* * ViSP, open source Visual Servoing Platform software. - * Copyright (C) 2005 - 2023 by Inria. All rights reserved. + * Copyright (C) 2005 - 2024 by Inria. All rights reserved. * * This software is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -32,56 +31,33 @@ * tests the control law * eye-in-hand control * velocity computed in the camera frame - * -*****************************************************************************/ + */ /*! - \file servoAfma6TwoLines2DCamVelocity.cpp + \example servoAfma6TwoLines2DCamVelocity.cpp \brief Example of eye-in-hand control law. We control here a real robot, the Afma6 robot (cartesian robot, with 6 degrees of freedom). The velocity is computed in the camera frame. Visual features are the two lines. - -*/ - -/*! 
- - \example servoAfma6TwoLines2DCamVelocity.cpp - - Example of eye-in-hand control law. We control here a real robot, the Afma6 - robot (cartesian robot, with 6 degrees of freedom). The velocity is computed - in the camera frame. Visual features are the two lines. - */ -#include // std::fabs -#include // numeric_limits -#include +#include #include -#include // Debug trace -#if (defined(VISP_HAVE_AFMA6) && defined(VISP_HAVE_DC1394)) -#include -#include -#include -#include -#include -#include -#include +#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6) +#include #include #include #include +#include +#include +#include #include #include #include #include - -#include - -// Exception -#include #include int main() @@ -91,46 +67,36 @@ int main() #endif try { - vpImage I; - vpRealSense2 rs; rs2::config config; - config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30); - config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30); - config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30); + unsigned int width = 640, height = 480, fps = 60; + config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps); + config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps); + config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps); rs.open(config); + vpImage I; + // Warm up camera for (size_t i = 0; i < 10; ++i) { rs.acquire(I); } -#ifdef VISP_HAVE_X11 - vpDisplayX display(I, 100, 100, "Current image"); -#elif defined(HAVE_OPENCV_HIGHGUI) - vpDisplayOpenCV display(I, 100, 100, "Current image"); -#elif defined(VISP_HAVE_GTK) - vpDisplayGTK display(I, 100, 100, "Current image"); -#endif + std::shared_ptr d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image"); vpDisplay::display(I); vpDisplay::flush(I); - vpServo task; - - std::cout << std::endl; std::cout << "-------------------------------------------------------" << std::endl; std::cout << " 
Test program for vpServo " << std::endl; std::cout << " Eye-in-hand task control, velocity computed in the camera frame" << std::endl; std::cout << " Simulation " << std::endl; std::cout << " task : servo a point " << std::endl; std::cout << "-------------------------------------------------------" << std::endl; - std::cout << std::endl; - int i; - int nbline = 2; + int nb_lines = 2; - vpMeLine line[nbline]; + std::vector line(nb_lines); vpMe me; me.setRange(10); @@ -140,141 +106,106 @@ int main() me.setSampleStep(10); // Initialize the tracking. Define the two lines to track - vpTRACE("The two lines to track must be parallels "); - // vpTRACE("The two lines to track must be perpendicular ") ; - for (i = 0; i < nbline; i++) { + // The two lines to track must be parallels + for (int i = 0; i < nb_lines; ++i) { line[i].setDisplay(vpMeSite::RANGE_RESULT); line[i].setMe(&me); line[i].initTracking(I); line[i].track(I); + vpDisplay::flush(I); } vpRobotAfma6 robot; robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, vpCameraParameters::perspectiveProjWithoutDistortion); - // robot.move("zero.pos"); + // Get camera intrinsics vpCameraParameters cam; - // Update camera parameters robot.getCameraParameters(cam, I); - vpTRACE("sets the current position of the visual feature "); - vpFeatureLine p[nbline]; - for (i = 0; i < nbline; i++) - vpFeatureBuilder::create(p[i], cam, line[i]); + // Sets the current position of the visual feature + std::vector s_line(nb_lines); + for (int i = 0; i < nb_lines; ++i) { + vpFeatureBuilder::create(s_line[i], cam, line[i]); + } - vpTRACE("sets the desired position of the visual feature "); - vpLine lined[2]; - lined[0].setWorldCoordinates(1, 0, 0, -0.05, 0, 0, 1, 0); - lined[1].setWorldCoordinates(1, 0, 0, 0.05, 0, 0, 1, 0); + // Sets the desired position of the visual feature + std::vector line_d(2); + line_d[0].setWorldCoordinates(1, 0, 0, -0.05, 0, 0, 1, 0); + line_d[1].setWorldCoordinates(1, 0, 0, 0.05, 0, 0, 1, 0); - vpHomogeneousMatrix cMo(0, 0, 
0.5, 0, 0, vpMath::rad(0)); + vpHomogeneousMatrix c_M_o(0, 0, 0.5, 0, 0, vpMath::rad(0)); - lined[0].project(cMo); - lined[1].project(cMo); + line_d[0].project(c_M_o); + line_d[1].project(c_M_o); // Those lines are needed to keep the conventions define in vpMeLine // (Those in vpLine are less restrictive) Another way to have the // coordinates of the desired features is to learn them before executing // the program. - lined[0].setRho(-fabs(lined[0].getRho())); - lined[0].setTheta(0); - lined[1].setRho(-fabs(lined[1].getRho())); - lined[1].setTheta(M_PI); - - vpFeatureLine pd[nbline]; - vpFeatureBuilder::create(pd[0], lined[0]); - vpFeatureBuilder::create(pd[1], lined[1]); - - vpTRACE("define the task"); - vpTRACE("\t we want an eye-in-hand control law"); - vpTRACE("\t robot is controlled in the camera frame"); + line_d[0].setRho(-fabs(line_d[0].getRho())); + line_d[0].setTheta(0); + line_d[1].setRho(-fabs(line_d[1].getRho())); + line_d[1].setTheta(M_PI); + + std::vector s_line_d(nb_lines); + vpFeatureBuilder::create(s_line_d[0], line_d[0]); + vpFeatureBuilder::create(s_line_d[1], line_d[1]); + + // Define the task + // - we want an eye-in-hand control law + // - robot is controlled in the camera frame + vpServo task; task.setServo(vpServo::EYEINHAND_CAMERA); - vpTRACE("\t we want to see a point on a point.."); - std::cout << std::endl; - for (i = 0; i < nbline; i++) { - task.addFeature(p[i], pd[i]); + // - we want to see a line on a line + for (int i = 0; i < nb_lines; ++i) { + task.addFeature(s_line[i], s_line_d[i]); } - vpTRACE("\t set the gain"); + // - set the gain task.setLambda(0.2); - vpTRACE("Display task information "); + // -Display task information task.print(); robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL); - unsigned int iter = 0; - vpTRACE("\t loop"); - vpColVector v; - - vpImage Ic; - double lambda_av = 0.05; - double alpha = 0.2; - double beta = 3; - bool quit = false; while (!quit) { - std::cout << 
"---------------------------------------------" << iter << std::endl; - - try { - rs.acquire(I); - vpDisplay::display(I); - - // Track the lines and update the features - for (i = 0; i < nbline; i++) { - line[i].track(I); - line[i].display(I, vpColor::red); - - vpFeatureBuilder::create(p[i], cam, line[i]); - - p[i].display(cam, I, vpColor::red); - pd[i].display(cam, I, vpColor::green); - } - - // Adaptative gain - double gain; - { - if (std::fabs(alpha) <= std::numeric_limits::epsilon()) - gain = lambda_av; - else { - gain = alpha * exp(-beta * (task.getError()).sumSquare()) + lambda_av; - } - } - task.setLambda(gain); - - v = task.computeControlLaw(); - - if (iter == 0) { - vpDisplay::getClick(I); - } - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - - vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); - if (vpDisplay::getClick(I, false)) { - quit = true; - } - - vpDisplay::flush(I); + rs.acquire(I); + vpDisplay::display(I); + + // Track the lines and update the features + for (int i = 0; i < nb_lines; ++i) { + line[i].track(I); + line[i].display(I, vpColor::red); + + vpFeatureBuilder::create(s_line[i], cam, line[i]); + + s_line[i].display(cam, I, vpColor::red); + s_line_d[i].display(cam, I, vpColor::green); } - catch (...) 
{ - v = 0; - robot.setVelocity(vpRobot::CAMERA_FRAME, v); - robot.stopMotion(); - return EXIT_FAILURE; + + vpColVector v_c = task.computeControlLaw(); + + robot.setVelocity(vpRobot::CAMERA_FRAME, v_c); + + vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red); + if (vpDisplay::getClick(I, false)) { + quit = true; } - vpTRACE("\t\t || s - s* || = %f ", (task.getError()).sumSquare()); - iter++; + vpDisplay::flush(I); } - vpTRACE("Display task information "); + // Display task information task.print(); + return EXIT_SUCCESS; } catch (const vpException &e) { - std::cout << "Test failed with exception: " << e << std::endl; + std::cout << "Visual servo failed with exception: " << e << std::endl; return EXIT_FAILURE; } } diff --git a/modules/core/include/visp3/core/vpArray2D.h b/modules/core/include/visp3/core/vpArray2D.h index 89e5484619..f0ba0196e2 100644 --- a/modules/core/include/visp3/core/vpArray2D.h +++ b/modules/core/include/visp3/core/vpArray2D.h @@ -151,15 +151,15 @@ template class vpArray2D * Basic constructor of a 2D array. * Number of columns and rows are set to zero. */ - vpArray2D() : data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) { } + vpArray2D() : data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) { } /*! Copy constructor of a 2D array. */ - vpArray2D(const vpArray2D &A) + vpArray2D(const vpArray2D &A) : #if ((__cplusplus >= 201103L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))) // Check if cxx11 or higher - vpArray2D() + vpArray2D() #else data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) #endif @@ -174,10 +174,10 @@ template class vpArray2D \param r : Array number of rows. \param c : Array number of columns. 
*/ - vpArray2D(unsigned int r, unsigned int c) + vpArray2D(unsigned int r, unsigned int c) : #if ((__cplusplus >= 201103L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))) // Check if cxx11 or higher - vpArray2D() + vpArray2D() #else data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) #endif @@ -192,10 +192,10 @@ template class vpArray2D \param c : Array number of columns. \param val : Each element of the array is set to \e val. */ - vpArray2D(unsigned int r, unsigned int c, Type val) + vpArray2D(unsigned int r, unsigned int c, Type val) : #if ((__cplusplus >= 201103L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))) // Check if cxx11 or higher - vpArray2D() + vpArray2D() #else data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) #endif @@ -215,10 +215,10 @@ template class vpArray2D \param c : Array number of columns. \param vec : Data used to initialize the 2D array. */ - vpArray2D(const std::vector &vec, unsigned int r = 0, unsigned int c = 0) + vpArray2D(const std::vector &vec, unsigned int r = 0, unsigned int c = 0) : #if ((__cplusplus >= 201103L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))) // Check if cxx11 or higher - vpArray2D() + vpArray2D() #else data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) #endif @@ -257,7 +257,7 @@ template class vpArray2D } #if ((__cplusplus >= 201103L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201103L))) // Check if cxx11 or higher - vpArray2D(vpArray2D &&A) noexcept + vpArray2D(vpArray2D &&A) noexcept { rowNum = A.rowNum; colNum = A.colNum; @@ -272,13 +272,13 @@ template class vpArray2D A.data = nullptr; } - VP_EXPLICIT vpArray2D(const std::initializer_list &list) : vpArray2D() + VP_EXPLICIT vpArray2D(const std::initializer_list &list) : vpArray2D() { resize(1, static_cast(list.size()), false, false); std::copy(list.begin(), list.end(), data); } - VP_EXPLICIT vpArray2D(unsigned int nrows, unsigned int ncols, const std::initializer_list &list) + VP_EXPLICIT vpArray2D(unsigned 
int nrows, unsigned int ncols, const std::initializer_list &list) : data(nullptr), rowNum(0), colNum(0), rowPtrs(nullptr), dsize(0) { if ((nrows * ncols) != static_cast(list.size())) { @@ -291,7 +291,7 @@ template class vpArray2D std::copy(list.begin(), list.end(), data); } - VP_EXPLICIT vpArray2D(const std::initializer_list > &lists) : vpArray2D() + VP_EXPLICIT vpArray2D(const std::initializer_list > &lists) : vpArray2D() { unsigned int nrows = static_cast(lists.size()), ncols = 0; for (auto &l : lists) { @@ -311,7 +311,7 @@ template class vpArray2D /*! * Destructor that deallocate memory. */ - virtual ~vpArray2D() + virtual ~vpArray2D() { if (data != nullptr) { free(data); diff --git a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h index 0b47bc0aa3..b57915a5ff 100644 --- a/modules/detection/include/visp3/detection/vpDetectorAprilTag.h +++ b/modules/detection/include/visp3/detection/vpDetectorAprilTag.h @@ -65,7 +65,7 @@ BEGIN_VISP_NAMESPACE * If camera parameters and the size of the tag are provided, you can also estimate * the 3D pose of the tag in terms of position and orientation wrt the camera considering 2 cases: * - 1. If all the tags have the same size use - * detect(const vpImage &, const double, const vpCameraParameters &, std::vector &) + * detect(const vpImage &, double, const vpCameraParameters &, std::vector &, std::vector *, std::vector *, std::vector *) * - 2. If tag sizes differ, use rather getPose() * * The following sample code shows how to use this class to detect the location @@ -116,8 +116,14 @@ BEGIN_VISP_NAMESPACE * Message: "36h11 id: 1" * \endcode * + * As shown in the next image, two different tag frames could be considered for pose estimation. + * \image html img-tag-frame.jpg Tag 36h11_00000 with location of the 4 corners and tag frame + * There is the function setZAlignedWithCameraAxis() that allows to choose which tag frame has to be considered. 
+ * * This other example shows how to estimate the 3D pose of 36h11 AprilTag * patterns considering that all the tags have the same size (in our example 0.053 m). + * Here we consider the default case, when z-camera and z-tag axis are not aligned. + * * \code * #include * #include @@ -133,6 +139,7 @@ BEGIN_VISP_NAMESPACE * vpImageIo::read(I, "image-tag36h11.pgm"); * * vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11); + * detector.setZAlignedWithCameraAxis(false); // Default configuration * std::vector cMo; * vpCameraParameters cam; * cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779); @@ -292,6 +299,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase std::vector > getTagsPoints3D(const std::vector &tagsId, const std::map &tagsSize) const; + bool isZAlignedWithCameraAxis() const; + void setAprilTagDecodeSharpening(double decodeSharpening); void setAprilTagFamily(const vpAprilTagFamily &tagFamily); void setAprilTagNbThreads(int nThreads); @@ -311,7 +320,8 @@ class VISP_EXPORT vpDetectorAprilTag : public vpDetectorBase m_displayTagThickness = thickness; } - inline friend void swap(vpDetectorAprilTag& o1, vpDetectorAprilTag& o2) { + inline friend void swap(vpDetectorAprilTag &o1, vpDetectorAprilTag &o2) + { using std::swap; swap(o1.m_impl, o2.m_impl); } diff --git a/modules/detection/src/tag/vpDetectorAprilTag.cpp b/modules/detection/src/tag/vpDetectorAprilTag.cpp index 47c49a0125..0c46714de6 100644 --- a/modules/detection/src/tag/vpDetectorAprilTag.cpp +++ b/modules/detection/src/tag/vpDetectorAprilTag.cpp @@ -822,9 +822,20 @@ class vpDetectorAprilTag::Impl void setRefinePose(bool) { } - void setPoseEstimationMethod(const vpPoseEstimationMethod &method) { m_poseEstimationMethod = method; } + void setPoseEstimationMethod(const vpPoseEstimationMethod &method) + { + m_poseEstimationMethod = method; + } - void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) { m_zAlignedWithCameraFrame = 
zAlignedWithCameraFrame; } + void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) + { + m_zAlignedWithCameraFrame = zAlignedWithCameraFrame; + } + + bool isZAlignedWithCameraAxis() const + { + return m_zAlignedWithCameraFrame; + } protected: std::map m_mapOfCorrespondingPoseMethods; @@ -1212,14 +1223,27 @@ void vpDetectorAprilTag::setAprilTagRefinePose(bool refinePose) { m_impl->setRef #endif /*! - * Modify the resulting tag pose returned by getPose() in order to get - * a pose where z-axis is aligned when the camera plane is parallel to the tag. - * \param zAlignedWithCameraFrame : Flag to get a pose where z-axis is aligned with the camera frame. + * Modify the resulting tag pose returned by getPose() or + * detect(const vpImage &, double, const vpCameraParameters &, std::vector &, std::vector *, std::vector *, std::vector *) + * in order to get a pose where z-axis is aligned when the camera plane is parallel to the tag. + * + * \image html img-tag-frame.jpg Tag 36h11_00000 with location of the 4 corners and tag frame + * \param zAlignedWithCameraFrame : When set to true, estimated tag pose has a z-axis aligned with the + * camera frame. */ void vpDetectorAprilTag::setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) { m_impl->setZAlignedWithCameraAxis(zAlignedWithCameraFrame); } + +/*! + * Indicate it z-axis of the tag frame is aligned with the camera frame or not. 
+ * @return When true, z-axis are aligned, false otherwise + */ +bool vpDetectorAprilTag::isZAlignedWithCameraAxis() const +{ + return m_impl->isZAlignedWithCameraAxis(); +} END_VISP_NAMESPACE #elif !defined(VISP_BUILD_SHARED_LIBS) // Work around to avoid warning: libvisp_core.a(vpDetectorAprilTag.cpp.o) has diff --git a/modules/robot/src/real-robot/afma6/vpAfma6.cpp b/modules/robot/src/real-robot/afma6/vpAfma6.cpp index 03bcdc0fea..344d05115d 100644 --- a/modules/robot/src/real-robot/afma6/vpAfma6.cpp +++ b/modules/robot/src/real-robot/afma6/vpAfma6.cpp @@ -134,20 +134,19 @@ vpAfma6::vpAfma6() // ... in init (vpAfma6::vpAfma6ToolType tool, // vpCameraParameters::vpCameraParametersProjType projModel) // Maximal value of the joints - this->_joint_max[0] = 0.7001; - this->_joint_max[1] = 0.5201; + this->_joint_min[0] = -0.7501; + this->_joint_min[1] = -0.6501; + this->_joint_min[2] = -0.5001; + this->_joint_min[3] = -2.7301; + this->_joint_min[4] = -0.3001; + this->_joint_min[5] = -1.5901; + //_joint_max.resize(njoint); + this->_joint_max[0] = 0.6001; + this->_joint_max[1] = 0.6701; this->_joint_max[2] = 0.4601; this->_joint_max[3] = 2.7301; this->_joint_max[4] = 2.4801; this->_joint_max[5] = 1.5901; - // Minimal value of the joints - this->_joint_min[0] = -0.6501; - this->_joint_min[1] = -0.6001; - this->_joint_min[2] = -0.5001; - this->_joint_min[3] = -2.7301; - this->_joint_min[4] = -0.1001; - this->_joint_min[5] = -1.5901; - init(); } diff --git a/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp b/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp index 7d42558b0a..971da0a4a0 100644 --- a/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp +++ b/modules/robot/src/robot-simulator/vpSimulatorAfma6.cpp @@ -173,15 +173,15 @@ void vpSimulatorAfma6::init() // Software joint limits in radians //_joint_min.resize(njoint); - _joint_min[0] = -0.6501; - _joint_min[1] = -0.6001; + _joint_min[0] = -0.7501; + _joint_min[1] = -0.6501; _joint_min[2] = -0.5001; 
_joint_min[3] = -2.7301; - _joint_min[4] = -0.1001; + _joint_min[4] = -0.3001; _joint_min[5] = -1.5901; //_joint_max.resize(njoint); - _joint_max[0] = 0.7001; - _joint_max[1] = 0.5201; + _joint_max[0] = 0.6001; + _joint_max[1] = 0.6701; _joint_max[2] = 0.4601; _joint_max[3] = 2.7301; _joint_max[4] = 2.4801; diff --git a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-save.cpp b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-save.cpp index 29f6a8f54b..a16f1cac19 100644 --- a/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-save.cpp +++ b/tutorial/tracking/model-based/generic/tutorial-mb-generic-tracker-save.cpp @@ -27,8 +27,8 @@ std::string toString(const std::string &name, int val) { auto fmt = name.c_str(); int sz = std::snprintf(nullptr, 0, fmt, val); - std::vector buf(sz + 1); // note +1 for null terminator - std::sprintf(buf.data(), fmt, val); + std::vector buf(sz); + std::snprintf(&buf[0], sz, fmt, val); std::string str(buf.begin(), buf.end()); return str;