diff --git a/docs/conf.py b/docs/conf.py index 1c5567b44f92..53f2bd3a2341 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -76,7 +76,7 @@ # # We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags" # breakdown, so use the same version identifier for both to avoid confusion. -version = release = '1.22' +version = release = '1.23' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -90,7 +90,77 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ["build", ".venv"] +exclude_patterns = ['build', '.venv', "esp32", "esp8266", "pyboard", "wipy", + "develop/cmodules.rst", + "develop/compiler.rst", + "develop/extendingmicropython.rst", + "develop/gettingstarted.rst", + "develop/index.rst", + "develop/library.rst", + "develop/maps.rst", + "develop/memorymgt.rst", + "develop/natmod.rst", + "develop/optimizations.rst", + "develop/porting.rst", + "develop/publiccapi.rst", + "develop/qstr.rst", + "develop/writingtests.rst", + "library/esp.rst", + "library/espnow.rst", + "library/esp32.rst", + "library/framebuf.rst", + "library/rp2.rst", + "library/rp2.DMA.rst", + "library/rp2.Flash.rst", + "library/rp2.PIO.rst", + "library/rp2.StateMachine.rst", + "library/wm8960.rst", + "library/zephyr.rst", + "library/zephyr.DiskAccess.rst", + "library/zephyr.FlashArea.rst", + "library/zephyr.zsensor.rst", + "library/lcd160cr.rst", + "library/machine.ADCWiPy.rst", + "library/machine.SD.rst", + "library/machine.SDCard.rst", + "library/machine.TimerWiPy.rst", + "library/machine.USBDevice.rst", + "library/network.CC3K.rst", + "library/network.WIZNET5K.rst", + "library/network.WLANWiPy.rst", + "library/neopixel.rst", + "library/pyb.Accel.rst", + "library/pyb.LCD.rst", + "library/pyb.Switch.rst", + "library/wipy.rst", + "mimxrt/general.rst", + "mimxrt/pinout.rst", + "mimxrt/tutorial/intro.rst", + 
"mimxrt/quickref.rst", + "renesas-ra/general.rst", + "renesas-ra/quickref.rst", + "renesas-ra/tutorial/index.rst", + "renesas-ra/tutorial/intro.rst", + "renesas-ra/tutorial/program_in_flash.rst", + "renesas-ra/tutorial/reset.rst", + "renesas-ra/tutorial/troubleshooting.rst", + "renesas-ra/tutorial/using_peripheral.rst", + "rp2/general.rst", + "rp2/quickref.rst", + "rp2/tutorial/intro.rst", + "rp2/tutorial/pio.rst", + "samd/general.rst", + "samd/quickref.rst", + "samd/pinout.rst", + "samd/tutorial/intro.rst", + "unix/quickref.rst", + "zephyr/general.rst", + "zephyr/quickref.rst", + "zephyr/tutorial/index.rst", + "zephyr/tutorial/intro.rst", + "zephyr/tutorial/pins.rst", + "zephyr/tutorial/repl.rst", + "zephyr/tutorial/storage.rst"] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/docs/genrst/builtin_types.rst b/docs/genrst/builtin_types.rst index 4e22076722c9..8bce264ac3ac 100644 --- a/docs/genrst/builtin_types.rst +++ b/docs/genrst/builtin_types.rst @@ -2,7 +2,7 @@ Builtin types ============= -Generated Mon 14 Nov 2022 04:08:40 UTC +Generated Tue 23 Jul 2024 04:46:38 UTC Exception --------- @@ -22,15 +22,15 @@ Sample code:: print(e.value) print(e.errno) -+-----------------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-----------------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| Traceback (most recent call last): | /bin/sh: 1: ../ports/unix/micropython: not found | -| File "", line 8, in | | -| AttributeError: 'Exception' object has no attribute 'value' | | -+-----------------------------------------------------------------+------------------------------------------------------+ ++-----------------------------------------------------------------+-------------+ +| CPy output: | uPy output: | 
++-----------------------------------------------------------------+-------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | 1 | +| File "", line 8, in | 1 | +| AttributeError: 'Exception' object has no attribute 'value' | | ++-----------------------------------------------------------------+-------------+ .. _cpydiff_types_exception_chaining: @@ -44,21 +44,21 @@ Sample code:: except TypeError: raise ValueError -+-------------------------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| Traceback (most recent call last): | /bin/sh: 1: ../ports/unix/micropython: not found | -| File "", line 8, in | | -| TypeError | | -| | | -| During handling of the above exception, another exception occurred: | | -| | | -| Traceback (most recent call last): | | -| File "", line 10, in | | -| ValueError | | -+-------------------------------------------------------------------------+------------------------------------------------------+ ++-------------------------------------------------------------------------+--------------------------------------------+ +| CPy output: | uPy output: | ++-------------------------------------------------------------------------+--------------------------------------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | Traceback (most recent call last): | +| File "", line 8, in | File "", line 10, in | +| TypeError | ValueError: | +| | | +| During handling of the above exception, another exception occurred: | | +| | | +| Traceback (most recent call last): | | +| File "", line 10, in | | +| ValueError | | ++-------------------------------------------------------------------------+--------------------------------------------+ .. 
_cpydiff_types_exception_instancevar: @@ -75,13 +75,15 @@ Sample code:: e.x = 0 print(e.x) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 0 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------------------------+ +| :: | :: | +| | | +| 0 | Traceback (most recent call last): | +| | File "", line 8, in | +| | AttributeError: 'Exception' object has no attribute 'x' | ++-------------+-------------------------------------------------------------+ .. _cpydiff_types_exception_loops: @@ -99,17 +101,17 @@ Sample code:: print("iter") i += 1 -+--------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+--------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| iter | /bin/sh: 1: ../ports/unix/micropython: not found | -| iter | | -| Traceback (most recent call last): | | -| File "", line 10, in | | -| IndexError: list index out of range | | -+--------------------------------------------+------------------------------------------------------+ ++--------------------------------------------+--------------------------------------------+ +| CPy output: | uPy output: | ++--------------------------------------------+--------------------------------------------+ +| :: | :: | +| | | +| iter | iter | +| iter | iter | +| Traceback (most recent call last): | Traceback (most recent call last): | +| File "", line 10, in | File "", line 12, in | +| IndexError: list index out of range | IndexError: list index out of range | 
++--------------------------------------------+--------------------------------------------+ .. _cpydiff_types_exception_subclassinit: @@ -135,13 +137,16 @@ Sample code:: a = A() -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| | :: | -| | | -| | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------------------------------------+ +| | :: | +| | | +| | Traceback (most recent call last): | +| | File "", line 18, in | +| | File "", line 15, in __init__ | +| | AttributeError: type object 'Exception' has no attribute '__init__' | ++-------------+-------------------------------------------------------------------------+ bytearray --------- @@ -157,13 +162,15 @@ Sample code:: b[0:1] = [1, 2] print(b) -+----------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+----------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| bytearray(b'\x01\x02\x00\x00\x00') | /bin/sh: 1: ../ports/unix/micropython: not found | -+----------------------------------------+------------------------------------------------------+ ++----------------------------------------+-------------------------------------------------------------+ +| CPy output: | uPy output: | ++----------------------------------------+-------------------------------------------------------------+ +| :: | :: | +| | | +| bytearray(b'\x01\x02\x00\x00\x00') | Traceback (most recent call last): | +| | File "", line 8, in | +| | NotImplementedError: array/bytes required on right side | 
++----------------------------------------+-------------------------------------------------------------+ bytes ----- @@ -181,15 +188,15 @@ Sample code:: print(b"{}".format(1)) -+--------------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+--------------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| Traceback (most recent call last): | /bin/sh: 1: ../ports/unix/micropython: not found | -| File "", line 7, in | | -| AttributeError: 'bytes' object has no attribute 'format' | | -+--------------------------------------------------------------+------------------------------------------------------+ ++--------------------------------------------------------------+-------------+ +| CPy output: | uPy output: | ++--------------------------------------------------------------+-------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | b'1' | +| File "", line 7, in | | +| AttributeError: 'bytes' object has no attribute 'format' | | ++--------------------------------------------------------------+-------------+ .. 
_cpydiff_types_bytes_keywords: @@ -202,13 +209,15 @@ Sample code:: print(bytes("abc", encoding="utf8")) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| b'abc' | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+----------------------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+----------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| b'abc' | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: keyword argument(s) not implemented - use normal args instead | ++-------------+----------------------------------------------------------------------------------------+ .. _cpydiff_types_bytes_subscrstep: @@ -223,13 +232,15 @@ Sample code:: print(b"123"[0:3:2]) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| b'13' | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+---------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+---------------------------------------------------------------------------+ +| :: | :: | +| | | +| b'13' | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: only slices with step=1 (aka None) are supported | ++-------------+---------------------------------------------------------------------------+ dict ---- @@ -247,17 +258,47 @@ Sample code:: print({1: 2, 3: 4}.keys() & {1}) 
-+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| {1} | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+------------------------------------------------------------------+ +| :: | :: | +| | | +| {1} | Traceback (most recent call last): | +| | File "", line 7, in | +| | TypeError: unsupported types for __and__: 'dict_view', 'set' | ++-------------+------------------------------------------------------------------+ float ----- +.. _cpydiff_types_float_implicit_conversion: + +uPy allows implicit conversion of objects in maths operations while CPython does not. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Workaround:** Objects should be wrapped in ``float(obj)`` for compatibility with CPython. + +Sample code:: + + + + class Test: + def __float__(self): + return 0.5 + + + print(2.0 * Test()) + ++----------------------------------------------------------------------+-------------+ +| CPy output: | uPy output: | ++----------------------------------------------------------------------+-------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | 1.0 | +| File "", line 14, in | | +| TypeError: unsupported operand type(s) for *: 'float' and 'Test' | | ++----------------------------------------------------------------------+-------------+ + .. 
_cpydiff_types_float_rounding: uPy and CPython outputs formats may differ @@ -267,13 +308,13 @@ Sample code:: print("%.1g" % -9.9) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| -1e+01 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | :: | +| | | +| -1e+01 | -10 | ++-------------+-------------+ int --- @@ -293,13 +334,15 @@ Sample code:: x = 255 print("{} is {} bits long.".format(x, x.bit_length())) -+-------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| 255 is 8 bits long. | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------------------+------------------------------------------------------+ ++-------------------------+----------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------------------+----------------------------------------------------------------+ +| :: | :: | +| | | +| 255 is 8 bits long. | Traceback (most recent call last): | +| | File "", line 9, in | +| | AttributeError: 'int' object has no attribute 'bit_length' | ++-------------------------+----------------------------------------------------------------+ .. 
_cpydiff_types_int_subclassconv: @@ -319,13 +362,16 @@ Sample code:: a = A(42) print(a + a) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 84 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------------------------+ +| :: | :: | +| | | +| 84 | Traceback (most recent call last): | +| | File "", line 14, in | +| | File "", line 10, in | +| | TypeError: unsupported types for __radd__: 'int', 'int' | ++-------------+-------------------------------------------------------------+ list ---- @@ -343,13 +389,15 @@ Sample code:: del l[0:4:2] print(l) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| [2, 4] | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------+ +| :: | :: | +| | | +| [2, 4] | Traceback (most recent call last): | +| | File "", line 8, in | +| | NotImplementedError: | ++-------------+-------------------------------------------+ .. 
_cpydiff_types_list_store_noniter: @@ -366,13 +414,15 @@ Sample code:: l[0:1] = range(4) print(l) -+----------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+----------------------+------------------------------------------------------+ -| :: | :: | -| | | -| [0, 1, 2, 3, 20] | /bin/sh: 1: ../ports/unix/micropython: not found | -+----------------------+------------------------------------------------------+ ++----------------------+-----------------------------------------------------+ +| CPy output: | uPy output: | ++----------------------+-----------------------------------------------------+ +| :: | :: | +| | | +| [0, 1, 2, 3, 20] | Traceback (most recent call last): | +| | File "", line 8, in | +| | TypeError: object 'range' isn't a tuple or list | ++----------------------+-----------------------------------------------------+ .. _cpydiff_types_list_store_subscrstep: @@ -387,13 +437,46 @@ Sample code:: l[0:4:2] = [5, 6] print(l) -+------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------+------------------------------------------------------+ -| :: | :: | -| | | -| [5, 2, 6, 4] | /bin/sh: 1: ../ports/unix/micropython: not found | -+------------------+------------------------------------------------------+ ++------------------+-------------------------------------------+ +| CPy output: | uPy output: | ++------------------+-------------------------------------------+ +| :: | :: | +| | | +| [5, 2, 6, 4] | Traceback (most recent call last): | +| | File "", line 8, in | +| | NotImplementedError: | ++------------------+-------------------------------------------+ + +memoryview +---------- + +.. 
_cpydiff_types_memoryview_invalid: +memoryview can become invalid if its target is resized +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**Cause:** CPython prevents a ``bytearray`` or ``io.BytesIO`` object from changing size while there is a ``memoryview`` object that references it. MicroPython requires the programmer to manually ensure that an object is not resized while any ``memoryview`` references it. +In the worst case scenario, resizing an object which is the target of a memoryview can cause the memoryview(s) to reference invalid freed memory (a use-after-free bug) and corrupt the MicroPython runtime. +**Workaround:** Do not change the size of any ``bytearray`` or ``io.BytesIO`` object that has a ``memoryview`` assigned to it. +Sample code:: + + b = bytearray(b"abcdefg") + m = memoryview(b) + b.extend(b"hijklmnop") + print(b, bytes(m)) + ++----------------------------------------------------------------------+-----------------------------------------------+ +| CPy output: | uPy output: | ++----------------------------------------------------------------------+-----------------------------------------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | bytearray(b'abcdefghijklmnop') b'abcdefg' | +| File "", line 11, in | | +| BufferError: Existing exports of data: object cannot be re-sized | | ++----------------------------------------------------------------------+-----------------------------------------------+ str --- @@ -407,13 +490,15 @@ Sample code:: print("abc".endswith("c", 1)) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| True | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+--------------------------------------------+ +| CPy output: | uPy output: | 
++-------------+--------------------------------------------+ +| :: | :: | +| | | +| True | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: start/end indices | ++-------------+--------------------------------------------+ .. _cpydiff_types_str_formatsubscr: @@ -424,13 +509,15 @@ Sample code:: print("{a[0]}".format(a=[1, 2])) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 1 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+---------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+---------------------------------------------------+ +| :: | :: | +| | | +| 1 | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: attributes not supported | ++-------------+---------------------------------------------------+ .. 
_cpydiff_types_str_keywords: @@ -443,13 +530,15 @@ Sample code:: print(str(b"abc", encoding="utf8")) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| abc | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+----------------------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+----------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| abc | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: keyword argument(s) not implemented - use normal args instead | ++-------------+----------------------------------------------------------------------------------------+ .. _cpydiff_types_str_ljust_rjust: @@ -464,13 +553,15 @@ Sample code:: print("abc".ljust(10)) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| abc | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-----------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-----------------------------------------------------------+ +| :: | :: | +| | | +| abc | Traceback (most recent call last): | +| | File "", line 7, in | +| | AttributeError: 'str' object has no attribute 'ljust' | ++-------------+-----------------------------------------------------------+ .. 
_cpydiff_types_str_rsplitnone: @@ -481,13 +572,15 @@ Sample code:: print("a a a".rsplit(None, 1)) -+------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------+------------------------------------------------------+ -| :: | :: | -| | | -| ['a a', 'a'] | /bin/sh: 1: ../ports/unix/micropython: not found | -+------------------+------------------------------------------------------+ ++------------------+-------------------------------------------+ +| CPy output: | uPy output: | ++------------------+-------------------------------------------+ +| :: | :: | +| | | +| ['a a', 'a'] | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: rsplit(None,n) | ++------------------+-------------------------------------------+ .. _cpydiff_types_str_subscrstep: @@ -498,13 +591,15 @@ Sample code:: print("abcdefghi"[0:9:2]) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| acegi | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+---------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+---------------------------------------------------------------------------+ +| :: | :: | +| | | +| acegi | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: only slices with step=1 (aka None) are supported | ++-------------+---------------------------------------------------------------------------+ tuple ----- @@ -518,11 +613,13 @@ Sample code:: print((1, 2, 3, 4)[0:4:2]) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | 
-| (1, 3) | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+---------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+---------------------------------------------------------------------------+ +| :: | :: | +| | | +| (1, 3) | Traceback (most recent call last): | +| | File "", line 7, in | +| | NotImplementedError: only slices with step=1 (aka None) are supported | ++-------------+---------------------------------------------------------------------------+ diff --git a/docs/genrst/core_language.rst b/docs/genrst/core_language.rst index 967fccb685ee..bb6c3155e7ed 100644 --- a/docs/genrst/core_language.rst +++ b/docs/genrst/core_language.rst @@ -2,7 +2,7 @@ Core language ============= -Generated Mon 14 Nov 2022 04:08:40 UTC +Generated Tue 23 Jul 2024 04:46:38 UTC .. _cpydiff_core_fstring_concat: @@ -23,17 +23,17 @@ Sample code:: print(f"{x}" "a{}b") # fails print(f"{x}" f"{y}") # fails -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| aa1 | /bin/sh: 1: ../ports/unix/micropython: not found | -| 1ab | | -| a{}a1 | | -| 1a{}b | | -| 12 | | -+-------------+------------------------------------------------------+ ++-------------+----------------------------------------+ +| CPy output: | uPy output: | ++-------------+----------------------------------------+ +| :: | :: | +| | | +| aa1 | Traceback (most recent call last): | +| 1ab | File "", line 13 | +| a{}a1 | SyntaxError: invalid syntax | +| 1a{}b | | +| 12 | | ++-------------+----------------------------------------+ .. 
_cpydiff_core_fstring_parser: @@ -50,14 +50,15 @@ Sample code:: print(f'{"hello { world"}') print(f'{"hello ] world"}') -+-------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------+------------------------------------------------------+ -| :: | :: | -| | | -| hello { world | /bin/sh: 1: ../ports/unix/micropython: not found | -| hello ] world | | -+-------------------+------------------------------------------------------+ ++-------------------+----------------------------------------+ +| CPy output: | uPy output: | ++-------------------+----------------------------------------+ +| :: | :: | +| | | +| hello { world | Traceback (most recent call last): | +| hello ] world | File "", line 9 | +| | SyntaxError: invalid syntax | ++-------------------+----------------------------------------+ .. _cpydiff_core_fstring_raw: @@ -71,46 +72,39 @@ Sample code:: rf"hello" -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| | :: | -| | | -| | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+--------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+--------------------------------------------------+ +| | :: | +| | | +| | Traceback (most recent call last): | +| | File "", line 8 | +| | SyntaxError: raw f-strings are not supported | ++-------------+--------------------------------------------------+ .. _cpydiff_core_fstring_repr: -f-strings don't support the !r, !s, and !a conversions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +f-strings don't support !a conversions +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**Cause:** MicroPython is optimised for code space. 
+**Cause:** MicroPython does not implement ascii() -**Workaround:** Use repr(), str(), and ascii() explictly. +**Workaround:** None Sample code:: - - class X: - def __repr__(self): - return "repr" - - def __str__(self): - return "str" - - - print(f"{X()!r}") - print(f"{X()!s}") + f"{'unicode text'!a}" -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| repr | /bin/sh: 1: ../ports/unix/micropython: not found | -| str | | -+-------------+------------------------------------------------------+ ++-------------+----------------------------------------+ +| CPy output: | uPy output: | ++-------------+----------------------------------------+ +| | :: | +| | | +| | Traceback (most recent call last): | +| | File "", line 8 | +| | SyntaxError: invalid syntax | ++-------------+----------------------------------------+ Classes ------- @@ -135,13 +129,13 @@ Sample code:: gc.collect() -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| __del__ | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | | +| | | +| __del__ | | ++-------------+-------------+ .. 
_cpydiff_core_class_mro: @@ -168,13 +162,57 @@ Sample code:: t = C((1, 2, 3)) print(t) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| Foo | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+---------------+ +| CPy output: | uPy output: | ++-------------+---------------+ +| :: | :: | +| | | +| Foo | (1, 2, 3) | ++-------------+---------------+ + +.. _cpydiff_core_class_name_mangling: + +Private Class Members name mangling is not implemented +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**Cause:** The MicroPython compiler does not implement name mangling for private class members. + +**Workaround:** Avoid using or having a collision with global names, by adding a unique prefix to the private class member name manually. + +Sample code:: + + + + def __print_string(string): + print(string) + + + class Foo: + def __init__(self, string): + self.string = string + + def do_print(self): + __print_string(self.string) + + + example_string = "Example String to print." + + class_item = Foo(example_string) + print(class_item.string) + + class_item.do_print() + ++------------------------------------------------------------------------------------------+------------------------------+ +| CPy output: | uPy output: | ++------------------------------------------------------------------------------------------+------------------------------+ +| :: | :: | +| | | +| Example String to print. | Example String to print. | +| Traceback (most recent call last): | Example String to print. | +| File "", line 26, in | | +| File "", line 18, in do_print | | +| NameError: name '_Foo__print_string' is not defined. Did you mean: '__print_string'? 
| | ++------------------------------------------------------------------------------------------+------------------------------+ .. _cpydiff_core_class_supermultiple: @@ -214,16 +252,16 @@ Sample code:: D() -+----------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+----------------+------------------------------------------------------+ -| :: | :: | -| | | -| D.__init__ | /bin/sh: 1: ../ports/unix/micropython: not found | -| B.__init__ | | -| C.__init__ | | -| A.__init__ | | -+----------------+------------------------------------------------------+ ++----------------+----------------+ +| CPy output: | uPy output: | ++----------------+----------------+ +| :: | :: | +| | | +| D.__init__ | D.__init__ | +| B.__init__ | B.__init__ | +| C.__init__ | A.__init__ | +| A.__init__ | | ++----------------+----------------+ .. _cpydiff_core_class_superproperty: @@ -249,13 +287,13 @@ Sample code:: a = AA() print(a.p) -+---------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+---------------+------------------------------------------------------+ -| :: | :: | -| | | -| {'a': 10} | /bin/sh: 1: ../ports/unix/micropython: not found | -+---------------+------------------------------------------------------+ ++---------------+----------------+ +| CPy output: | uPy output: | ++---------------+----------------+ +| :: | :: | +| | | +| {'a': 10} | | ++---------------+----------------+ Functions --------- @@ -276,13 +314,13 @@ Sample code:: except Exception as e: print(e) -+---------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+---------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| append() takes exactly one argument (0 given) | /bin/sh: 1: ../ports/unix/micropython: not found | 
-+---------------------------------------------------+------------------------------------------------------+ ++--------------------------------------------------------+------------------------------------------------------------+ +| CPy output: | uPy output: | ++--------------------------------------------------------+------------------------------------------------------------+ +| :: | :: | +| | | +| list.append() takes exactly one argument (0 given) | function takes 2 positional arguments but 1 were given | ++--------------------------------------------------------+------------------------------------------------------------+ .. _cpydiff_core_function_moduleattr: @@ -303,13 +341,15 @@ Sample code:: print(f.__module__) -+--------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+--------------+------------------------------------------------------+ -| :: | :: | -| | | -| __main__ | /bin/sh: 1: ../ports/unix/micropython: not found | -+--------------+------------------------------------------------------+ ++--------------+---------------------------------------------------------------------+ +| CPy output: | uPy output: | ++--------------+---------------------------------------------------------------------+ +| :: | :: | +| | | +| __main__ | Traceback (most recent call last): | +| | File "", line 13, in | +| | AttributeError: 'function' object has no attribute '__module__' | ++--------------+---------------------------------------------------------------------+ .. 
_cpydiff_core_function_userattr: @@ -331,13 +371,15 @@ Sample code:: f.x = 0 print(f.x) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 0 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+------------------------------------------------------------+ +| :: | :: | +| | | +| 0 | Traceback (most recent call last): | +| | File "", line 13, in | +| | AttributeError: 'function' object has no attribute 'x' | ++-------------+------------------------------------------------------------+ Generator --------- @@ -374,17 +416,17 @@ Sample code:: func() -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| Enter | /bin/sh: 1: ../ports/unix/micropython: not found | -| 1 | | -| 2 | | -| 3 | | -| Exit | | -+-------------+------------------------------------------------------+ ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | :: | +| | | +| Enter | Enter | +| 1 | 1 | +| 2 | 2 | +| 3 | 3 | +| Exit | | ++-------------+-------------+ Runtime ------- @@ -407,13 +449,13 @@ Sample code:: test() -+----------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+----------------+------------------------------------------------------+ -| :: | :: | -| | | -| {'val': 2} | /bin/sh: 1: ../ports/unix/micropython: not found | -+----------------+------------------------------------------------------+ ++----------------+------------------------------------------------------------------------------------------------+ +| CPy 
output: | uPy output: | ++----------------+------------------------------------------------------------------------------------------------+ +| :: | :: | +| | | +| {'val': 2} | {'test': , '__name__': '__main__', '__file__': ''} | ++----------------+------------------------------------------------------------------------------------------------+ .. _cpydiff_core_locals_eval: @@ -435,14 +477,14 @@ Sample code:: test() -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 2 | /bin/sh: 1: ../ports/unix/micropython: not found | -| 2 | | -+-------------+------------------------------------------------------+ ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | :: | +| | | +| 2 | 2 | +| 2 | 1 | ++-------------+-------------+ import ------ @@ -462,20 +504,22 @@ Sample code:: foo.hello() -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| hello | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------+ +| :: | :: | +| | | +| hello | Traceback (most recent call last): | +| | File "", line 9, in | +| | NameError: name 'foo' isn't defined | ++-------------+-------------------------------------------+ .. _cpydiff_core_import_path: __path__ attribute of a package has a different type (single string instead of list of strings) in MicroPython ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -**Cause:** MicroPython does't support namespace packages split across filesystem. 
Beyond that, MicroPython's import system is highly optimized for minimal memory usage. +**Cause:** MicroPython doesn't support namespace packages split across filesystem. Beyond that, MicroPython's import system is highly optimized for minimal memory usage. **Workaround:** Details of import handling is inherently implementation dependent. Don't rely on such details in portable applications. @@ -485,53 +529,18 @@ Sample code:: print(modules.__path__) -+-----------------------------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-----------------------------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| ['/home/kwagyeman/GitHub/openmv-doc/micropython/tests/cpydiff/modules'] | /bin/sh: 1: ../ports/unix/micropython: not found | -+-----------------------------------------------------------------------------+------------------------------------------------------+ - -.. _cpydiff_core_import_prereg: - -Failed to load modules are still registered as loaded -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Cause:** To make module handling more efficient, it's not wrapped with exception handling. - -**Workaround:** Test modules before production use; during development, use ``del sys.modules["name"]``, or just soft or hard reset the board. 
- -Sample code:: - - import sys - - try: - from modules import foo - except NameError as e: - print(e) - try: - from modules import foo - - print("Should not get here") - except NameError as e: - print(e) - -+-------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| foo | /bin/sh: 1: ../ports/unix/micropython: not found | -| name 'xxx' is not defined | | -| foo | | -| name 'xxx' is not defined | | -+-------------------------------+------------------------------------------------------+ ++-----------------------------------------------------------------------------+------------------------------+ +| CPy output: | uPy output: | ++-----------------------------------------------------------------------------+------------------------------+ +| :: | :: | +| | | +| ['/home/kwagyeman/github/openmv-doc/micropython/tests/cpydiff/modules'] | ../tests/cpydiff/modules | ++-----------------------------------------------------------------------------+------------------------------+ .. _cpydiff_core_import_split_ns_pkgs: -MicroPython does't support namespace packages split across filesystem. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +MicroPython doesn't support namespace packages split across filesystem. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **Cause:** MicroPython's import system is highly optimized for simplicity, minimal memory usage, and minimal filesystem search overhead. 
@@ -549,11 +558,13 @@ Sample code:: print("Two modules of a split namespace package imported") -+-------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| Two modules of a split namespace package imported | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------------------------------------------------+------------------------------------------------------+ ++-------------------------------------------------------+-----------------------------------------------+ +| CPy output: | uPy output: | ++-------------------------------------------------------+-----------------------------------------------+ +| :: | :: | +| | | +| Two modules of a split namespace package imported | Traceback (most recent call last): | +| | File "", line 13, in | +| | ImportError: no module named 'subpkg.bar' | ++-------------------------------------------------------+-----------------------------------------------+ diff --git a/docs/genrst/modules.rst b/docs/genrst/modules.rst index f58e237968ae..53ccc929d479 100644 --- a/docs/genrst/modules.rst +++ b/docs/genrst/modules.rst @@ -2,7 +2,41 @@ Modules ======= -Generated Mon 14 Nov 2022 04:08:40 UTC +Generated Tue 23 Jul 2024 04:46:38 UTC + +.. Preamble section inserted into generated output + +Positional-only Parameters +-------------------------- + +To save code size, many functions that accept keyword arguments in CPython only accept positional arguments in MicroPython. + +MicroPython marks positional-only parameters in the same way as CPython, by inserting a ``/`` to mark the end of the positional parameters. Any function whose signature ends in ``/`` takes *only* positional arguments. For more details, see `PEP 570 `_. 
+ +Example +~~~~~~~ + +For example, in CPython 3.4 this is the signature of the constructor ``socket.socket``:: + + socket.socket(family=AF_INET, type=SOCK_STREAM, proto=0, fileno=None) + +However, the signature documented in :func:`MicroPython` is:: + + socket(af=AF_INET, type=SOCK_STREAM, proto=IPPROTO_TCP, /) + +The ``/`` at the end of the parameters indicates that they are all positional-only in MicroPython. The following code works in CPython but not in most MicroPython ports:: + + import socket + s = socket.socket(type=socket.SOCK_DGRAM) + +MicroPython will raise an exception:: + + TypeError: function doesn't take keyword arguments + +The following code will work in both CPython and MicroPython:: + + import socket + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) array ----- @@ -22,13 +56,15 @@ Sample code:: array.array("b", [1, 2]) == array.array("i", [1, 2]) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| | :: | -| | | -| | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------+ +| | :: | +| | | +| | Traceback (most recent call last): | +| | File "", line 9, in | +| | NotImplementedError: | ++-------------+-------------------------------------------+ .. 
_cpydiff_module_array_constructor: @@ -46,15 +82,15 @@ Sample code:: a = array.array("b", [257]) print(a) -+--------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+--------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| Traceback (most recent call last): | /bin/sh: 1: ../ports/unix/micropython: not found | -| File "", line 9, in | | -| OverflowError: signed char is greater than maximum | | -+--------------------------------------------------------+------------------------------------------------------+ ++--------------------------------------------------------+---------------------+ +| CPy output: | uPy output: | ++--------------------------------------------------------+---------------------+ +| :: | :: | +| | | +| Traceback (most recent call last): | array('b', [1]) | +| File "", line 9, in | | +| OverflowError: signed char is greater than maximum | | ++--------------------------------------------------------+---------------------+ .. _cpydiff_modules_array_containment: @@ -67,13 +103,15 @@ Sample code:: print(1 in array.array("B", b"12")) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| False | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------------------------+ +| :: | :: | +| | | +| False | Traceback (most recent call last): | +| | File "", line 9, in | +| | NotImplementedError: | ++-------------+-------------------------------------------+ .. 
_cpydiff_modules_array_deletion: @@ -88,13 +126,15 @@ Sample code:: del a[1] print(a) -+------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| array('b', [1, 3]) | /bin/sh: 1: ../ports/unix/micropython: not found | -+------------------------+------------------------------------------------------+ ++------------------------+-------------------------------------------------------------+ +| CPy output: | uPy output: | ++------------------------+-------------------------------------------------------------+ +| :: | :: | +| | | +| array('b', [1, 3]) | Traceback (most recent call last): | +| | File "", line 10, in | +| | TypeError: 'array' object doesn't support item deletion | ++------------------------+-------------------------------------------------------------+ .. _cpydiff_modules_array_subscrstep: @@ -108,13 +148,15 @@ Sample code:: a = array.array("b", (1, 2, 3)) print(a[3:2:2]) -+----------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+----------------+------------------------------------------------------+ -| :: | :: | -| | | -| array('b') | /bin/sh: 1: ../ports/unix/micropython: not found | -+----------------+------------------------------------------------------+ ++----------------+---------------------------------------------------------------------------+ +| CPy output: | uPy output: | ++----------------+---------------------------------------------------------------------------+ +| :: | :: | +| | | +| array('b') | Traceback (most recent call last): | +| | File "", line 10, in | +| | NotImplementedError: only slices with step=1 (aka None) are supported | ++----------------+---------------------------------------------------------------------------+ builtins -------- @@ -137,13 +179,15 @@ Sample code:: print(next(iter(range(0)), 42)) 
-+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 42 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-----------------------------------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-----------------------------------------------------------------------+ +| :: | :: | +| | | +| 42 | Traceback (most recent call last): | +| | File "", line 12, in | +| | TypeError: function takes 1 positional arguments but 2 were given | ++-------------+-----------------------------------------------------------------------+ deque ----- @@ -162,13 +206,15 @@ Sample code:: D = collections.deque() print(D) -+---------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+---------------+------------------------------------------------------+ -| :: | :: | -| | | -| deque([]) | /bin/sh: 1: ../ports/unix/micropython: not found | -+---------------+------------------------------------------------------+ ++---------------+-----------------------------------------------------------------+ +| CPy output: | uPy output: | ++---------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| deque([]) | Traceback (most recent call last): | +| | File "", line 9, in | +| | TypeError: function missing 2 required positional arguments | ++---------------+-----------------------------------------------------------------+ json ---- @@ -190,13 +236,15 @@ Sample code:: except TypeError: print("TypeError") -+---------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+---------------+------------------------------------------------------+ -| :: | :: | -| | | -| TypeError | /bin/sh: 1: ../ports/unix/micropython: 
not found | -+---------------+------------------------------------------------------+ ++---------------+--------------------------------------------+ +| CPy output: | uPy output: | ++---------------+--------------------------------------------+ +| :: | :: | +| | | +| TypeError | Traceback (most recent call last): | +| | File "", line 12, in | +| | UnicodeError: | ++---------------+--------------------------------------------+ os -- @@ -222,14 +270,15 @@ Sample code:: os.putenv("NEW_VARIABLE", "VALUE") print(os.getenv("NEW_VARIABLE")) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| None | /bin/sh: 1: ../ports/unix/micropython: not found | -| VALUE | | -+-------------+------------------------------------------------------+ ++-------------+-------------------------+ +| CPy output: | uPy output: | ++-------------+-------------------------+ +| :: | :: | +| | | +| None | should not get here | +| VALUE | None | +| | VALUE | ++-------------+-------------------------+ .. _cpydiff_modules_os_getenv: @@ -246,40 +295,14 @@ Sample code:: os.putenv("NEW_VARIABLE", "VALUE") print(os.getenv("NEW_VARIABLE")) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| None | /bin/sh: 1: ../ports/unix/micropython: not found | -| None | | -+-------------+------------------------------------------------------+ - -.. 
_cpydiff_modules_os_getenv_argcount: - -``getenv`` only allows one argument -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -**Workaround:** Test that the return value is ``None`` - -Sample code:: - - import os - - try: - print(os.getenv("NEW_VARIABLE", "DEFAULT")) - except TypeError: - print("should not get here") - # this assumes NEW_VARIABLE is never an empty variable - print(os.getenv("NEW_VARIABLE") or "DEFAULT") - -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| DEFAULT | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+-------------+ +| CPy output: | uPy output: | ++-------------+-------------+ +| :: | :: | +| | | +| None | None | +| None | VALUE | ++-------------+-------------+ random ------ @@ -302,13 +325,15 @@ Sample code:: x = random.getrandbits(64) print("{}".format(x)) -+--------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+--------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| 10869376513326761403 | /bin/sh: 1: ../ports/unix/micropython: not found | -+--------------------------+------------------------------------------------------+ ++--------------------------+--------------------------------------------+ +| CPy output: | uPy output: | ++--------------------------+--------------------------------------------+ +| :: | :: | +| | | +| 16149866429655178725 | Traceback (most recent call last): | +| | File "", line 11, in | +| | ValueError: bits must be 32 or less | ++--------------------------+--------------------------------------------+ .. 
_cpydiff_modules_random_randint: @@ -328,13 +353,15 @@ Sample code:: x = random.randint(2**128 - 1, 2**128) print("x={}".format(x)) -+-----------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-----------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| x=340282366920938463463374607431768211456 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-----------------------------------------------+------------------------------------------------------+ ++-----------------------------------------------+-----------------------------------------------------------------+ +| CPy output: | uPy output: | ++-----------------------------------------------+-----------------------------------------------------------------+ +| :: | :: | +| | | +| x=340282366920938463463374607431768211455 | Traceback (most recent call last): | +| | File "", line 11, in | +| | OverflowError: overflow converting long int to machine word | ++-----------------------------------------------+-----------------------------------------------------------------+ struct ------ @@ -354,13 +381,14 @@ Sample code:: except: print("struct.error") -+------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------+------------------------------------------------------+ -| :: | :: | -| | | -| struct.error | /bin/sh: 1: ../ports/unix/micropython: not found | -+------------------+------------------------------------------------------+ ++------------------+-------------------------+ +| CPy output: | uPy output: | ++------------------+-------------------------+ +| :: | :: | +| | | +| struct.error | b'\x01\x00' | +| | Should not get here | ++------------------+-------------------------+ .. 
_cpydiff_modules_struct_manyargs: @@ -377,13 +405,14 @@ Sample code:: except: print("struct.error") -+------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------+------------------------------------------------------+ -| :: | :: | -| | | -| struct.error | /bin/sh: 1: ../ports/unix/micropython: not found | -+------------------+------------------------------------------------------+ ++------------------+-------------------------+ +| CPy output: | uPy output: | ++------------------+-------------------------+ +| :: | :: | +| | | +| struct.error | b'\x01\x02' | +| | Should not get here | ++------------------+-------------------------+ .. _cpydiff_modules_struct_whitespace_in_format: @@ -404,14 +433,14 @@ Sample code:: except: print("struct.error") -+------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| b'\x01\x02' | /bin/sh: 1: ../ports/unix/micropython: not found | -| Should have worked | | -+------------------------+------------------------------------------------------+ ++------------------------+------------------+ +| CPy output: | uPy output: | ++------------------------+------------------+ +| :: | :: | +| | | +| b'\x01\x02' | struct.error | +| Should have worked | | ++------------------------+------------------+ sys --- @@ -430,11 +459,13 @@ Sample code:: sys.stdin = None print(sys.stdin) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| None | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+--------------------------------------------------------------+ +| CPy output: | uPy output: | 
++-------------+--------------------------------------------------------------+ +| :: | :: | +| | | +| None | Traceback (most recent call last): | +| | File "", line 9, in | +| | AttributeError: 'module' object has no attribute 'stdin' | ++-------------+--------------------------------------------------------------+ diff --git a/docs/genrst/syntax.rst b/docs/genrst/syntax.rst index 92834abae0d2..c15bbe48c42d 100644 --- a/docs/genrst/syntax.rst +++ b/docs/genrst/syntax.rst @@ -2,7 +2,7 @@ Syntax ====== -Generated Mon 14 Nov 2022 04:08:40 UTC +Generated Tue 23 Jul 2024 04:46:38 UTC .. _cpydiff_syntax_arg_unpacking: @@ -32,13 +32,15 @@ Sample code:: ) -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 67 | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ ++-------------+--------------------------------------------+ +| CPy output: | uPy output: | ++-------------+--------------------------------------------+ +| :: | :: | +| | | +| 67 | Traceback (most recent call last): | +| | File "", line 21, in | +| | SyntaxError: too many args | ++-------------+--------------------------------------------+ Operators --------- @@ -56,14 +58,15 @@ Sample code:: print([i := -1 for i in range(4)]) -+-------------------------------------------------------------------------------------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------------------------------------------------------------------------------------+------------------------------------------------------+ -| :: | :: | -| | | -| File "", line 7 | /bin/sh: 1: ../ports/unix/micropython: not found | -| SyntaxError: assignment expression cannot rebind comprehension iteration variable 'i' | | 
-+-------------------------------------------------------------------------------------------+------------------------------------------------------+ ++-------------------------------------------------------------------------------------------+-------------------------------------------------+ +| CPy output: | uPy output: | ++-------------------------------------------------------------------------------------------+-------------------------------------------------+ +| :: | :: | +| | | +| File "", line 7 | Traceback (most recent call last): | +| SyntaxError: assignment expression cannot rebind comprehension iteration variable 'i' | File "", line 7, in | +| | SyntaxError: identifier redefined as global | ++-------------------------------------------------------------------------------------------+-------------------------------------------------+ Spaces ------ @@ -88,15 +91,15 @@ Sample code:: except SyntaxError: print("Should have worked") -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| 0 | /bin/sh: 1: ../ports/unix/micropython: not found | -| 1 | | -| 1 | | -+-------------+------------------------------------------------------+ ++-------------+------------------------+ +| CPy output: | uPy output: | ++-------------+------------------------+ +| :: | :: | +| | | +| 0 | Should have worked | +| 1 | Should have worked | +| 1 | Should have worked | ++-------------+------------------------+ Unicode ------- @@ -110,11 +113,11 @@ Sample code:: print("\N{LATIN SMALL LETTER A}") -+-------------+------------------------------------------------------+ -| CPy output: | uPy output: | -+-------------+------------------------------------------------------+ -| :: | :: | -| | | -| a | /bin/sh: 1: ../ports/unix/micropython: not found | -+-------------+------------------------------------------------------+ 
++-------------+-----------------------------------------------+ +| CPy output: | uPy output: | ++-------------+-----------------------------------------------+ +| :: | :: | +| | | +| a | NotImplementedError: unicode name escapes | ++-------------+-----------------------------------------------+ diff --git a/docs/library/index.rst b/docs/library/index.rst index cee7c83e1978..2fb3bc55ad2b 100644 --- a/docs/library/index.rst +++ b/docs/library/index.rst @@ -98,10 +98,8 @@ the following libraries. btree.rst cryptolib.rst deflate.rst - framebuf.rst machine.rst micropython.rst - neopixel.rst network.rst openamp.rst uctypes.rst @@ -122,11 +120,10 @@ The following libraries are specific to the OpenMV Cam. stm.rst omv.sensor.rst omv.image.rst - omv.tf.rst + omv.ml.rst omv.gif.rst omv.mjpeg.rst omv.audio.rst - omv.micro_speech.rst omv.display.rst omv.fir.rst omv.tv.rst @@ -179,12 +176,6 @@ Examples scripts are located in OpenMV IDE under the ``IMU Shield`` examples fol Examples scripts are located in OpenMV IDE under the ``Motor Shield`` examples folder. -:mod:`lsm6dsox` --- `lsm6dsox Driver `_ -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. module:: lsm6dsox - :synopsis: lsm6dsox Driver - :mod:`modbus` --- `modbus protocol library `_ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/library/machine.I2C.rst b/docs/library/machine.I2C.rst index 1ef5891b1fd1..1c5625f80623 100644 --- a/docs/library/machine.I2C.rst +++ b/docs/library/machine.I2C.rst @@ -97,12 +97,6 @@ General Methods requested frequency. This is dependent on the platform hardware. The actual rate may be determined by printing the I2C object. -.. method:: I2C.deinit() - - Turn off the I2C bus. - - Availability: WiPy. - .. 
method:: I2C.scan() Scan all I2C addresses between 0x08 and 0x77 inclusive and return a list of diff --git a/docs/library/machine.UART.rst b/docs/library/machine.UART.rst index 3e1d516a8ae5..2238561e708c 100644 --- a/docs/library/machine.UART.rst +++ b/docs/library/machine.UART.rst @@ -83,14 +83,6 @@ Methods Also do not call ``deinit()`` as it will prevent calling ``init()`` again. - .. note:: - It is possible to call ``init()`` multiple times on the same object in - order to reconfigure UART on the fly. That allows using single UART - peripheral to serve different devices attached to different GPIO pins. - Only one device can be served at a time in that case. - Also do not call ``deinit()`` as it will prevent calling ``init()`` - again. - .. method:: UART.deinit() Turn off the UART bus. @@ -170,12 +162,3 @@ Methods added in the calling script. Availability: rp2, esp32, esp8266, mimxrt, cc3200, stm32, nrf ports, renesas-ra - -Constants ---------- - -.. data:: UART.RX_ANY - - IRQ trigger sources - - Availability: WiPy. diff --git a/docs/library/machine.rst b/docs/library/machine.rst index 7ed611ed3e41..004188ba6ec5 100644 --- a/docs/library/machine.rst +++ b/docs/library/machine.rst @@ -251,6 +251,3 @@ Classes machine.RTC.rst machine.Timer.rst machine.WDT.rst - machine.SD.rst - machine.SDCard.rst - machine.USBDevice.rst diff --git a/docs/library/mutex.rst b/docs/library/mutex.rst index 09a392c69a27..e755ad4e7441 100644 --- a/docs/library/mutex.rst +++ b/docs/library/mutex.rst @@ -66,11 +66,11 @@ Constructors Methods ~~~~~~~ - .. method:: release() + .. method:: release() -> None Unlock the mutex. - .. method:: test() + .. method:: test() -> bool Try to acquire the mutex in a non-blocking way. Return ``True`` on success and ``False`` on failure. 
diff --git a/docs/library/network.rst b/docs/library/network.rst index 279f8a6337dd..0ea90772bb0c 100644 --- a/docs/library/network.rst +++ b/docs/library/network.rst @@ -193,18 +193,3 @@ The following are functions available in the network module. is raised. The default hostname is typically the name of the board. - -.. function:: phy_mode([mode]) - - Get or set the PHY mode. - - If the *mode* parameter is provided, the PHY mode will be set to this value. - If the function is called without parameters, it returns the current PHY - mode. - - The possible modes are defined as constants: - * ``MODE_11B`` -- IEEE 802.11b, - * ``MODE_11G`` -- IEEE 802.11g, - * ``MODE_11N`` -- IEEE 802.11n. - - Availability: ESP8266. diff --git a/docs/library/omv.audio.rst b/docs/library/omv.audio.rst index 48f62829d2af..3c35b004ce69 100644 --- a/docs/library/omv.audio.rst +++ b/docs/library/omv.audio.rst @@ -4,44 +4,45 @@ .. module:: audio :synopsis: Get audio samples. -The ``audio`` module is used to record audio samples from a microphone on the Arduino Portenta. - -Please read about `PDM Microphones `__. +The ``audio`` module is used to record audio samples from a microphone on the Arduino Portenta or the Arduino Nicla. Functions --------- -.. function:: init([channels=2, [frequency=16000, [gain_db=24, [highpass=0.9883]]]]) +.. function:: init(channels:int=2, frequency:int=16000, gain_db:float=24, highpass:float=0.9883, samples:int=-1) -> None Initializes the audio module. Must be called first before using the audio module. ``channels`` specifies the number of audio channels. May be 1 or 2. Audio samples are - interleaved for two audio channels. + interleaved for two audio channels. Using more than one channel is only possible on boards + with more than one mic. ``frequency`` is the sample frequency to run at. Running at a higher sample frequency results - in a higher noise flow which means less effective bits per sample. 
By default audio samples are - 8-bits with 7-bits of effective dynamic range for voice recording. + in a higher noise floor which means less effective bits per sample. ``gain_db`` is the microphone gain to apply. - ``highpass`` is the high pass filter cut off given the target sample frequency. - -.. function:: deint() + ``highpass`` is the high pass filter cut-off given the target sample frequency. This parameter + is applicable for the Arduino Portenta H7 only. - Deinitializes the audio module. + ``samples`` is the number of samples to accumulate per callback. This is typically calculated + based on the decimation factor and number of channels. If set to -1, the number of samples + will be calculated automatically based on the decimation factor and number of channels. -.. function:: start_streaming(callback) +.. function:: start_streaming(callback) -> None Calls the ``callback`` that takes one argument ``pcmbuf`` automatically forever when enough - PCM samples have accumulated based on the audio module settings. + PCM samples have accumulated based on the `audio` module settings. You can cast the ``pcmbuf`` + into an ``ndarray`` for processing the audio samples in numpy and then pass the ``ndarray`` + to a `ml.Model` object for inference. - ``pcmbuf`` is a 16-bit array of audio samples sized based on the decimation factor and number - of channels. + ``pcmbuf`` is a signed 16-bit array of audio samples whose size is based on the decimation factor + and number of channels, or the number of samples specified in the `audio.init()` function. - In single channel mode audio samples will be 8-bits each filling up the 16-bit array. + In single channel mode audio samples will be 16-bits each filling up the 16-bit array. - In dual channel mode audio samples will be 8-bits each in pairs filling up the 16-bit array. + In dual channel mode audio samples will be 16-bits each in pairs filling up the 16-bit array. -.. function:: stop_streaming() +.. 
function:: stop_streaming() -> None Stops audio streaming and the callback from being called. diff --git a/docs/library/omv.buzzer.rst b/docs/library/omv.buzzer.rst index 37b5f8ad3afa..f5bfaa5ea295 100644 --- a/docs/library/omv.buzzer.rst +++ b/docs/library/omv.buzzer.rst @@ -13,13 +13,13 @@ The ``buzzer`` module is used to control the amplitude and frequency of a buzzer Functions --------- -.. function:: freq(freq) +.. function:: freq(freq:int) -> None Sets the buzzer frequency independently of the volume. ``freq`` any frequency to drive the buzzer at. -.. function:: duty(duty) +.. function:: duty(duty:int) -> None Sets the buzzer duty cycle independently of the frequency. @@ -29,5 +29,6 @@ Constants --------- .. data:: RESONANT_FREQ + :type: int Constant definting the highest volume frequency of the buzzer (typically 4000 Hz). diff --git a/docs/library/omv.cpufreq.rst b/docs/library/omv.cpufreq.rst index 58d9b4f31c11..9bad2e0f56f9 100644 --- a/docs/library/omv.cpufreq.rst +++ b/docs/library/omv.cpufreq.rst @@ -14,16 +14,16 @@ The ``cpufreq`` module is used to get/set the CPU frequency to save power. Functions --------- -.. function:: set_frequency(supported_frequency) +.. function:: set_frequency(supported_frequency:int) -> None Sets the CPU frequency to a supported frequency in MHz. Peripherals frequencies are not changed. Only the CPU performance. -.. function:: get_current_frequencies() +.. function:: get_current_frequencies() -> Tuple[int, int, int, int] Returns (cpu_clk_in_mhz, hclk_in_mhz, pclk1_in_mhz, pclk2_in_mhz). -.. function:: get_supported_frequencies() +.. function:: get_supported_frequencies() -> List[int] Returns the supported CPU frequencies [120, 144, 168, 192, 216] on the OpenMV Cam M7 and [60/50, 120/100, 240/200, 480/400] on the OpenMV Cam H7 Rev V/XY silicon in MHz. 
diff --git a/docs/library/omv.display.DACBacklight.rst b/docs/library/omv.display.DACBacklight.rst index 07c9a5e9d71b..3fc7a5ef37a8 100644 --- a/docs/library/omv.display.DACBacklight.rst +++ b/docs/library/omv.display.DACBacklight.rst @@ -9,7 +9,7 @@ The `DACBacklight` class is used to control a screen backlight. Constructors ------------ -.. class:: display.DACBacklight(channel, [bits=8]) +.. class:: display.DACBacklight(channel:int, bits=8) Creates a backlight object to initialize the display backlight. This class should be passed as the ``backlight`` argument to any display object constructor which can use a backlight controller. @@ -22,11 +22,11 @@ Constructors Methods ------- -.. method:: DACBacklight.deinit() +.. method:: DACBacklight.deinit() -> None Deinitializes the backlight controller. -.. method:: DACBacklight.backlight([value]) +.. method:: DACBacklight.backlight(value:Optional[int]=None) -> int Sets the backlight strength from 0-100. Note that a linear voltage on the backlight output will not necessary result in a linear brightness change on the screen. Typically there's diff --git a/docs/library/omv.display.PWMBacklight.rst b/docs/library/omv.display.PWMBacklight.rst index f352062ca423..11d79324a7e2 100644 --- a/docs/library/omv.display.PWMBacklight.rst +++ b/docs/library/omv.display.PWMBacklight.rst @@ -9,7 +9,7 @@ The `PWMBacklight` class is used to control a screen backlight. Constructors ------------ -.. class:: display.PWMBacklight(pin, [timer=3, [channel=3, [frequency=200]]]) +.. class:: display.PWMBacklight(pin, timer=3, channel=3, frequency=200) Creates a backlight object to initialize the display backlight. This class should be passed as the ``backlight`` argument to any display object constructor which can use a backlight controller. @@ -25,11 +25,11 @@ Constructors Methods ------- -.. method:: PWMBacklight.deinit() +.. method:: PWMBacklight.deinit() -> None Deinitializes the backlight controller. -.. 
method:: PWMBacklight.backlight([value]) +.. method:: PWMBacklight.backlight(value:Optional[int]=None) -> int Sets the backlight strength from 0-100. Note that a linear pwm duty cycle on the backlight output will not necessary result in a linear brightness change on the screen. Typically there's diff --git a/docs/library/omv.display.ST7701.rst b/docs/library/omv.display.ST7701.rst index 26be1a16a57b..ac801b81f432 100644 --- a/docs/library/omv.display.ST7701.rst +++ b/docs/library/omv.display.ST7701.rst @@ -18,11 +18,11 @@ Constructors Methods ------- -.. method:: ST7701.init(display_controller) +.. method:: ST7701.init(display_controller) -> None Initializes the display using the display controller which must provide `display.DSIDisplay.bus_write()` and `display.DSIDisplay.bus_read()` methods. -.. method:: ST7701.read_id() +.. method:: ST7701.read_id() -> int Returns the display id. diff --git a/docs/library/omv.display.displaydata.rst b/docs/library/omv.display.displaydata.rst index 13b6396e85a7..6fff501a4086 100644 --- a/docs/library/omv.display.displaydata.rst +++ b/docs/library/omv.display.displaydata.rst @@ -9,7 +9,7 @@ The `DisplayData` class is used for getting information about the attached Displ Constructors ------------ -.. class:: display.DisplayData([cec=False, [ddc=False, [ddc_addr=0x50]]]) +.. class:: display.DisplayData(cec=False, ddc=False, ddc_addr=0x50) ``cec`` Pass `True` to enable CEC communication to an external display (if possible). @@ -20,7 +20,7 @@ Constructors Methods ------- -.. method:: display.DisplayData.display_id() +.. method:: display.DisplayData.display_id() -> int Returns the external display EDID data as a bytes() object. 
Verifying the EDID headers, checksums, and concatenating all sections into one bytes() diff --git a/docs/library/omv.display.dsidisplay.rst b/docs/library/omv.display.dsidisplay.rst index dfc6bf736f8c..f83f16b8d3e6 100644 --- a/docs/library/omv.display.dsidisplay.rst +++ b/docs/library/omv.display.dsidisplay.rst @@ -24,7 +24,7 @@ Example usage for driving the 800x480 MIPI LCD:: Constructors ------------ -.. class:: display.DSIDisplay([framesize=display.FWVGA, [refresh=60, [portrait=False, [channel=0, [controller, [backlight]]]]]]) +.. class:: display.DSIDisplay(framesize=FWVGA, refresh=60, portrait=False, channel=0, controller, backlight) ``framesize`` One of the standard supported resolutions. @@ -43,23 +43,23 @@ Constructors Methods ------- -.. method:: DSIDisplay.deinit() +.. method:: DSIDisplay.deinit() -> None Releases the I/O pins and RAM used by the class. This is called automatically on destruction. -.. method:: DSIDisplay.width() +.. method:: DSIDisplay.width() -> int Returns the width of the screen. -.. method:: DSIDisplay.height() +.. method:: DSIDisplay.height() -> int Returns the height of the screen. -.. method:: DSIDisplay.refresh() +.. method:: DSIDisplay.refresh() -> int Returns the refresh rate. -.. method:: DSIDisplay.write(image, [x=0, [y=0, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None]]]]]]]]]]) +.. method:: DSIDisplay.write(image, x=0, y=0, x_scale=1.0, y_scale=1.0, roi=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None) -> None Displays an ``image`` whose top-left corner starts at location x, y. @@ -112,7 +112,7 @@ Methods * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). -.. method:: DSIDisplay.clear([display_off=False]) +.. method:: DSIDisplay.clear(display_off=False) -> None Clears the lcd screen to black. 
@@ -120,7 +120,7 @@ Methods frame buffer to black. You should also turn off the backlight too after this to ensure the screen goes to black as many displays are white when only the backlight is on. -.. method:: DSIDisplay.backlight([value]) +.. method:: DSIDisplay.backlight(value:Optional[int]=None) -> int Sets the lcd backlight dimming value. 0 (off) to 100 (on). @@ -129,10 +129,10 @@ Methods Pass no arguments to get the state of the backlight value. -.. method:: DSIDisplay.bus_write(cmd, [args=None, [dcs=False]]) +.. method:: DSIDisplay.bus_write(cmd:int, args=None, dcs=False) -> None Send the DSI Display ``cmd`` with ``args``. -.. method:: DSIDisplay.bus_read(cmd, [len, [args=None, [dcs=False]]]) +.. method:: DSIDisplay.bus_read(cmd:int, len:int, args=None, dcs=False) -> bytes Read ``len`` using ``cmd`` with ``args`` from the DSI Display. diff --git a/docs/library/omv.display.rgbdisplay.rst b/docs/library/omv.display.rgbdisplay.rst index a6740e047413..01e558b07c09 100644 --- a/docs/library/omv.display.rgbdisplay.rst +++ b/docs/library/omv.display.rgbdisplay.rst @@ -24,7 +24,7 @@ Example usage for driving the 800x480 24-bit Parallel LCD:: Constructors ------------ -.. class:: display.RGBDisplay([framesize=display.FWVGA, [refresh=60, [display_on=True, [portrait=False, [controller, [backlight]]]]]]) +.. class:: display.RGBDisplay(framesize=FWVGA, refresh=60, display_on=True, portrait=False, controller, backlight) ``framesize`` One of the standard supported resolutions. @@ -44,23 +44,23 @@ Constructors Methods ------- -.. method:: RGBDisplay.deinit() +.. method:: RGBDisplay.deinit() -> None Releases the I/O pins and RAM used by the class. This is called automatically on destruction. -.. method:: RGBDisplay.width() +.. method:: RGBDisplay.width() -> int Returns the width of the screen. -.. method:: RGBDisplay.height() +.. method:: RGBDisplay.height() -> int Returns the height of the screen. -.. method:: RGBDisplay.refresh() +.. 
method:: RGBDisplay.refresh() -> int Returns the refresh rate. -.. method:: RGBDisplay.write(image, [x=0, [y=0, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0]]]]]]]]]]) +.. method:: RGBDisplay.write(image:image.Image, x=0, y=0, x_scale=1.0, y_scale=1.0, roi=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None, hint=0) -> None Displays an ``image`` whose top-left corner starts at location x, y. @@ -113,7 +113,7 @@ Methods * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). -.. method:: RGBDisplay.clear([display_off=False]) +.. method:: RGBDisplay.clear(display_off=False) -> None Clears the lcd screen to black. @@ -121,7 +121,7 @@ Methods frame buffer to black. You should also turn off the backlight too after this to ensure the screen goes to black as many displays are white when only the backlight is on. -.. method:: RGBDisplay.backlight([value]) +.. method:: RGBDisplay.backlight(value:Optional[int]=None) -> int Sets the lcd backlight dimming value. 0 (off) to 100 (on). diff --git a/docs/library/omv.display.rst b/docs/library/omv.display.rst index be301f7ec66f..d70d57023a0c 100644 --- a/docs/library/omv.display.rst +++ b/docs/library/omv.display.rst @@ -24,74 +24,92 @@ Constants --------- .. data:: QVGA + :type: int 320x240 resolution for framesize. .. data:: TQVGA + :type: int 240x320 resolution for framesize. .. data:: FHVGA + :type: int 480x272 resolution for framesize. .. data:: FHVGA2 + :type: int 480x128 resolution for framesize. .. data:: VGA + :type: int 640x480 resolution for framesize. .. data:: THVGA + :type: int 320x480 resolution for framesize. .. data:: FWVGA + :type: int 800x480 resolution for framesize. .. data:: FWVGA2 + :type: int 800x320 resolution for framesize. .. data:: TFWVGA + :type: int 480x800 resolution for framesize. .. 
data:: TFWVGA2 + :type: int 480x480 resolution for framesize. .. data:: SVGA + :type: int 800x600 resolution for framesize. .. data:: WSVGA + :type: int 1024x600 resolution for framesize. .. data:: XGA + :type: int 1024x768 resolution for framesize. .. data:: SXGA + :type: int 1280x1024 resolution for framesize. .. data:: SXGA2 + :type: int 1280x400 resolution for framesize. .. data:: UXGA + :type: int 1600x1200 resolution for framesize. .. data:: HD + :type: int 1280x720 resolution for framesize. .. data:: FHD + :type: int 1920x1080 resolution for framesize. diff --git a/docs/library/omv.display.spidisplay.rst b/docs/library/omv.display.spidisplay.rst index f332936d9d8d..29a02cd32fb3 100644 --- a/docs/library/omv.display.spidisplay.rst +++ b/docs/library/omv.display.spidisplay.rst @@ -24,7 +24,7 @@ Example usage for driving the 128x160 LCD shield:: Constructors ------------ -.. class:: SPIDisplay([width=128, [height=160, [refresh=60, [bgr=False, [byte_swap=False, [triple_buffer, [controller, [backlight]]]]]]]]) +.. class:: SPIDisplay(width=128, height=160, refresh=60, bgr=False, byte_swap=False, triple_buffer, controller, backlight) ``width`` SPI LCD width. By default this is 128 to match the OpenMV 128x160 LCD shield. @@ -53,35 +53,35 @@ Constructors Methods ------- -.. method:: SPIDisplay.deinit() +.. method:: SPIDisplay.deinit() -> None Releases the I/O pins and RAM used by the class. This is called automatically on destruction. -.. method:: SPIDisplay.width() +.. method:: SPIDisplay.width() -> int Returns the width of the screen. -.. method:: SPIDisplay.height() +.. method:: SPIDisplay.height() -> int Returns the height of the screen. -.. method:: SPIDisplay.refresh() +.. method:: SPIDisplay.refresh() -> int Returns the refresh rate. -.. method:: SPIDisplay.bgr() +.. method:: SPIDisplay.bgr() -> bool Returns if the red and blue channels are swapped. -.. method:: SPIDisplay.byte_swap() +.. 
method:: SPIDisplay.byte_swap() -> bool Returns if the RGB565 pixels are displayed byte reversed. -.. method:: SPIDisplay.triple_buffer() +.. method:: SPIDisplay.triple_buffer() -> bool Returns if triple buffering is enabled. -.. method:: SPIDisplay.write(image, [x=0, [y=0, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0]]]]]]]]]]) +.. method:: SPIDisplay.write(image:image.Image, x=0, y=0, x_scale=1.0, y_scale=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None, hint=0) Displays an ``image`` whose top-left corner starts at location x, y. @@ -134,7 +134,7 @@ Methods * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). -.. method:: SPIDisplay.clear([display_off=False]) +.. method:: SPIDisplay.clear(display_off=False) -> None Clears the lcd screen to black. @@ -142,7 +142,7 @@ Methods frame buffer to black. You should also turn off the backlight too after this to ensure the screen goes to black as many displays are white when only the backlight is on. -.. method:: SPIDisplay.backlight([value]) +.. method:: SPIDisplay.backlight(value:Optional[int]=None) -> int Sets the lcd backlight dimming value. 0 (off) to 100 (on). @@ -151,6 +151,6 @@ Methods Pass no arguments to get the state of the backlight value. -.. method:: SPIDisplay.bus_write(cmd, [args=None]) +.. method:: SPIDisplay.bus_write(cmd:int, args=None) -> None Send the SPI Display ``cmd`` with ``args``. diff --git a/docs/library/omv.fir.rst b/docs/library/omv.fir.rst index 1f64124eb167..24815134bcfb 100644 --- a/docs/library/omv.fir.rst +++ b/docs/library/omv.fir.rst @@ -30,7 +30,7 @@ Example usage:: Functions --------- -.. function:: init([type=-1, [refresh, [resolution]]]) +.. 
function:: init(type=-1, refresh:Optional[int]=None, resolution:Optional[int]=None) -> None Initializes an attached thermopile shield using I/O pins P4 and P5 (and P0, P1, P2, P3 for `fir.FIR_LEPTON`) @@ -100,11 +100,11 @@ Functions FLIR Lepton 3.x. Triple buffering ensures that reading an image with `fir.read_ir()` and `fir.snapshot()` never block. For all other sensors the I2C bus is accessed to read the image. -.. function:: deinit() +.. function:: deinit() -> None Deinitializes the thermal sensor freeing up resources. -.. function:: width() +.. function:: width() -> int Returns the width (horizontal resolution) of the thermal sensor in-use: @@ -116,7 +116,7 @@ Functions * `fir.FIR_AMG8833`: 8 pixels. * `fir.FIR_LEPTON`: 80 pixels (FLIR Lepton 1.x/2.x) or 160 pixels (FLIR Lepton 3.x). -.. function:: height() +.. function:: height() -> int Returns the height (vertical resolution) of the thermal sensor in-use: @@ -128,7 +128,7 @@ Functions * `fir.FIR_AMG8833`: 8 pixels. * `fir.FIR_LEPTON`: 60 pixels (FLIR Lepton 1.x/2.x) or 120 pixels (FLIR Lepton 3.x). -.. function:: type() +.. function:: type() -> int Returns the type of the thermal sensor in-use: @@ -140,21 +140,21 @@ Functions * `fir.FIR_AMG8833` * `fir.FIR_LEPTON` -.. function:: refresh() +.. function:: refresh() -> int Returns the current refresh rate set during `fir.init()` call. -.. function:: resolution() +.. function:: resolution() -> int Returns the current resolution set during the `fir.init()` call. -.. function:: radiometric() +.. function:: radiometric() -> bool Returns if the thermal sensor reports accurate temperature readings (True or False). If False this means that the thermal sensor reports relative temperature readings based on its ambient temperature which may not be very accurate. -.. function:: register_vsync_cb(cb) +.. function:: register_vsync_cb(cb) -> None For the `fir.FIR_LEPTON` mode only on the OpenMV Cam Pure Thermal. @@ -165,7 +165,7 @@ Functions ``cb`` takes no arguments. -.. 
function:: register_frame_cb(cb) +.. function:: register_frame_cb(cb) -> None For the `fir.FIR_LEPTON` mode only on the OpenMV Cam Pure Thermal. @@ -178,11 +178,11 @@ Functions Use this to get an interrupt to schedule reading a frame later with `micropython.schedule()`. -.. function:: get_frame_available() +.. function:: get_frame_available() -> bool Returns True if a frame is available to read by calling `fir.read_ir()` or `fir.snapshot()`. -.. function:: trigger_ffc([timeout=-1]) +.. function:: trigger_ffc(timeout=-1) -> None For the `fir.FIR_LEPTON` mode only. @@ -192,7 +192,7 @@ Functions ``timeout`` if not -1 then how many milliseconds to wait for FFC to complete. -.. function:: read_ta() +.. function:: read_ta() -> float Returns the ambient temperature (i.e. sensor temperature). @@ -202,7 +202,7 @@ Functions The value returned is a float that represents the temperature in celsius. -.. function:: read_ir([hmirror=False, [vflip=False, [transpose=False, [timeout=-1]]]]) +.. function:: read_ir(hmirror=False, vflip=False, transpose=False, timeout=-1) Returns a tuple containing the ambient temperature (i.e. sensor temperature), the temperature list (width * height), the minimum temperature seen, and @@ -233,7 +233,7 @@ Functions ``ir`` is a (width * height) list of floats (4-bytes each). -.. function:: draw_ir(image, ir, [x, [y, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=128, [color_palette=image.PALETTE_RAINBOW, [alpha_palette=-1, [hint=0, [scale=(ir_min, ir_max)]]]]]]]]]]]) +.. function:: draw_ir(image:image.Image, ir, x:Optional[int]=None, y:Optional[int]=None, x_scale=1.0, y_scale=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=128, color_palette=image.PALETTE_RAINBOW, alpha_palette=-1, hint=0, scale:Optional[Tuple[float, float]]=None) -> None Draws an ``ir`` array on ``image`` whose top-left corner starts at location x, y. 
This method automatically handles rendering the image passed into the correct pixel format for the destination @@ -300,7 +300,7 @@ Functions (w, h, ir) as the ``ir`` array instead to use `draw_ir` to draw any floating point array with width ``w`` and height ``h``. -.. function:: snapshot([hmirror=False, [vflip=False, [transpose=False, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=128, [color_palette=fir.PALETTE_RAINBOW, [alpha_palette=None, [hint=0, [scale=(ir_min, ir_max), [pixformat=image.RGB565, [copy_to_fb=False, [timeout=-1]]]]]]]]]]]]]]) +.. function:: snapshot(hmirror=False, vflip=False, transpose=False, x_scale=1.0, y_scale=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=128, color_palette=image.PALETTE_RAINBOW, alpha_palette=None, hint=0, scale:Optional[Tuple[float, float]]=None, pixformat=image.RGB565, copy_to_fb=False, timeout=-1) -> image.Image Works like `sensor.snapshot()` and returns an `image` object that is either `image.GRAYSCALE` (grayscale) or `image.RGB565` (color). If ``copy_to_fb`` is False then @@ -382,29 +382,36 @@ Constants --------- .. data:: FIR_NONE + :type: int No FIR sensor type. .. data:: FIR_SHIELD + :type: int The OpenMV Cam Thermopile Shield Type (MLX90621). .. data:: FIR_MLX90621 + :type: int FIR_MLX90621 FIR sensor. .. data:: FIR_MLX90640 + :type: int FIR_MLX90640 FIR sensor. .. data:: FIR_MLX90641 + :type: int FIR_MLX90640 FIR sensor. .. data:: FIR_AMG8833 + :type: int FIR_AMG8833 FIR sensor. .. data:: FIR_LEPTON + :type: int FIR_LEPTON FIR sensor. diff --git a/docs/library/omv.ft5x06.rst b/docs/library/omv.ft5x06.rst index 982f7a35d68d..c98e6f03ab4d 100644 --- a/docs/library/omv.ft5x06.rst +++ b/docs/library/omv.ft5x06.rst @@ -13,7 +13,7 @@ Touch Screen Driver for the OpenMV Pure Thermal. Constructors ------------ -.. class:: ft5x06.FT5X06([i2c_addr=0x38]) +.. class:: ft5x06.FT5X06(i2c_addr=0x38) Creates a touch screen controller object @@ -87,41 +87,51 @@ Constants --------- .. 
data:: LCD_GESTURE_MOVE_UP + :type: int Touch screen move up gesture. .. data:: LCD_GESTURE_MOVE_LEFT + :type: int Touch screen move left gesture. .. data:: LCD_GESTURE_MOVE_DOWN + :type: int Touch screen move down gesture. .. data:: LCD_GESTURE_MOVE_RIGHT + :type: int Touch screen move right gesture. .. data:: LCD_GESTURE_ZOOM_IN + :type: int Touch screen zoom in gesture. .. data:: LCD_GESTURE_ZOOM_OUT + :type: int Touch screen zoom out gesture. .. data:: LCD_GESTURE_NONE + :type: int Touch screen no gesture. .. data:: LCD_FLAG_PRESSED + :type: int Touch point is pressed. .. data:: LCD_FLAG_RELEASED + :type: int Touch point is released. .. data:: LCD_FLAG_MOVED + :type: int Touch point is moved. diff --git a/docs/library/omv.gif.rst b/docs/library/omv.gif.rst index e16d83f15422..e99c7ad02628 100644 --- a/docs/library/omv.gif.rst +++ b/docs/library/omv.gif.rst @@ -36,7 +36,7 @@ Example usage:: Constructors ~~~~~~~~~~~~ -.. class:: Gif(filename, [width, [height, [color, [loop=True]]]]) +.. class:: Gif(filename:str, width:Optional[int]=None, height:Optional[int]=None, color:Optional[bool]=None, loop=True) Create a Gif object which you can add frames to. ``filename`` is the path to save the gif recording to. @@ -58,27 +58,27 @@ Constructors Methods ~~~~~~~ - .. method:: width() + .. method:: width() -> int Returns the width (horizontal resolution) for the gif object. - .. method:: height() + .. method:: height() -> int Returns the height (vertical resolution) for the gif object. - .. method:: format() + .. method:: format() -> int Returns `sensor.RGB565` if color is True or `sensor.GRAYSCALE` if not. - .. method:: size() + .. method:: size() -> int Returns the file size of the gif so far. This value is updated after adding frames. - .. method:: loop() + .. method:: loop() -> bool Returns if the gif object had loop set in its constructor. - .. method:: add_frame(image, [delay=10]) + .. 
method:: add_frame(image:image.Image, delay=10) -> None Add an image to the gif recording. The image width, height, and color mode, must be equal to the same width, height, and color modes used in the constructor @@ -87,7 +87,7 @@ Constructors ``delay`` is the number of centi-seconds to wait before displaying this frame after the previous frame (if not the first frame). - .. method:: close() + .. method:: close() -> None Finalizes the gif recording. This method must be called once the recording is complete to make the file viewable. diff --git a/docs/library/omv.gt911.rst b/docs/library/omv.gt911.rst index 7a273c7cc6e2..3c3a33b0a64a 100644 --- a/docs/library/omv.gt911.rst +++ b/docs/library/omv.gt911.rst @@ -21,22 +21,22 @@ Basic polling mode example usage:: Constructors ------------ -.. class:: gt911.GT911(bus, reset_pin, irq_pin, [address=0x5D, [width=800, [height=480, [touch_points=1, [reserve_x=False, [reserve_y=False, [reverse_axis=True, [stio=True, [refresh_rate=240, [touch_callback=None]]]]]]]]]]) +.. class:: gt911.GT911(bus:int, reset_pin, irq_pin, address=0x5D, width=800, height=480, touch_points=1, reserve_x=False, reserve_y=False, reverse_axis=True, stio=True, refresh_rate=240, touch_callback=None) Creates a touch screen controller object. You should initialize it according to the example above. Methods ------- -.. method:: GT911._read_reg(reg, [size=1, [buf=None]]) +.. method:: GT911._read_reg(reg:int, size=1, buf=None) Reads a register value. -.. method:: GT911._write_reg(reg, val, [size=1]) +.. method:: GT911._write_reg(reg:int, val:int, size=1) Writes a register value. -.. method:: GT911.read_id() +.. method:: GT911.read_id() -> int Returns the ID of the gt911 chip. @@ -46,6 +46,6 @@ Methods an x[0], y[1], size[2], and id[3]. x/y are the position on screen. Size is the amount of pressure applied. And id is a unique id per point which should correlate to the same point over reads. -.. method:: GT911.reset() +.. 
method:: GT911.reset() -> None Resets the gt911 chip. diff --git a/docs/library/omv.image.rst b/docs/library/omv.image.rst index e4d56a197bae..7b6d614d12d3 100644 --- a/docs/library/omv.image.rst +++ b/docs/library/omv.image.rst @@ -9,31 +9,31 @@ The ``image`` module is used for machine vision. Functions --------- -.. function:: binary_to_grayscale(binary_image_value) +.. function:: binary_to_grayscale(binary_image_value:Union[0,1]) -> int Returns a converted binary value (0-1) to a grayscale value (0-255). -.. function:: binary_to_rgb(binary_image_value) +.. function:: binary_to_rgb(binary_image_value:Union[0,1]) -> Tuple[int, int, int] Returns a converted binary value (0-1) to a 3 value RGB888 tuple. -.. function:: binary_to_lab(binary_image_value) +.. function:: binary_to_lab(binary_image_value:Union[0,1]) -> Tuple[int, int, int] Returns a converted binary value (0-1) to a 3 value LAB tuple. L goes between 0 and 100 and A/B go from -128 to 128. -.. function:: binary_to_yuv(binary_image_value) +.. function:: binary_to_yuv(binary_image_value:Union[0,1]) -> Tuple[int, int, int] Returns a converted binary value (0-1) to a 3 value YUV tuple. Y goes between 0 and 255 and U/V go from -128 to 128. -.. function:: grayscale_to_binary(grayscale_value) +.. function:: grayscale_to_binary(grayscale_value:int) -> Union[0,1] Returns a converted grayscale value (0-255) to a binary value (0-1). -.. function:: grayscale_to_rgb(grayscale_value) +.. function:: grayscale_to_rgb(grayscale_value:int) -> Tuple[int, int, int] Returns a converted grayscale value to a 3 value RGB888 tuple. @@ -43,7 +43,7 @@ Functions so this method won't return the exact values as a pure RGB888 system would. However, it's true to how the image lib works internally. -.. function:: grayscale_to_lab(grayscale_value) +.. function:: grayscale_to_lab(grayscale_value:int) -> Tuple[int, int, int] Returns a converted grayscale value to a 3 value LAB tuple. 
@@ -55,7 +55,7 @@ Functions so this method won't return the exact values as a pure LAB system would. However, it's true to how the image lib works internally. -.. function:: grayscale_to_yuv(grayscale_value) +.. function:: grayscale_to_yuv(grayscale_value:int) -> Tuple[int, int, int] Returns a converted grayscale value to a 3 value YUV tuple. @@ -67,7 +67,7 @@ Functions so this method won't return the exact values as a pure YUV system would. However, it's true to how the image lib works internally. -.. function:: rgb_to_binary(rgb_tuple) +.. function:: rgb_to_binary(rgb_tuple:Tuple[int, int, int]) -> Union[0,1] Returns a converted 3 value RGB888 tuple to a center range thresholded binary value (0-1). @@ -77,7 +77,7 @@ Functions so this method won't return the exact values as a pure RGB888 system would. However, it's true to how the image lib works internally. -.. function:: rgb_to_grayscale(rgb_tuple) +.. function:: rgb_to_grayscale(rgb_tuple:Tuple[int, int, int]) -> int Returns a converted 3 value RGB888 tuple to a grayscale value (0-255). @@ -87,7 +87,7 @@ Functions so this method won't return the exact values as a pure RGB888 system would. However, it's true to how the image lib works internally. -.. function:: rgb_to_lab(rgb_tuple) +.. function:: rgb_to_lab(rgb_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value RGB888 tuple to a 3 value LAB tuple. @@ -99,7 +99,7 @@ Functions so this method won't return the exact values as a pure RGB888 system would. However, it's true to how the image lib works internally. -.. function:: rgb_to_yuv(rgb_tuple) +.. function:: rgb_to_yuv(rgb_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value RGB888 tuple to a 3 value YUV tuple. @@ -111,7 +111,7 @@ Functions so this method won't return the exact values as a pure RGB888 system would. However, it's true to how the image lib works internally. -.. function:: lab_to_binary(lab_tuple) +.. 
function:: lab_to_binary(lab_tuple:Tuple[int, int, int]) -> Union[0,1] Returns a converted 3 value LAB tuple to a center range thresholded binary value (0-1). @@ -121,7 +121,7 @@ Functions so this method won't return the exact values as a pure LAB system would. However, it's true to how the image lib works internally. -.. function:: lab_to_grayscale(lab_tuple) +.. function:: lab_to_grayscale(lab_tuple:Tuple[int, int, int]) -> int Returns a converted 3 value LAB tuple to a grayscale value (0-255). @@ -131,7 +131,7 @@ Functions so this method won't return the exact values as a pure LAB system would. However, it's true to how the image lib works internally. -.. function:: lab_to_rgb(lab_tuple) +.. function:: lab_to_rgb(lab_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value LAB tuple to a 3 value RGB888 tuple. @@ -141,7 +141,7 @@ Functions so this method won't return the exact values as a pure LAB system would. However, it's true to how the image lib works internally. -.. function:: lab_to_yuv(lab_tuple) +.. function:: lab_to_yuv(lab_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value LAB tuple to a 3 value YUV tuple. @@ -153,7 +153,7 @@ Functions so this method won't return the exact values as a pure LAB system would. However, it's true to how the image lib works internally. -.. function:: yuv_to_binary(yuv_tuple) +.. function:: yuv_to_binary(yuv_tuple:Tuple[int, int, int]) -> Union[0,1] Returns a converted 3 value YUV tuple to a center range thresholded binary value (0-1). @@ -163,7 +163,7 @@ Functions so this method won't return the exact values as a pure YUV system would. However, it's true to how the image lib works internally. -.. function:: yuv_to_grayscale(yuv_tuple) +.. function:: yuv_to_grayscale(yuv_tuple:Tuple[int, int, int]) -> int Returns a converted 3 value YUV tuple to a grayscale value (0-255). @@ -173,7 +173,7 @@ Functions so this method won't return the exact values as a pure YUV system would. 
However, it's true to how the image lib works internally. -.. function:: yuv_to_rgb(lab_tuple) +.. function:: yuv_to_rgb(lab_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value YUV tuple to a 3 value RGB888 tuple. @@ -183,7 +183,7 @@ Functions so this method won't return the exact values as a pure YUV system would. However, it's true to how the image lib works internally. -.. function:: yuv_to_lab(yuv_tuple) +.. function:: yuv_to_lab(yuv_tuple:Tuple[int, int, int]) -> Tuple[int, int, int] Returns a converted 3 value YUV tuple to a 3 value LAB tuple. @@ -195,19 +195,19 @@ Functions so this method won't return the exact values as a pure YUV system would. However, it's true to how the image lib works internally. -.. function:: load_decriptor(path) +.. function:: load_decriptor(path:str) Loads a descriptor object from disk. ``path`` is the path to the descriptor file to load. -.. function:: save_descriptor(path, descriptor) +.. function:: save_descriptor(path:str, descriptor) Saves the descriptor object ``descriptor`` to disk. ``path`` is the path to the descriptor file to save. -.. function:: match_descriptor(descritor0, descriptor1, [threshold=70, [filter_outliers=False]]) +.. function:: match_descriptor(descritor0, descriptor1, threshold=70, filter_outliers=False) For LBP descriptors this function returns an integer representing the difference between the two descriptors. You may then threshold/compare this @@ -229,7 +229,7 @@ class HaarCascade -- Feature Descriptor The Haar Cascade feature descriptor is used for the `Image.find_features()` method. It doesn't have any methods itself for you to call. -.. class:: HaarCascade(path, [stages=Auto]) +.. class:: HaarCascade(path:str, stages:Optional[int]=None) Loads a Haar Cascade into memory from a Haar Cascade binary file formatted for your OpenMV Cam. If you pass "frontalface" instead of a path then this @@ -271,6 +271,45 @@ method. It doesn't have any methods itself for you to call. 
not cat like things labeled differently. The generator algorithm will then produce a Haar Cascade that detects cats. +class Similarity -- Similarity Object +------------------------------------- + +The similarity object is returned by `Image.get_similarity()`. + +.. class:: Similarity() + + Please call `Image.get_similarity()` to create this object. + + .. method:: mean() -> float + + Returns the mean of the similarity values computed across the image (float). + + You may also get this value doing ``[0]`` on the object. + + .. method:: stdev() -> float + + Returns the standard deviation of the similarity values computed across the image (float). + + You may also get this value doing ``[1]`` on the object. + + .. method:: min() -> float + + Returns the min of the similarity values computed across the image (float). + + Generally, for the SSIM you want to threshold the min value to determine if two images + are different. + + You may also get this value doing ``[2]`` on the object. + + .. method:: max() -> float + + Returns the max of the similarity values computed across the image (float). + + Generally, for the DSIM you want to threshold the max value to determine if two images + are different. + + You may also get this value doing ``[3]`` on the object. + class Histogram -- Histogram Object ----------------------------------- @@ -286,31 +325,31 @@ are normalized so that all bins in a channel sum to 1. Please call `Image.get_histogram()` to create this object. - .. method:: bins() + .. method:: bins() -> List[float] Returns a list of floats for the grayscale histogram. You may also get this value doing ``[0]`` on the object. - .. method:: l_bins() + .. method:: l_bins() -> List[float] Returns a list of floats for the RGB565 histogram LAB L channel. You may also get this value doing ``[0]`` on the object. - .. method:: a_bins() + .. method:: a_bins() -> List[float] Returns a list of floats for the RGB565 histogram LAB A channel. 
You may also get this value doing ``[1]`` on the object. - .. method:: b_bins() + .. method:: b_bins() -> List[float] Returns a list of floats for the RGB565 histogram LAB B channel. You may also get this value doing ``[2]`` on the object. - .. method:: get_percentile(percentile) + .. method:: get_percentile(percentile) -> percentile Computes the CDF of the histogram channels and returns a `image.percentile` object with the values of the histogram at the passed in ``percentile`` (0.0 @@ -320,14 +359,14 @@ are normalized so that all bins in a channel sum to 1. 0.1) and max (with 0.9) of a color distribution without outlier effects ruining your results for adaptive color tracking. - .. method:: get_threshold() + .. method:: get_threshold() -> threshold Uses Otsu's Method to compute the optimal threshold values that split the histogram into two halves for each channel of the histogram. This method returns a `image.threshold` object. This method is particularly useful for determining optimal `Image.binary()` thresholds. - .. method:: get_statistics() + .. method:: get_statistics() -> statistics Computes the mean, median, mode, standard deviation, min, max, lower quartile, and upper quartile of each color channel in the histogram and @@ -351,25 +390,25 @@ methods. Please call `histogram.get_percentile()` to create this object. - .. method:: value() + .. method:: value() -> int Return the grayscale percentile value (between 0 and 255). You may also get this value doing ``[0]`` on the object. - .. method:: l_value() + .. method:: l_value() -> int Return the RGB565 LAB L channel percentile value (between 0 and 100). You may also get this value doing ``[0]`` on the object. - .. method:: a_value() + .. method:: a_value() -> int Return the RGB565 LAB A channel percentile value (between -128 and 127). You may also get this value doing ``[1]`` on the object. - .. method:: b_value() + .. 
method:: b_value() -> int Return the RGB565 LAB B channel percentile value (between -128 and 127). @@ -390,25 +429,25 @@ methods. Please call `histogram.get_threshold()` to create this object. - .. method:: value() + .. method:: value() -> int Return the grayscale threshold value (between 0 and 255). You may also get this value doing ``[0]`` on the object. - .. method:: l_value() + .. method:: l_value() -> int Return the RGB565 LAB L channel threshold value (between 0 and 100). You may also get this value doing ``[0]`` on the object. - .. method:: a_value() + .. method:: a_value() -> int Return the RGB565 LAB A channel threshold value (between -128 and 127). You may also get this value doing ``[1]`` on the object. - .. method:: b_value() + .. method:: b_value() -> int Return the RGB565 LAB B channel threshold value (between -128 and 127). @@ -430,193 +469,193 @@ methods. Please call `histogram.get_statistics()` or `Image.get_statistics()` to create this object. - .. method:: mean() + .. method:: mean() -> int Returns the grayscale mean (0-255) (int). You may also get this value doing ``[0]`` on the object. - .. method:: median() + .. method:: median() -> int Returns the grayscale median (0-255) (int). You may also get this value doing ``[1]`` on the object. - .. method:: mode() + .. method:: mode() -> int Returns the grayscale mode (0-255) (int). You may also get this value doing ``[2]`` on the object. - .. method:: stdev() + .. method:: stdev() -> int Returns the grayscale standard deviation (0-255) (int). You may also get this value doing ``[3]`` on the object. - .. method:: min() + .. method:: min() -> int Returns the grayscale min (0-255) (int). You may also get this value doing ``[4]`` on the object. - .. method:: max() + .. method:: max() -> int Returns the grayscale max (0-255) (int). You may also get this value doing ``[5]`` on the object. - .. method:: lq() + .. method:: lq() -> int Returns the grayscale lower quartile (0-255) (int). 
You may also get this value doing ``[6]`` on the object. - .. method:: uq() + .. method:: uq() -> int Returns the grayscale upper quartile (0-255) (int). You may also get this value doing ``[7]`` on the object. - .. method:: l_mean() + .. method:: l_mean() -> int Returns the RGB565 LAB L mean (0-255) (int). You may also get this value doing ``[0]`` on the object. - .. method:: l_median() + .. method:: l_median() -> int Returns the RGB565 LAB L median (0-255) (int). You may also get this value doing ``[1]`` on the object. - .. method:: l_mode() + .. method:: l_mode() -> int Returns the RGB565 LAB L mode (0-255) (int). You may also get this value doing ``[2]`` on the object. - .. method:: l_stdev() + .. method:: l_stdev() -> int Returns the RGB565 LAB L standard deviation (0-255) (int). You may also get this value doing ``[3]`` on the object. - .. method:: l_min() + .. method:: l_min() -> int Returns the RGB565 LAB L min (0-255) (int). You may also get this value doing ``[4]`` on the object. - .. method:: l_max() + .. method:: l_max() -> int Returns the RGB565 LAB L max (0-255) (int). You may also get this value doing ``[5]`` on the object. - .. method:: l_lq() + .. method:: l_lq() -> int Returns the RGB565 LAB L lower quartile (0-255) (int). You may also get this value doing ``[6]`` on the object. - .. method:: l_uq() + .. method:: l_uq() -> int Returns the RGB565 LAB L upper quartile (0-255) (int). You may also get this value doing ``[7]`` on the object. - .. method:: a_mean() + .. method:: a_mean() -> int Returns the RGB565 LAB A mean (0-255) (int). You may also get this value doing ``[8]`` on the object. - .. method:: a_median() + .. method:: a_median() -> int Returns the RGB565 LAB A median (0-255) (int). You may also get this value doing ``[9]`` on the object. - .. method:: a_mode() + .. method:: a_mode() -> int Returns the RGB565 LAB A mode (0-255) (int). You may also get this value doing ``[10]`` on the object. - .. method:: a_stdev() + .. 
method:: a_stdev() -> int Returns the RGB565 LAB A standard deviation (0-255) (int). You may also get this value doing ``[11]`` on the object. - .. method:: a_min() + .. method:: a_min() -> int Returns the RGB565 LAB A min (0-255) (int). You may also get this value doing ``[12]`` on the object. - .. method:: a_max() + .. method:: a_max() -> int Returns the RGB565 LAB A max (0-255) (int). You may also get this value doing ``[13]`` on the object. - .. method:: a_lq() + .. method:: a_lq() -> int Returns the RGB565 LAB A lower quartile (0-255) (int). You may also get this value doing ``[14]`` on the object. - .. method:: a_uq() + .. method:: a_uq() -> int Returns the RGB565 LAB A upper quartile (0-255) (int). You may also get this value doing ``[15]`` on the object. - .. method:: b_mean() + .. method:: b_mean() -> int Returns the RGB565 LAB B mean (0-255) (int). You may also get this value doing ``[16]`` on the object. - .. method:: b_median() + .. method:: b_median() -> int Returns the RGB565 LAB B median (0-255) (int). You may also get this value doing ``[17]`` on the object. - .. method:: b_mode() + .. method:: b_mode() -> int Returns the RGB565 LAB B mode (0-255) (int). You may also get this value doing ``[18]`` on the object. - .. method:: b_stdev() + .. method:: b_stdev() -> int Returns the RGB565 LAB B standard deviation (0-255) (int). You may also get this value doing ``[19]`` on the object. - .. method:: b_min() + .. method:: b_min() -> int Returns the RGB565 LAB B min (0-255) (int). You may also get this value doing ``[20]`` on the object. - .. method:: b_max() + .. method:: b_max() -> int Returns the RGB565 LAB B max (0-255) (int). You may also get this value doing ``[21]`` on the object. - .. method:: b_lq() + .. method:: b_lq() -> int Returns the RGB565 LAB B lower quartile (0-255) (int). You may also get this value doing ``[22]`` on the object. - .. method:: b_uq() + .. method:: b_uq() -> int Returns the RGB565 LAB B upper quartile (0-255) (int). 
@@ -631,73 +670,73 @@ The blob object is returned by `Image.find_blobs()`. Please call `Image.find_blobs()` to create this object. - .. method:: corners() + .. method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. Corners are always returned in sorted clock-wise order starting from the top left. - .. method:: min_corners() + .. method:: min_corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners than bound the min area rectangle of the blob. Unlike `blob.corners()` the min area rectangle corners do not necessarily lie on the blob. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the blob's bounding box. - .. method:: x() + .. method:: x() -> int Returns the blob's bounding box x coordinate (int). You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the blob's bounding box y coordinate (int). You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the blob's bounding box w coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the blob's bounding box h coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: pixels() + .. method:: pixels() -> int Returns the number of pixels that are part of this blob (int). You may also get this value doing ``[4]`` on the object. - .. method:: cx() + .. method:: cx() -> int Returns the centroid x position of the blob (int). You may also get this value doing ``[5]`` on the object. - .. method:: cxf() + .. method:: cxf() -> float Returns the centroid x position of the blob (float). - .. method:: cy() + .. method:: cy() -> int Returns the centroid y position of the blob (int). 
You may also get this value doing ``[6]`` on the object. - .. method:: cyf() + .. method:: cyf() -> float Returns the centroid y position of the blob (float). - .. method:: rotation() + .. method:: rotation() -> float Returns the rotation of the blob in radians (float). If the blob is like a pencil or pen this value will be unique for 0-180 degrees. If the blob @@ -705,16 +744,16 @@ The blob object is returned by `Image.find_blobs()`. You may also get this value doing ``[7]`` on the object. - .. method:: rotation_deg() + .. method:: rotation_deg() -> float Returns the rotation of the blob in degrees. - .. method:: rotation_rad() + .. method:: rotation_rad() -> float Returns the rotation of the blob in radians. This method is more descriptive than just `blob.rotation()`. - .. method:: code() + .. method:: code() -> int Returns a 32-bit binary number with a bit set in it for each color threshold that's part of this blob. For example, if you passed `Image.find_blobs()` @@ -726,79 +765,79 @@ The blob object is returned by `Image.find_blobs()`. You may also get this value doing ``[8]`` on the object. - .. method:: count() + .. method:: count() -> int Returns the number of blobs merged into this blob. This is 1 unless you called `Image.find_blobs()` with ``merge=True``. You may also get this value doing ``[9]`` on the object. - .. method:: perimeter() + .. method:: perimeter() -> int Returns the number of pixels on this blob's perimeter. - .. method:: roundness() + .. method:: roundness() -> float Returns a value between 0 and 1 representing how round the object is. A circle would be a 1. - .. method:: elongation() + .. method:: elongation() -> float Returns a value between 0 and 1 representing how long (not round) the object is. A line would be a 1. - .. method:: area() + .. method:: area() -> int Returns the area of the bounding box around the blob. (w * h). - .. method:: density() + .. method:: density() -> float Returns the density ratio of the blob. 
This is the number of pixels in the blob over its bounding box area. A low density ratio means in general that the lock on the object isn't very good. The result is between 0 and 1. - .. method:: extent() + .. method:: extent() -> float Alias for `blob.density()`. - .. method:: compactness() + .. method:: compactness() -> float Like `blob.density()`, but, uses the perimeter of the blob instead to measure the objects density and is thus more accurate. The result is between 0 and 1. - .. method:: solidity() + .. method:: solidity() -> float Like `blob.density()` but, uses the minimum area rotated rectangle versus the bounding rectangle to measure density. The result is between 0 and 1. - .. method:: convexity() + .. method:: convexity() -> float Returns a value between 0 and 1 representing how convex the object is. A square would be 1. - .. method:: x_hist_bins() + .. method:: x_hist_bins() -> List[float] Returns a histogram of the x axis of all columns in a blob. Bin values are scaled between 0 and 1. - .. method:: y_hist_bins() + .. method:: y_hist_bins() -> List[float] Returns a histogram of the y axis of all the rows in a blob. Bin values are scaled between 0 and 1. - .. method:: major_axis_line() + .. method:: major_axis_line() -> Tuple[int, int, int, int] Returns a line tuple (x1, y1, x2, y2) that can be drawn with `Image.draw_line()` of the major axis of the blob (the line going through the longest side of the min area rectangle). - .. method:: minor_axis_line() + .. method:: minor_axis_line() -> Tuple[int, int, int, int] Returns a line tuple (x1, y1, x2, y2) that can be drawn with `Image.draw_line()` of the minor axis of the blob (the line going through the shortest side of the min area rectangle). - .. method:: enclosing_circle() + .. method:: enclosing_circle() -> Tuple[int, int, int] Returns a circle tuple (x, y, r) that can be drawn with `Image.draw_circle()` of the circle that encloses the min area rectangle of a blob. - .. 
method:: enclosed_ellipse() + .. method:: enclosed_ellipse() -> Tuple[int, int, int, int, float] Returns an ellipse tuple (x, y, rx, ry, rotation) that can be drawn with `Image.draw_ellipse()` of the ellipse that fits inside of the min area rectangle of a blob. @@ -812,54 +851,54 @@ The line object is returned by `Image.find_lines()`, `Image.find_line_segments() Please call `Image.find_lines()`, `Image.find_line_segments()`, or `Image.get_regression()` to create this object. - .. method:: line() + .. method:: line() -> Tuple[int, int, int, int] Returns a line tuple (x1, y1, x2, y2) for use with other `image` methods like `Image.draw_line()`. - .. method:: x1() + .. method:: x1() -> int Returns the line's p1 x component. You may also get this value doing ``[0]`` on the object. - .. method:: y1() + .. method:: y1() -> int Returns the line's p1 y component. You may also get this value doing ``[1]`` on the object. - .. method:: x2() + .. method:: x2() -> int Returns the line's p2 x component. You may also get this value doing ``[2]`` on the object. - .. method:: y2() + .. method:: y2() -> int Returns the line's p2 y component. You may also get this value doing ``[3]`` on the object. - .. method:: length() + .. method:: length() -> int Returns the line's length: sqrt(((x2-x1)^2) + ((y2-y1)^2). You may also get this value doing ``[4]`` on the object. - .. method:: magnitude() + .. method:: magnitude() -> int Returns the magnitude of the line from the hough transform. You may also get this value doing ``[5]`` on the object. - .. method:: theta() + .. method:: theta() -> int Returns the angle of the line from the hough transform - (0 - 179) degrees. You may also get this value doing ``[7]`` on the object. - .. method:: rho() + .. method:: rho() -> int Returns the the rho value for the line from the hough transform. @@ -874,25 +913,25 @@ The circle object is returned by `Image.find_circles()`. Please call `Image.find_circles()` to create this object. - .. method:: x() + .. 
method:: x() -> int Returns the circle's x position. You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the circle's y position. You may also get this value doing ``[1]`` on the object. - .. method:: r() + .. method:: r() -> int Returns the circle's radius. You may also get this value doing ``[2]`` on the object. - .. method:: magnitude() + .. method:: magnitude() -> int Returns the circle's magnitude. @@ -907,41 +946,41 @@ The rect object is returned by `Image.find_rects()`. Please call `Image.find_rects()` to create this object. - .. method:: corners() + .. method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. Corners are always returned in sorted clock-wise order starting from the top left. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the rect's bounding box. - .. method:: x() + .. method:: x() -> int Returns the rectangle's top left corner's x position. You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the rectangle's top left corner's y position. You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the rectangle's width. You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the rectangle's height. You may also get this value doing ``[3]`` on the object. - .. method:: magnitude() + .. method:: magnitude() -> int Returns the rectangle's magnitude. @@ -956,71 +995,71 @@ The qrcode object is returned by `Image.find_qrcodes()`. Please call `Image.find_qrcodes()` to create this object. - .. method:: corners() + .. method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. 
Corners are always returned in sorted clock-wise order starting from the top left. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the qrcode's bounding box. - .. method:: x() + .. method:: x() -> int Returns the qrcode's bounding box x coordinate (int). You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the qrcode's bounding box y coordinate (int). You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the qrcode's bounding box w coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the qrcode's bounding box h coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: payload() + .. method:: payload() -> str Returns the payload string of the qrcode. E.g. the URL. You may also get this value doing ``[4]`` on the object. - .. method:: version() + .. method:: version() -> int Returns the version number of the qrcode (int). You may also get this value doing ``[5]`` on the object. - .. method:: ecc_level() + .. method:: ecc_level() -> int Returns the ecc_level of the qrcode (int). You may also get this value doing ``[6]`` on the object. - .. method:: mask() + .. method:: mask() -> int Returns the mask of the qrcode (int). You may also get this value doing ``[7]`` on the object. - .. method:: data_type() + .. method:: data_type() -> int Returns the data type of the qrcode (int). You may also get this value doing ``[8]`` on the object. - .. method:: eci() + .. method:: eci() -> int Returns the eci of the qrcode (int). The eci stores the encoding of data bytes in the QR Code. If you plan to handling QR Codes that contain more @@ -1028,22 +1067,22 @@ The qrcode object is returned by `Image.find_qrcodes()`. 
You may also get this value doing ``[9]`` on the object. - .. method:: is_numeric() + .. method:: is_numeric() -> bool Returns True if the data_type of the qrcode is numeric. - .. method:: is_alphanumeric() + .. method:: is_alphanumeric() -> bool Returns True if the data_type of the qrcode is alpha numeric. - .. method:: is_binary() + .. method:: is_binary() -> bool Returns True if the data_type of the qrcode is binary. If you are serious about handling all types of text you need to check the eci if this is True to determine the text encoding of the data. Usually, it's just standard ASCII, but, it could be UTF8 that has some 2-byte characters in it. - .. method:: is_kanji() + .. method:: is_kanji() -> bool Returns True if the data_type of the qrcode is alpha Kanji. If this is True then you'll need to decode the string yourself as Kanji symbols are 10-bits @@ -1059,41 +1098,41 @@ The apriltag object is returned by `Image.find_apriltags()`. Please call `Image.find_apriltags()` to create this object. - .. method:: corners() + .. method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. Corners are always returned in sorted clock-wise order starting from the top left. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the apriltag's bounding box. - .. method:: x() + .. method:: x() -> int Returns the apriltag's bounding box x coordinate (int). You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the apriltag's bounding box y coordinate (int). You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the apriltag's bounding box w coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. 
method:: h() -> int Returns the apriltag's bounding box h coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: id() + .. method:: id() -> int Returns the numeric id of the apriltag. @@ -1106,7 +1145,7 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[4]`` on the object. - .. method:: family() + .. method:: family() -> int Returns the numeric family of the apriltag. @@ -1119,39 +1158,39 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[5]`` on the object. - .. method:: cx() + .. method:: cx() -> int Returns the centroid x position of the apriltag (int). - .. method:: cxf() + .. method:: cxf() -> float Returns the centroid x position of the apriltag (float). You may also get this value doing ``[6]`` on the object. - .. method:: cy() + .. method:: cy() -> int Returns the centroid y position of the apriltag (int). - .. method:: cyf() + .. method:: cyf() -> float Returns the centroid y position of the apriltag (float). You may also get this value doing ``[7]`` on the object. - .. method:: rotation() + .. method:: rotation() -> float Returns the rotation of the apriltag in radians (float). You may also get this value doing ``[8]`` on the object. - .. method:: decision_margin() + .. method:: decision_margin() -> float Returns the quality of the apriltag match (0.0 - 1.0) where 1.0 is the best. You may also get this value doing ``[9]`` on the object. - .. method:: hamming() + .. method:: hamming() -> int Returns the number of accepted bit errors for this tag. @@ -1164,7 +1203,7 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[10]`` on the object. - .. method:: goodness() + .. method:: goodness() -> float Returns the quality of the apriltag image (0.0 - 1.0) where 1.0 is the best. @@ -1176,7 +1215,7 @@ The apriltag object is returned by `Image.find_apriltags()`. 
You may also get this value doing ``[11]`` on the object. - .. method:: x_translation() + .. method:: x_translation() -> float Returns the translation in unknown units from the camera in the X direction. @@ -1190,7 +1229,7 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[12]`` on the object. - .. method:: y_translation() + .. method:: y_translation() -> float Returns the translation in unknown units from the camera in the Y direction. @@ -1204,7 +1243,7 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[13]`` on the object. - .. method:: z_translation() + .. method:: z_translation() -> float Returns the translation in unknown units from the camera in the Z direction. @@ -1218,21 +1257,21 @@ The apriltag object is returned by `Image.find_apriltags()`. You may also get this value doing ``[14]`` on the object. - .. method:: x_rotation() + .. method:: x_rotation() -> float Returns the rotation in radians of the apriltag in the X plane. E.g. moving the camera left-to-right while looking at the tag. You may also get this value doing ``[15]`` on the object. - .. method:: y_rotation() + .. method:: y_rotation() -> float Returns the rotation in radians of the apriltag in the Y plane. E.g. moving the camera up-to-down while looking at the tag. You may also get this value doing ``[16]`` on the object. - .. method:: z_rotation() + .. method:: z_rotation() -> float Returns the rotation in radians of the apriltag in the Z plane. E.g. rotating the camera while looking directly at the tag. @@ -1250,71 +1289,71 @@ The datamatrix object is returned by `Image.find_datamatrices()`. Please call `Image.find_datamatrices()` to create this object. - .. method:: corners() + .. method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. Corners are always returned in sorted clock-wise order starting from the top left. - .. 
method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the datamatrix's bounding box. - .. method:: x() + .. method:: x() -> int Returns the datamatrix's bounding box x coordinate (int). You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the datamatrix's bounding box y coordinate (int). You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the datamatrix's bounding box w coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the datamatrix's bounding box h coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: payload() + .. method:: payload() -> str Returns the payload string of the datamatrix. E.g. The string. You may also get this value doing ``[4]`` on the object. - .. method:: rotation() + .. method:: rotation() -> float Returns the rotation of the datamatrix in radians (float). You may also get this value doing ``[5]`` on the object. - .. method:: rows() + .. method:: rows() -> int Returns the number of rows in the data matrix (int). You may also get this value doing ``[6]`` on the object. - .. method:: columns() + .. method:: columns() -> int Returns the number of columns in the data matrix (int). You may also get this value doing ``[7]`` on the object. - .. method:: capacity() + .. method:: capacity() -> int Returns how many characters could fit in this data matrix. You may also get this value doing ``[8]`` on the object. - .. method:: padding() + .. method:: padding() -> int Returns how many unused characters are in this data matrix. @@ -1329,47 +1368,47 @@ The barcode object is returned by `Image.find_barcodes()`. Please call `Image.find_barcodes()` to create this object. - .. method:: corners() + .. 
method:: corners() -> List[Tuple[int, int]] Returns a list of 4 (x,y) tuples of the 4 corners of the object. Corners are always returned in sorted clock-wise order starting from the top left. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the barcode's bounding box. - .. method:: x() + .. method:: x() -> int Returns the barcode's bounding box x coordinate (int). You may also get this value doing ``[0]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the barcode's bounding box y coordinate (int). You may also get this value doing ``[1]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the barcode's bounding box w coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the barcode's bounding box h coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: payload() + .. method:: payload() -> str Returns the payload string of the barcode. E.g. The number. You may also get this value doing ``[4]`` on the object. - .. method:: type() + .. method:: type() -> int Returns the type enumeration of the barcode (int). @@ -1392,13 +1431,13 @@ The barcode object is returned by `Image.find_barcodes()`. * image.CODE93 * image.CODE128 - .. method:: rotation() + .. method:: rotation() -> float Returns the rotation of the barcode in radians (float). You may also get this value doing ``[6]`` on the object. - .. method:: quality() + .. method:: quality() -> int Returns the number of times this barcode was detected in the image (int). @@ -1416,33 +1455,33 @@ The displacement object is returned by `Image.find_displacement()`. Please call `Image.find_displacement()` to create this object. - .. method:: x_translation() + .. method:: x_translation() -> float Returns the x translation in pixels between two images. 
This is sub pixel accurate so it's a float. You may also get this value doing ``[0]`` on the object. - .. method:: y_translation() + .. method:: y_translation() -> float Returns the y translation in pixels between two images. This is sub pixel accurate so it's a float. You may also get this value doing ``[1]`` on the object. - .. method:: rotation() + .. method:: rotation() -> float Returns the rotation in radians between two images. You may also get this value doing ``[2]`` on the object. - .. method:: scale() + .. method:: scale() -> float Returns the scale change between two images. You may also get this value doing ``[3]`` on the object. - .. method:: response() + .. method:: response() -> float Returns the quality of the results of displacement matching between two images. Between 0-1. A ``displacement`` object with a response less than 0.1 is likely noise. @@ -1458,60 +1497,60 @@ The kptmatch object is returned by `image.match_descriptor()` for keypoint match Please call `image.match_descriptor()` to create this object. - .. method:: rect() + .. method:: rect() -> Tuple[int, int, int, int] Returns a rectangle tuple (x, y, w, h) for use with other `image` methods like `Image.draw_rectangle()` of the kptmatch's bounding box. - .. method:: cx() + .. method:: cx() -> int Returns the centroid x position of the kptmatch (int). You may also get this value doing ``[0]`` on the object. - .. method:: cy() + .. method:: cy() -> int Returns the centroid y position of the kptmatch (int). You may also get this value doing ``[1]`` on the object. - .. method:: x() + .. method:: x() -> int Returns the kptmatch's bounding box x coordinate (int). You may also get this value doing ``[2]`` on the object. - .. method:: y() + .. method:: y() -> int Returns the kptmatch's bounding box y coordinate (int). You may also get this value doing ``[3]`` on the object. - .. method:: w() + .. method:: w() -> int Returns the kptmatch's bounding box w coordinate (int). 
You may also get this value doing ``[4]`` on the object. - .. method:: h() + .. method:: h() -> int Returns the kptmatch's bounding box h coordinate (int). You may also get this value doing ``[5]`` on the object. - .. method:: count() + .. method:: count() -> int Returns the number of keypoints matched (int). You may also get this value doing ``[6]`` on the object. - .. method:: theta() + .. method:: theta() -> int Returns the estimated angle of rotation for the keypoint (int). You may also get this value doing ``[7]`` on the object. - .. method:: match() + .. method:: match() -> List[Tuple[int, int]] Returns the list of (x,y) tuples of matching keypoints. @@ -1523,7 +1562,7 @@ class ImageIO -- ImageIO Object The ImageIO object allows you to read/write OpenMV Image objects in their native form to disk or to memory. This class provides fast read/write random access for loading/storing images. -.. class:: ImageIO(path, mode) +.. class:: ImageIO(path:str, mode) Creates an ImageIO object. @@ -1535,38 +1574,38 @@ or to memory. This class provides fast read/write random access for loading/stor storage buffer is not allowed to grow in size after being allocated. Use a ``bpp`` value of 0 for binary images, 1 for grayscale images, and 2 for rgb565 images. - .. method:: type() + .. method:: type() -> int Returns if the `ImageIO` object is a `FILE_STREAM` or `MEMORY_STREAM`. - .. method:: is_closed() + .. method:: is_closed() -> bool Returns if the `ImageIO` object is closed and can no longer be used. - .. method:: count() + .. method:: count() -> int Returns the number of frames stored. - .. method:: offset() + .. method:: offset() -> int Returns the image index offset. - .. method:: version() + .. method:: version() -> Optional[int] Returns the version of the object if it's `FILE_STREAM`. `MEMORY_STREAM` versions are ``none``. - .. method:: buffer_size() + .. method:: buffer_size() -> int Returns the size allocated by the object for a frame in a single buffer. 
``buffer_size() * count() == size()`` - .. method:: size() + .. method:: size() -> int Returns the number of bytes on disk or memory used by the ImageIO object. - .. method:: write(img) + .. method:: write(img:Image) -> ImageIO Writes a new image ``img`` to the ImageIO object. For on disk ImageIO objects the file will grow as new images are added. For in-memory ImageIO objects this just writes an image to the @@ -1574,7 +1613,7 @@ or to memory. This class provides fast read/write random access for loading/stor Returns the ImageIO object. - .. method:: read([copy_to_fb=True, [loop=True, [pause=True]]]) + .. method:: read(copy_to_fb=True, loop=True, pause=True) -> Image Returns an image object from the ImageIO object. If ``copy_to_fb`` is False then the new image is allocated on the MicroPython heap. However, the MicroPython heap is limited @@ -1587,51 +1626,58 @@ or to memory. This class provides fast read/write random access for loading/stor ``pause`` if True causes this method to pause for a previously recorded number of milliseconds by write in-order to match the original frame rate that captured the image data. - .. method:: seek(offset) + .. method:: seek(offset) -> None Seeks to the image slot number ``offset`` in the ImageIO object. Works for on disk or in-memory objects. - .. method:: sync() + .. method:: sync() -> None Writes out all data pending for on-disk ImageIO objects. - .. method:: close() + .. method:: close() -> None Closes the ImageIO object. For in-memory objects this free's the allocated space and for on-disk files this closes the file and writes out all meta-data. .. data:: FILE_STREAM + :type: int ImageIO object was opened on a file. .. data:: MEMORY_STREAM + :type: int - iamgeIO object was opened in memory. + ImageIO object was opened in memory. class Image -- Image object --------------------------- The image object is the basic object for machine vision operations. -.. 
class:: Image(path, [buffer=None, [copy_to_fb=False]]) - - Creates a new image object from a file at ``path``. Alternatively, you may - pass a `width`, `height`, and either they any image format value like ``image.GRAYSCALE`` - to create new blank image object (initialized to 0 - black). - - Supports bmp/pgm/ppm/jpg/jpeg/png image files. - - ``copy_to_fb`` if True the image is loaded directly into the frame buffer - allowing you to load up large images. If False, the image is loaded into - MicroPython's heap which is much smaller than the frame buffer. - - ``buffer`` can be set to the any buffer object to use that as the data source - for the image. For example, if you'd like to create a JPEG image from a JPEG - ``bytes()`` or ``bytearray()`` object you can pass the ``width``, ``height``, - ``image.JPEG`` for the JPEG along with setting ``buffer`` to the JPEG byte stream - to create a JPEG image. Finally, note that images are buffer objects themselves. +.. class:: Image(arg, buffer:Optional[bytes, bytearray, memoryview]=None, copy_to_fb:bool=False) + + If ``arg`` is a string then this creates a new image object from a file at ``arg`` path. + Supports loading bmp/pgm/ppm/jpg/jpeg/png image files from disk. If ``copy_to_fb`` is true + the image is copied to the frame buffer versus being allocated on the heap. + + If ``arg`` is an ``ndarray`` then this creates a new image object from the ``ndarray``. + ``ndarray`` objects with a shape of ``(w, h)`` are treated as grayscale images, ``(w, h, 3)`` are treated + as RGB565 images. Only float32 point ``ndarrays`` are supported at this time. When creating + an image this way if you pass a ``buffer`` argument it will be used to store the image data + versus allocating space on the heap. If ``copy_to_fb`` is true the image is copied to the + frame buffer versus being allocated on the heap or using the ``buffer``. 
+ + If ``arg`` is an ``int`` it is then considered the width of a new image and a ``height`` value + and a ``format`` value must follow to create a new blank image object. ``format`` can be + any image pixformat value like `image.GRAYSCALE`. The image will be initialized + to all zeros. Note that a ``buffer`` value is expected for compressed image formats. + ``buffer`` is considered as the source of image data for creating images this way. If used with + ``copy_to_fb`` the data from ``buffer`` is copied to the frame buffer. If you'd like to create a + JPEG image from a JPEG `bytes()` or `bytearray()` object you can pass the ``width``, + ``height``, ``image.JPEG`` for the JPEG along with setting ``buffer`` to the JPEG byte stream + to create a JPEG image. Images support "[]" notation. Do ``image[index] = 8/16-bit value`` to assign an image pixel or ``image[index]`` to get an image pixel which will be @@ -1647,25 +1693,28 @@ The image object is the basic object for machine vision operations. particular, if you'd like to transmit an image you can just pass it to the UART/SPI/I2C write functions to be transmitted automatically. - .. method:: width() + Basic Methods + ~~~~~~~~~~~~~ + + .. method:: width() -> int Returns the image width in pixels. - .. method:: height() + .. method:: height() -> int Returns the image height in pixels. - .. method:: format() + .. method:: format() -> int Returns `image.GRAYSCALE` for grayscale images, `image.RGB565` for RGB565 images, `image.BAYER` for bayer pattern images, and `image.JPEG` for JPEG images. - .. method:: size() + .. method:: size() -> int Returns the image size in bytes. - .. method:: bytearray() + .. method:: bytearray() -> bytearray Returns a `bytearray` object that points to the image data for byte-level read/write access. @@ -1675,7 +1724,7 @@ The image object is the basic object for machine vision operations. that requires a `bytes` like object. This is read-only access. 
Call `bytearray()` to get read/write access. - .. method:: get_pixel(x, y, [rgbtuple]) + .. method:: get_pixel(x:int, y:int, rgbtuple:Optional[bool]=None) -> Union[int, Tuple[int, int, int]] For grayscale images: Returns the grayscale pixel value at location (x, y). For RGB565 images: Returns the RGB888 pixel tuple (r, g, b) at location (x, y). @@ -1700,7 +1749,7 @@ The image object is the basic object for machine vision operations. odd rows. Each pixel is 8-bits. If you call this method with ``rgbtuple`` set then `Image.get_pixel()` will debayer the source image at that pixel location and return a valid RGB888 tuple for the pixel location. - .. method:: set_pixel(x, y, pixel) + .. method:: set_pixel(x:int, y:int, pixel:Union[int, Tuple[int, int, int]]) -> Image For grayscale images: Sets the pixel at location (x, y) to the grayscale value ``pixel``. For RGB565 images: Sets the pixel at location (x, y) to the RGB888 tuple (r, g, b) ``pixel``. @@ -1724,58 +1773,32 @@ The image object is the basic object for machine vision operations. odd rows. Each pixel is 8-bits. If you call this method with an RGB888 tuple the grayscale value of that RGB888 tuple is extracted and set to the pixel location. - .. method:: mean_pool(x_div, y_div) - - Finds the mean of ``x_div`` * ``y_div`` squares in the image and returns - the modified image composed of the mean of each square. - - This method allows you to shrink an image down very quickly in-place. - - Not supported on compressed images or bayer images. - - .. method:: mean_pooled(x_div, y_div) - - Finds the mean of ``x_div`` * ``y_div`` squares in the image and returns - a new image composed of the mean of each square. - - This method allows you to create a shrunken down image copy. - - Not supported on compressed images or bayer images. - - .. 
method:: midpoint_pool(x_div, y_div, [bias=0.5]) - - Finds the midpoint of ``x_div`` * ``y_div`` squares in the image and returns - the modified image composed of the midpoint of each square. - - A ``bias`` of 0.0 returns the min of each area while a ``bias`` of 1.0 returns - the max of each area. + Conversion Methods + ~~~~~~~~~~~~~~~~~~ - This method allows you to shrink an image down very quickly in-place. + .. method:: to_ndarray(dtype:str, buffer:Optional[bytes, bytearray, memoryview]=None) -> ndarray - Not supported on compressed images or bayer images. - - .. method:: midpoint_pooled(x_div, y_div, [bias=0.5]) + Returns a ``ndarray`` object created from the image. + This only works for GRAYSCALE or RGB565 images currently. - Finds the midpoint of ``x_div`` * ``y_div`` squares in the image and returns - a new image composed of the midpoint of each square. + ``dtype`` can be ``b``, ``B``, or ``f`` for creating a signed 8-bit, unsigned 8-bit, or 32-bit floating point ``ndarray``. + GRAYSCALE images are directly converted to unsigned 8-bit ``ndarray`` objects. For signed 8-bit ``ndarray`` + objects the values (0:255) are mapped to (-127:128). For float 32-bit ``ndarray`` objects the values are + mapped to (0.0:255.0). RGB565 images are converted to 3-channel ``ndarray`` objects and the same + process described above for GRAYSCALE images is applied to each channel depending on ``dtype``. Note that + ``dtype`` also accepts the integer values (e.g. `ord()`) of ``b``, ``B``, and ``f`` respectively. - A ``bias`` of 0.0 returns the min of each area while a ``bias`` of 1.0 returns - the max of each area. + ``buffer`` if not ``None`` is a ``bytearray`` object to use as the buffer for the ``ndarray``. + If ``None`` a new buffer is allocated on the heap to store the ``ndarray`` image data. You can + use the ``buffer`` argument to directly allocate the ``ndarray`` in a pre-allocated buffer saving + a heap allocation and a copy operation. 
- This method allows you to create a shrunken down image copy. - - Not supported on compressed images or bayer images. + The ``ndarray`` returned has the shape of ``(height, width)`` for GRAYSCALE images and + ``(height, width, 3)`` for RGB565 images. - .. method:: to_bitmap([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + .. method:: to_bitmap(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - Converts an image to a bitmap image (1 bit per pixel). If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. - - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to a bitmap image (1 bit per pixel). ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -1797,11 +1820,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. 
- ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -1825,6 +1848,13 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. + + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. + .. note:: Bitmap images are like grayscale images with only two pixels values - 0 @@ -1840,16 +1870,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - .. method:: to_grayscale([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + .. 
method:: to_grayscale(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, copy:bool=False, copy_to_fb:bool=False) -> Image - Converts an image to a grayscale image (8-bits per pixel). If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. - - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to a grayscale image (8-bits per pixel). ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -1871,11 +1894,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. 
- ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -1899,18 +1922,18 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. + + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. - .. method:: to_rgb565([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + Returns the image object so you can call another method using ``.`` notation. - Converts an image to an RGB565 image (16-bits per pixel). If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. 
method:: to_rgb565(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to an RGB565 image (16-bits per pixel). ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -1932,11 +1955,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. 
A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -1960,18 +1983,18 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. - .. method:: to_rainbow([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=image.PALETTE_RAINBOW, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. - Converts an image to an RGB565 rainbow image (16-bits per pixel). If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + Returns the image object so you can call another method using ``.`` notation. - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + .. method:: to_rainbow(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=PALETTE_RAINBOW, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image + + Converts an image to an RGB565 rainbow image (16-bits per pixel). 
``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -1993,11 +2016,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2021,18 +2044,18 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. - .. 
method:: to_ironbow([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=image.PALETTE_IRONBOW, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. + + Returns the image object so you can call another method using ``.`` notation. - Converts an image to an RGB565 ironbow image (16-bits per pixel). If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. method:: to_ironbow(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=PALETTE_IRONBOW, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to an RGB565 ironbow image (16-bits per pixel). ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2054,11 +2077,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. 
- ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2082,18 +2105,18 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. + + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. - .. method:: to_jpeg([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + Returns the image object so you can call another method using ``.`` notation. - Converts an image to a JPEG image. If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. 
method:: to_jpeg(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False, quality:int=90, encode_for_ide:bool=False, subsampling:int=0) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to a JPEG image. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2115,11 +2138,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. 
A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2143,18 +2166,31 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. + + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. + + ``quality`` controls the jpeg image compression quality. The value can be between 0 and 100. + + ``encode_for_ide`` if True the image is encoded in a way that the IDE can display it if + printed by doing ``print(image)``. This is useful for debugging purposes over UARTs via + Open Terminal in the IDE. + + ``subsampling`` can be: + + * `image.JPEG_SUBSAMPLING_AUTO`: Use the best subsampling for the image based on the quality. + * `image.JPEG_SUBSAMPLING_444`: Use 4:4:4 subsampling. + * `image.JPEG_SUBSAMPLING_422`: Use 4:2:2 subsampling. + * `image.JPEG_SUBSAMPLING_420`: Use 4:2:0 subsampling. - .. method:: to_png([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + Returns the image object so you can call another method using ``.`` notation. - Converts an image to a PNG image. If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. 
method:: to_png(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Converts an image to a PNG image. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2176,11 +2212,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2204,108 +2240,96 @@ The image object is the basic object for machine vision operations. 
* `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. - - .. method:: compress([quality=90]) + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. - JPEG compresses the image in place. Use this method versus `Image.compressed()` - to save heap space and to use a higher ``quality`` for compression at the - cost of destroying the original image. + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. Returns the image object so you can call another method using ``.`` notation. - ``quality`` is the compression quality (0-100) (int). - - Returns the compressed image if called on a compressed image. - - .. method:: compress_for_ide([quality=90]) - - JPEG compresses the image in place. Use this method versus `Image.compressed()` - to save heap space and to use a higher ``quality`` for compression at the - cost of destroying the original image. - - This method JPEG compresses the image and then formats the JPEG data for - transmission to OpenMV IDE to display by encoding every 6-bits as a byte - valued between 128-191. This is done to prevent JPEG data from being - misinterpreted as other text data in the byte stream. + .. method:: compress(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False, quality:int=90, encode_for_ide:bool=False, subsampling:int=0) -> Image - You need to use this method to format image data for display to terminal - windows created via "Open Terminal" in OpenMV IDE. 
+ Converts an image to a JPEG image. - Returns the image object so you can call another method using ``.`` notation. - - ``quality`` is the compression quality (0-100) (int). + ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this + value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified + then it will match ``x_scale`` to maintain the aspect ratio. - Returns the image compressed for the IDE if called on a compressed image. - Do not call this on an image already compressed for the IDE. + ``y_scale`` controls how much the displayed image is scaled by in the y direction (float). If this + value is negative the image will be flipped vertically. Note that if ``x_scale`` is not specified + then it will match ``x_scale`` to maintain the aspect ratio. - .. method:: compressed([quality=90]) + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h) of the source image to draw. This + allows you to extract just the pixels in the ROI to scale and draw on the destination image. - Returns a JPEG compressed image - the original image is untouched. However, - this method requires a somewhat large allocation of heap space so the image - compression quality must be lower and the image resolution must be lower - than what you could do with `Image.compress()`. + ``rgb_channel`` is the RGB channel (0=R, G=1, B=2) to extract from an RGB565 image (if passed) + and to render onto the destination image. For example, if you pass ``rgb_channel=1`` this will + extract the green channel of the source RGB565 image and draw that in grayscale on the + destination image. - ``quality`` is the compression quality (0-100) (int). + ``alpha`` controls how much of the source image to blend into the destination image. A value of + 256 draws an opaque source image while a value lower than 256 produces a blend between the source + and destination image. 0 results in no modification to the destination image. 
- Returns a compressed image copy if called on a compressed image. + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of + whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - .. method:: compressed_for_ide([quality=90]) + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel + level allowing you to precisely control the alpha value of pixels based on their grayscale value. + A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes + more transparent until 0. This is applied after ``rgb_channel`` extraction if used. - Returns a JPEG compressed image - the original image is untouched. However, - this method requires a somewhat large allocation of heap space so the image - compression quality must be lower and the image resolution must be lower - than what you could do with `Image.compress()`. + ``hint`` can be a logical OR of the flags: - This method JPEG compresses the image and then formats the JPEG data for - transmission to OpenMV IDE to display by encoding every 6-bits as a byte - valued between 128-191. This is done to prevent JPEG data from being - misinterpreted as other text data in the byte stream. + * `image.AREA`: Use area scaling when downscaling versus the default of nearest neighbor. + * `image.BILINEAR`: Use bilinear scaling versus the default of nearest neighbor scaling. + * `image.BICUBIC`: Use bicubic scaling versus the default of nearest neighbor scaling. + * `image.CENTER`: Center the image being drawn on the display. This is applied after scaling. + * `image.HMIRROR`: Horizontally mirror the image. + * `image.VFLIP`: Vertically flip the image. + * `image.TRANSPOSE`: Transpose the image (swap x/y). 
+ * `image.EXTRACT_RGB_CHANNEL_FIRST`: Do rgb_channel extraction before scaling. + * `image.APPLY_COLOR_PALETTE_FIRST`: Apply color palette before scaling. + * `image.SCALE_ASPECT_KEEP`: Scale the image being drawn to fit inside the display. + * `image.SCALE_ASPECT_EXPAND`: Scale the image being drawn to fill the display (results in cropping) + * `image.SCALE_ASPECT_IGNORE`: Scale the image being drawn to fill the display (results in stretching). + * `image.ROTATE_90`: Rotate the image by 90 degrees (this is just VFLIP | TRANSPOSE). + * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). + * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - You need to use this method to format image data for display to terminal - windows created via "Open Terminal" in OpenMV IDE. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. - ``quality`` is the compression quality (0-100) (int). + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. - Returns a image compressed for the IDE copy if called on a compressed image. - Do not call this on an image already compressed for the IDE. + ``quality`` controls the jpeg image compression quality. The value can be between 0 and 100. - .. method: image.jpeg_encode_for_ide() + ``encode_for_ide`` if True the image is encoded in a way that the IDE can display it if + printed by doing ``print(image)``. This is useful for debugging purposes over UARTs via + Open Terminal in the IDE. - This formats the JPEG data for transmission to OpenMV IDE to display by - encoding every 6-bits as a byte valued between 128-191. This is done to - prevent JPEG data from being misinterpreted as other text data in the byte - stream. 
This method does the formatting in-place destroying the original - JPEG image and returns the encoded jpeg image. + ``subsampling`` can be: - You need to use this method to format image data for display to terminal - windows created via "Open Terminal" in OpenMV IDE. + * `image.JPEG_SUBSAMPLING_AUTO`: Use the best subsampling for the image based on the quality. + * `image.JPEG_SUBSAMPLING_444`: Use 4:4:4 subsampling. + * `image.JPEG_SUBSAMPLING_422`: Use 4:2:2 subsampling. + * `image.JPEG_SUBSAMPLING_420`: Use 4:2:0 subsampling. Returns the image object so you can call another method using ``.`` notation. - Only works on JPEG images. - - .. method: image.jpeg_encoded_for_ide() - - This formats the JPEG data for transmission to OpenMV IDE to display by - encoding every 6-bits as a byte valued between 128-191. This is done to - prevent JPEG data from being misinterpreted as other text data in the byte - stream. This method does the formatting out-of-place preserving the original - JPEG image and returns a new encoded jpeg image. + .. note:: - You need to use this method to format image data for display to terminal - windows created via "Open Terminal" in OpenMV IDE. + `Image.compress` is an alias for `Image.to_jpeg`. - Returns the image object so you can call another method using ``.`` notation. + .. method:: copy(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy_to_fb:bool=False) -> Image - Only works on JPEG images. - - .. method:: copy([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy_to_fb=False]]]]]]]]]) - - Creates a deep copy of the image object. If ``copy_to_fb`` is False then - the new image is allocated on the MicroPython heap. However, the MicroPython heap is limited - and may not have space to store the new image if exhausted. 
Instead, set ``copy_to_fb`` to - True to set the frame buffer to the new image making this function work just like `sensor.snapshot()`. + Creates a deep copy of the image object. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2327,11 +2351,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2355,20 +2379,14 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the new image object. - - Not supported on compressed images. + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + This has no special effect if the image is already in the frame buffer. - .. 
method:: crop([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + Returns the image object so you can call another method using ``.`` notation. - Modifies an image in-place without changing the underlying image type. If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. method:: crop(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Modifies an image in-place without changing the underlying image type. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2390,11 +2408,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. 
- ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2418,20 +2436,18 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). - Returns the image object so you can call another method using ``.`` notation. + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. - Not supported on compressed images. + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. - .. method:: scale([x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [copy=False]]]]]]]]]) + Returns the image object so you can call another method using ``.`` notation. - Modifies an image in-place without changing the underlying image type. If ``copy`` is False - this method will try to modify the image in-place. If ``copy`` is True then - this method will return a new image copy allocated on the heap. + .. 
method:: scale(x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, copy:bool=False, copy_to_fb:bool=False) -> Image - ``copy`` may also be another image object, which in this case this method will try to - re-use that image objects storage space and will return a new image object that uses - the previous image objects storage space. After doing this do not use any references - to the old image object anymore as they will be stale. + Modifies an image in-place without changing the underlying image type. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2453,11 +2469,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. - ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. 
A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2481,11 +2497,20 @@ The image object is the basic object for machine vision operations. * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). + ``copy`` if True create a deep-copy on the heap of the image that's been converted versus converting the + original image in-place. + + ``copy_to_fb`` if True the image is loaded directly into the frame buffer. + ``copy_to_fb`` has priority over ``copy``. This has no special effect if the image is already in + the frame buffer. + Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + .. note:: + + `Image.scale` is an alias for `Image.crop`. - .. method:: save(path, [roi, [quality=50]]) + .. method:: save(path:str, roi:Optional[Tuple[int,int,int,int]]=None, quality=50) -> Image Saves a copy of the image to the filesystem at ``path``. @@ -2501,11 +2526,14 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - .. method:: flush() + .. method:: flush() -> None Updates the frame buffer in the IDE with the image in the frame buffer on the camera. - .. method:: clear([mask]) + Drawing Methods + ~~~~~~~~~~~~~~~ + + .. method:: clear(mask:Optional[Image]=None) -> Image Sets all pixels in the image to zero (very fast). @@ -2518,7 +2546,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images. - .. method:: draw_line(x0, y0, x1, y1, [color, [thickness=1]]) + .. method:: draw_line(x0:int, y0:int, x1:int, y1:int, color:Optional[int,Tuple[int,int,int]]=None, thickness=1) -> Image Draws a line from (x0, y0) to (x1, y1) on the image. You may either pass x0, y0, x1, y1 separately or as a tuple (x0, y0, x1, y1). 
@@ -2533,7 +2561,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_rectangle(x, y, w, h, [color, [thickness=1, [fill=False]]]) + .. method:: draw_rectangle(x:int, y:int, w:int, h:int, color:Optional[int,Tuple[int,int,int]]=None, thickness=1, fill=False) -> Image Draws a rectangle on the image. You may either pass x, y, w, h separately or as a tuple (x, y, w, h). @@ -2550,7 +2578,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_circle(x, y, radius, [color, [thickness=1, [fill=False]]]) + .. method:: draw_circle(x:int, y:int, radius:int, color:Optional[int,Tuple[int,int,int]]=None, thickness=1, fill=False) -> Image Draws a circle on the image. You may either pass x, y, radius separately or as a tuple (x, y, radius). @@ -2567,7 +2595,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_ellipse(cx, cy, rx, ry, rotation, [color, [thickness=1, [fill=False]]]) + .. method:: draw_ellipse(cx:int, cy:int, rx:int, ry:int, rotation:int, color:Optional[int,Tuple[int,int,int]]=None, thickness=1, fill=False) -> Image Draws an ellipse on the image. You may either pass cx, cy, rx, ry, and the rotation (in degrees) separately or as a tuple (cx, yc, rx, ry, rotation). @@ -2584,7 +2612,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_string(x, y, text, [color, [scale=1, [x_spacing=0, [y_spacing=0, [mono_space=True, [char_rotation=0, [char_hmirror=False, [char_vflip=False, [string_rotation=0, [string_hmirror=False, [string_vflip=False]]]]]]]]]]]) + .. 
method:: draw_string(x:int, y:int, text:str, color:Optional[int,Tuple[int,int,int]]=None, scale=1, x_spacing=0, y_spacing=0, mono_space=True, char_rotation=0, char_hmirror=False, char_vflip=False, string_rotation=0, string_hmirror=False, string_vflip=False) -> Image Draws 8x10 text starting at location (x, y) in the image. You may either pass x, y separately or as a tuple (x, y). @@ -2627,7 +2655,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_cross(x, y, [color, [size=5, [thickness=1]]]) + .. method:: draw_cross(x:int, y:int, color:Optional[int,Tuple[int,int,int]]=None, size=5, thickness=1) -> Image Draws a cross at location x, y. You may either pass x, y separately or as a tuple (x, y). @@ -2644,7 +2672,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_arrow(x0, y0, x1, y1, [color, [thickness=1]]) + .. method:: draw_arrow(x0:int, y0:int, x1:int, y1:int, color:Optional[int,Tuple[int,int,int]]=None, thickness=1) -> Image Draws an arrow from (x0, y0) to (x1, y1) on the image. You may either pass x0, y0, x1, y1 separately or as a tuple (x0, y0, x1, y1). @@ -2659,7 +2687,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: draw_edges(image, corners, [color, [size=0, [thickness=1, [fill=False]]]]) + .. method:: draw_edges(image:Image, corners, color:Optional[int,Tuple[int,int,int]]=None, size=0, thickness=1, fill=False) -> Image Draws line edges between a corner list returned by methods like `blob.corners`. Coners is a four valued tuple of two valued x/y tuples. E.g. [(x1,y1),(x2,y2),(x3,y3),(x4,y4)]. @@ -2678,14 +2706,14 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. 
method:: draw_image(image, x, y, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0]]]]]]]]) + .. method:: draw_image(image:Image, x:int, y:int, x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0) -> Image Draws an ``image`` whose top-left corner starts at location x, y. You may either pass x, y separately or as a tuple (x, y). This method automatically handles rendering the image passed into the correct pixel format for the destination image while also handling clipping seamlessly. You may also pass a path instead of an image object for this method to automatically load the image - from disk and draw it in one step. E.g. ``draw_image("test.jpg")``. + from disk and use it in one step. E.g. ``draw_image("test.jpg")``. ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified @@ -2707,11 +2735,11 @@ The image object is the basic object for machine vision operations. 256 draws an opaque source image while a value lower than 256 produces a blend between the source and destination image. 0 results in no modification to the destination image. - ``color_palette`` if not ``-1`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of whatever the source image is. This is applied after ``rgb_channel`` extraction if used. 
- ``alpha_palette`` if not ``-1`` can be a 256 pixel in total GRAYSCALE image to use as a alpha + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as a alpha palette which modulates the ``alpha`` value of the source image being drawn at a pixel pixel level allowing you to precisely control the alpha value of pixels based on their grayscale value. A pixel value of 255 in the alpha lookup table is opaque which anything less than 255 becomes @@ -2738,9 +2766,7 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. - - .. method:: draw_keypoints(keypoints, [color, [size=10, [thickness=1, [fill=False]]]]) + .. method:: draw_keypoints(keypoints, color:Optional[int,Tuple[int,int,int]]=None, size=10, thickness=1, fill=False) -> Image Draws the keypoints of a keypoints object on the image. You may also pass a list of three value tuples containing the (x, y, rotation_angle_in_degrees) to @@ -2761,7 +2787,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: flood_fill(x, y, [seed_threshold=0.05, [floating_threshold=0.05, [color, [invert=False, [clear_background=False, [mask=None]]]]]]) + .. method:: flood_fill(x:int, y:int, seed_threshold=0.05, floating_threshold=0.05, color:Optional[int,Tuple[int,int,int]]=None, invert=False, clear_background=False, mask:Optional[Image]=None) -> Image Flood fills a region of the image starting from location x, y. You may either pass x, y separately or as a tuple (x, y). @@ -2793,7 +2819,10 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: mask_rectange([x, y, w, h]) + Masking Methods + ~~~~~~~~~~~~~~~ + + .. method:: mask_rectange(x:int, y:int, w:int, h:int) -> Image Zeros a rectangular part of the image. 
If no arguments are supplied this method zeros the center of the image. @@ -2802,7 +2831,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: mask_circle([x, y, radius]) + .. method:: mask_circle(x:int, y:int, radius:int) -> Image Zeros a circular part of the image. If no arguments are supplied this method zeros the center of the image. @@ -2811,7 +2840,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: mask_ellipse([x, y, radius_x, radius_y, rotation_angle_in_degrees]) + .. method:: mask_ellipse(x:int, y:int, radius_x:int, radius_y:int, rotation_angle_in_degrees:int) -> Image Zeros an ellipsed shaped part of the image. If no arguments are supplied this method zeros the center of the image. @@ -2820,7 +2849,10 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: binary(thresholds, [invert=False, [zero=False, [mask=None, [to_bitmap=False, [copy=False]]]]]) + Binary Methods + ~~~~~~~~~~~~~~ + + .. method:: binary(thresholds:List[Tuple[int,int]], invert=False, zero=False, mask:Optional[Image]=None, to_bitmap=False, copy=False) -> Image Sets all pixels in the image to black or white depending on if the pixel is inside of a threshold in the threshold list ``thresholds`` or not. @@ -2887,15 +2919,19 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: invert() + .. method:: invert() -> Image + + Flips (binary inverts) all pixels values in the image. Note that binary + inversion is the same as numerical inversion for images because: - Flips (binary inverts) all pixels values in a binary image very quickly. 
+ ``(255 - pixel) % 256 == (255 + ~pixel + 1) % 256 == (~pixel + 256) % 256 == ~pixel`` and + this holds for any value that's in a range of ``(0-2^n-1)`` which is true for all mutable image datatypes. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - .. method:: b_and(image, [mask=None]) + .. method:: b_and(image:Image, mask:Optional[Image]=None) -> Image Logically ANDs this image with another image. @@ -2913,7 +2949,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: b_nand(image, [mask=None]) + .. method:: b_nand(image:Image, mask:Optional[Image]=None) -> Image Logically NANDs this image with another image. @@ -2931,7 +2967,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: b_or(image, [mask=None]) + .. method:: b_or(image:Image, mask:Optional[Image]=None) -> Image Logically ORs this image with another image. @@ -2949,7 +2985,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: b_nor(image, [mask=None]) + .. method:: b_nor(image:Image, mask:Optional[Image]=None) -> Image Logically NORs this image with another image. @@ -2967,7 +3003,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: b_xor(image, [mask=None]) + .. method:: b_xor(image:Image, mask:Optional[Image]=None) -> Image Logically XORs this image with another image. @@ -2985,7 +3021,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: b_xnor(image, [mask=None]) + .. 
method:: b_xnor(image:Image, mask:Optional[Image]=None) -> Image Logically XNORs this image with another image. @@ -3003,100 +3039,10 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: erode(size, [threshold, [mask=None]]) - - Removes pixels from the edges of segmented areas. - - This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels - across the image and zeroing the center pixel of the kernel if the sum of - the neighbour pixels set is not greater than ``threshold``. - - This method works like the standard erode method if threshold is not set. If - ``threshold`` is set then you can specify erode to only erode pixels that - have, for example, less than 2 pixels set around them with a threshold of 2. - - ``mask`` is another image to use as a pixel level mask for the operation. - The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. Only pixels set in the mask are - modified. - - Returns the image object so you can call another method using ``.`` notation. - - Not supported on compressed images or bayer images. - - .. method:: dilate(size, [threshold, [mask=None]]) - - Adds pixels to the edges of segmented areas. - - This method works by convolving a kernel of ((size*2)+1)x((size*2)+1) pixels - across the image and setting the center pixel of the kernel if the sum of - the neighbour pixels set is greater than ``threshold``. - - This method works like the standard dilate method if threshold is not set. - If ``threshold`` is set then you can specify dilate to only dilate pixels - that have, for example, more than 2 pixels set around them with a threshold - of 2. - - ``mask`` is another image to use as a pixel level mask for the operation. - The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. 
Only pixels set in the mask are - modified. - - Returns the image object so you can call another method using ``.`` notation. - - Not supported on compressed images or bayer images. - - .. method:: open(size, [threshold, [mask=None]]) - - Performs erosion and dilation on an image in order. Please see `Image.erode()` - and `Image.dilate()` for more information. - - ``mask`` is another image to use as a pixel level mask for the operation. - The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. Only pixels set in the mask are - modified. - - Returns the image object so you can call another method using ``.`` notation. - - Not supported on compressed images or bayer images. - - .. method:: close(size, [threshold, [mask=None]]) - - Performs dilation and erosion on an image in order. Please see `Image.dilate()` - and `Image.erode()` for more information. - - ``mask`` is another image to use as a pixel level mask for the operation. - The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. Only pixels set in the mask are - modified. - - Returns the image object so you can call another method using ``.`` notation. - - Not supported on compressed images or bayer images. - - .. method:: top_hat(size, [threshold, [mask=None]]) - - Returns the image difference of the image and `Image.open()`'ed image. - - ``mask`` is another image to use as a pixel level mask for the operation. - The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. Only pixels set in the mask are - modified. - - Not supported on compressed images or bayer images. - - .. method:: black_hat(size, [threshold, [mask=None]]) + ISP Methods + ~~~~~~~~~~~ - Returns the image difference of the image and `Image.close()`'ed image. - - ``mask`` is another image to use as a pixel level mask for the operation. 
- The mask should be an image with just black or white pixels and should be the - same size as the image being operated on. Only pixels set in the mask are - modified. - - Not supported on compressed images or bayer images. - - .. method:: awb(max=False) + .. method:: awb(max:bool=False) -> Image Performs automatic white balance on the image using the gray-world algorithm. This method operates on RAW Bayer Images so that you can improve image quality before converting @@ -3107,9 +3053,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed or yuv images. - .. method:: ccm(matrix) + .. method:: ccm(matrix) -> Image Multiples the passed floating-point color-correction-matrix with the image. Matrices may be in the form of:: @@ -3136,9 +3082,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - .. method:: gamma([gamma=1.0, [contrast=1.0, [brightness=0.0]) + .. method:: gamma(gamma:float=1.0, contrast:float=1.0, brightness:float=0.0) -> Image Quickly changes the image gamma, contrast, and brightness. @@ -3159,22 +3105,55 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images. + Not supported on compressed or bayer/yuv images. - .. method:: gamma_corr([gamma=1.0, [contrast=1.0, [brightness=0.0]) + .. method:: gamma_corr(gamma:float=1.0, contrast:float=1.0, brightness:float=0.0) -> Image - Alias for `Image.gamma`. + Quickly changes the image gamma, contrast, and brightness. - .. 
method:: negate() + ``gamma`` with values greater than 1.0 makes the image darker in a non-linear + manner while less than 1.0 makes the image brighter. The gamma value is applied + to the image by scaling all pixel color channels to be between [0:1) and then + doing a remapping of ``pow(pixel, 1/gamma)`` on all pixels before scaling back. - Flips (numerically inverts) all pixels values in an image very quickly. E.g. - for GRAYSCALE images this method changes all pixels from ``pixel`` to ``255 - pixel``. + ``contrast`` with values greater than 1.0 makes the image brighter in a linear + manner while less than 1.0 makes the image darker. The contrast value is applied + to the image by scaling all pixel color channels to be between [0:1) and then + doing a remapping of ``pixel * contrast`` on all pixels before scaling back. + + ``brightness`` with values greater than 0.0 makes the image brighter in a constant + manner while less than 0.0 makes the image darker. The brightness value is applied + to the image by scaling all pixel color channels to be between [0:1) and then + doing a remapping of ``pixel + brightness`` on all pixels before scaling back. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed or bayer/yuv images. - .. method:: replace(image, [hmirror=False, [vflip=False, [transpose=False, [mask=None]]]]) + .. note:: + + `Image.gamma_corr` is an alias for `Image.gamma`. + + Math Methods + ~~~~~~~~~~~~ + + .. method:: negate() -> Image + + Flips (binary inverts) all pixels values in the image. Note that binary + inversion is the same as numerical inversion for images because: + + ``(255 - pixel) % 256 == (255 + ~pixel + 1) % 256 == (~pixel + 256) % 256 == ~pixel`` and + this holds for any value that's in a range of ``(0-2^n-1)`` which is true for all mutable image datatypes. 
+ + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer/yuv images. + + .. note:: + + `Image.negate` is an alias for `Image.invert`. + + .. method:: replace(image:Image, hmirror=False, vflip=False, transpose=False, mask:Optional[Image]=None) -> Image Replaces all pixels in the image object with a new image. @@ -3214,15 +3193,15 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: assign(image, [hmirror=False, [vflip=False, [transpose=False, [mask=None]]]]) + .. method:: assign(image:Image, hmirror=False, vflip=False, transpose=False, mask:Optional[Image]=None) -> Image Alias for `Image.replace`. - .. method:: set(image, [hmirror=False, [vflip=False, [transpose=False, [mask=None]]]]) + .. method:: set(image:Image, hmirror=False, vflip=False, transpose=False, mask:Optional[Image]=None) -> Image Alias for `Image.replace`. - .. method:: add(image, [mask=None]) + .. method:: add(image:Image, mask:Optional[Image]=None) -> Image Adds an image pixel-wise to this one. @@ -3240,7 +3219,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: sub(image, [reverse=False, [mask=None]]) + .. method:: sub(image:Image, reverse=False, mask:Optional[Image]=None) -> Image Subtracts an image pixel-wise to this one. @@ -3261,7 +3240,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: min(image, [mask=None]) + .. method:: min(image:Image, mask:Optional[Image]=None) -> Image Returns the minimum image of two images pixel-wise. @@ -3279,7 +3258,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: max(image, [mask=None]) + .. 
method:: max(image:Image, mask:Optional[Image]=None) -> Image Returns the maximum image of two images pixel-wise. @@ -3297,7 +3276,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: difference(image, [mask=None]) + .. method:: difference(image:Image, mask:Optional[Image]=None) -> Image Returns the absolute difference image between two images (e.g. ||a-b||). @@ -3315,7 +3294,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: blend(image, [alpha=128, [mask=None]]) + .. method:: blend(image:Image, alpha=128, mask:Optional[Image]=None) -> Image Alpha blends two images with each other. @@ -3338,7 +3317,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: histeq([adaptive=False, [clip_limit=-1, [mask=None]]]) + .. method:: histeq(adaptive=False, clip_limit=-1, mask:Optional[Image]=None) -> Image Runs the histogram equalization algorithm on the image. Histogram equalization normalizes the contrast and brightness in the image. @@ -3360,7 +3339,104 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: mean(size, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + Filtering Methods + ~~~~~~~~~~~~~~~~~ + + .. method:: erode(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Removes pixels from the edges of segmented areas. + + This method works by convolving a kernel of ``((size*2)+1)x((size*2)+1)`` pixels + across the image and zeroing the center pixel of the kernel if the sum of + the neighbour pixels clear is greater than ``threshold``. + + This method works like the standard erode method if threshold is not set. 
If + ``threshold`` is set then you can specify erode to only erode pixels that + have, for example, more than 2 pixels clear in the kernel region with a + threshold of 2. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer/yuv images. + + .. method:: dilate(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Adds pixels to the edges of segmented areas. + + This method works by convolving a kernel of ``((size*2)+1)x((size*2)+1)`` pixels + across the image and setting the center pixel of the kernel if the sum of + the neighbour pixels set is greater than ``threshold``. + + This method works like the standard dilate method if threshold is not set. + If ``threshold`` is set then you can specify dilate to only dilate pixels + that have, for example, more than 2 pixels set in the kernel region with a + threshold of 2. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer/yuv images. + + .. method:: open(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Performs erosion and dilation on an image in order. Please see `Image.erode()` + and `Image.dilate()` for more information. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. 
Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer/yuv images. + + .. method:: close(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Performs dilation and erosion on an image in order. Please see `Image.dilate()` + and `Image.erode()` for more information. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Returns the image object so you can call another method using ``.`` notation. + + Not supported on compressed images or bayer/yuv images. + + .. method:: top_hat(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Returns the image difference of the image and `Image.open()`'ed image. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Not supported on compressed images or bayer/yuv images. + + .. method:: black_hat(size:int, threshold:Optional[int]=None, mask:Optional[Image]=None) -> Image + + Returns the image difference of the image and `Image.close()`'ed image. + + ``mask`` is another image to use as a pixel level mask for the operation. + The mask should be an image with just black or white pixels and should be the + same size as the image being operated on. Only pixels set in the mask are + modified. + + Not supported on compressed images or bayer/yuv images. + + .. method:: mean(size:int, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Standard mean blurring filter using a box filter. 
@@ -3381,11 +3457,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. - - This method is not available on the OpenMV Cam M4. + Not supported on compressed images or bayer/yuv images. - .. method:: median(size, [percentile=0.5, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + .. method:: median(size:int, percentile:Optional[float]=0.5, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Runs the median filter on the image. The median filter is the best filter for smoothing surfaces while preserving edges but it is very slow. @@ -3412,11 +3486,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. - - This method is not available on the OpenMV Cam M4. + Not supported on compressed images or bayer/yuv images. - .. method:: mode(size, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + .. method:: mode(size:int, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Runs the mode filter on the image by replacing each pixel with the mode of their neighbors. This method works great on grayscale images. However, on @@ -3440,11 +3512,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - This method is not available on the OpenMV Cam M4. - - .. method:: midpoint(size, [bias=0.5, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]) + ..
method:: midpoint(size:int, bias:Optional[float]=0.5, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Runs the midpoint filter on the image. This filter finds the midpoint ((max-min)/2) of each pixel neighborhood in the image. @@ -3469,29 +3539,26 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - This method is not available on the OpenMV Cam M4. - - .. method:: morph(size, kernel, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]) + .. method:: morph(size:int, kernel:list, mul:Optional[float]=1.0, add:Optional[float]=0.0, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Convolves the image by a filter kernel. This allows you to do general purpose convolutions on an image. ``size`` controls the size of the kernel which must be - ((size*2)+1)x((size*2)+1) elements big. - - ``kernel`` is the kernel to convolve the image by. It can either be a tuple - or a list of integer values. + ``((size*2)+1)x((size*2)+1)`` elements big. - ``mul`` is number to multiply the convolution pixel results by. When not set - it defaults to a value that will prevent scaling in the convolution output. + ``kernel`` is the kernel to convolve the image by. The kernel can either be + a 1D tuple or list or a 2D tuple or list. For 1D kernels the tuple/list + must be ``((size*2)+1)x((size*2)+1)`` elements big. For 2D tuples/lists each + row must be ``((size*2)+1)`` elements big and there must be ``((size*2)+1)`` rows. - ``add`` is a value to add to each convolution pixel result. + ``mul`` allows you to do a global contrast adjustment. It's value should be greater than + 0.0. The default value is 1.0 which does nothing. 
- ``mul`` basically allows you to do a global contrast adjustment and ``add`` - allows you to do a global brightness adjustment. Pixels that go outside of - the image mins and maxes for color channels will be clipped. + ``add`` allows you to do a global brightness adjustment. It's value should be between + 0.0 and 1.0. The default value is 0.0 which does nothing. If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the @@ -3508,9 +3575,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - .. method:: gaussian(size, [unsharp=False, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]]) + .. method:: gaussian(size:int, unsharp:Optional[bool]=False, mul:Optional[float]=1.0, add:Optional[float]=0.0, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Convolves the image by a smoothing guassian kernel. @@ -3520,14 +3587,11 @@ The image object is the basic object for machine vision operations. filtering operation this method will perform an unsharp mask operation which improves image sharpness on edges. - ``mul`` is number to multiply the convolution pixel results by. When not set - it defaults to a value that will prevent scaling in the convolution output. - - ``add`` is a value to add to each convolution pixel result. + ``mul`` allows you to do a global contrast adjustment. It's value should be greater than + 0.0. The default value is 1.0 which does nothing. - ``mul`` basically allows you to do a global contrast adjustment and ``add`` - allows you to do a global brightness adjustment. Pixels that go outside of - the image mins and maxes for color channels will be clipped. 
+ ``add`` allows you to do a global brightness adjustment. It's value should be between + 0.0 and 1.0. The default value is 0.0 which does nothing. If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the @@ -3544,9 +3608,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - .. method:: laplacian(size, [sharpen=False, [mul, [add=0, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]]) + .. method:: laplacian(size:int, sharpen:Optional[bool]=False, mul:Optional[float]=1.0, add:Optional[float]=0.0, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Convolves the image by a edge detecting laplacian kernel. @@ -3556,14 +3620,11 @@ The image object is the basic object for machine vision operations. unthresholded edge detection image this method will instead sharpen the image. Increase the kernel size then to increase the image sharpness. - ``mul`` is number to multiply the convolution pixel results by. When not set - it defaults to a value that will prevent scaling in the convolution output. + ``mul`` allows you to do a global contrast adjustment. It's value should be greater than + 0.0. The default value is 1.0 which does nothing. - ``add`` is a value to add to each convolution pixel result. - - ``mul`` basically allows you to do a global contrast adjustment and ``add`` - allows you to do a global brightness adjustment. Pixels that go outside of - the image mins and maxes for color channels will be clipped. + ``add`` allows you to do a global brightness adjustment. It's value should be between + 0.0 and 1.0. The default value is 0.0 which does nothing. 
If you'd like to adaptive threshold the image on the output of the filter you can pass ``threshold=True`` which will enable adaptive thresholding of the @@ -3580,11 +3641,9 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. - - This method is not available on the OpenMV Cam M4. + Not supported on compressed images or bayer/yuv images. - .. method:: bilateral(size, [color_sigma=0.1, [space_sigma=1, [threshold=False, [offset=0, [invert=False, [mask=None]]]]]]) + .. method:: bilateral(size:int, color_sigma:Optional[float]=0.1, space_sigma:Optional[float]=1.0, threshold:Optional[bool]=False, offset:Optional[int]=0, invert:Optional[bool]=False, mask:Optional[Image]=None) -> Image Convolves the image by a bilateral filter. The bilateral filter smooths the image while keeping edges in the image. @@ -3612,11 +3671,12 @@ The image object is the basic object for machine vision operations. Returns the image object so you can call another method using ``.`` notation. - Not supported on compressed images or bayer images. + Not supported on compressed images or bayer/yuv images. - This method is not available on the OpenMV Cam M4. + Geometric Methods + ~~~~~~~~~~~~~~~~~ - .. method:: linpolar([reverse=False]) + .. method:: linpolar(reverse:bool=False) -> Image Re-project's and image from cartessian coordinates to linear polar coordinates. @@ -3628,7 +3688,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: logpolar([reverse=False]) + .. method:: logpolar(reverse:bool=False) -> Image Re-project's and image from cartessian coordinates to log polar coordinates. @@ -3641,7 +3701,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. 
method:: lens_corr([strength=1.8, [zoom=1.0, [x_corr=0.0, [y_corr=0.0]]]]) + .. method:: lens_corr(strength:float=1.8, zoom:float=1.0, x_corr:float=0.0, y_corr:float=0.0) -> Image Performs lens correction to un-fisheye the image due to the lens distortion. @@ -3659,7 +3719,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: img.rotation_corr([x_rotation=0.0, [y_rotation=0.0, [z_rotation=0.0, [x_translation=0.0, [y_translation=0.0, [zoom=1.0, [fov=60.0, [corners]]]]]]]]) + .. method:: rotation_corr(x_rotation=0.0, y_rotation=0.0, z_rotation=0.0, x_translation=0.0, y_translation=0.0, zoom=1.0, fov=60.0, corners:Optional[List[Tuple[int,int]]]=None) -> Image Corrects perspective issues in the image by doing a 3D rotation of the frame buffer. @@ -3704,7 +3764,84 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: get_histogram([thresholds, [invert=False, [roi, [bins, [l_bins, [a_bins, [b_bins, [difference]]]]]]]]) + Get Methods + ~~~~~~~~~~~ + + .. method:: get_similarity(image:Image, x:Optional[int]=0, y:Optional[int]=0, x_scale:float=1.0, y_scale:float=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel:int=-1, alpha:int=256, color_palette=None, alpha_palette=None, hint:int=0, dssim:bool=False) -> Similarity + + Computes the similarity between two images. The similarity is computed by + using the structural similarity index (SSIM). The SSIM is a metric that + compares the structural similarity between two images. The SSIM is a value + between -1 and 1. A value of 1 means the images are identical, a value of + 0 means the images are not similar, and a value of -1 means the images are + perfectly the opposite of each other. Typically, if you want to check + if two images are different you should look to see how negative the SSIM + value is. + + ``image`` is the image to compare to.
+ + You may also pass a path instead of an image object for this method to automatically load the image + from disk and use it in one step. E.g. ``get_similarity("test.jpg")``. + + ``x`` is the x offset to start comparing the image at. + + ``y`` is the y offset to start comparing the image at. + + ``x_scale`` controls how much the displayed image is scaled by in the x direction (float). If this + value is negative the image will be flipped horizontally. Note that if ``y_scale`` is not specified + then it will match ``x_scale`` to maintain the aspect ratio. + + ``y_scale`` controls how much the displayed image is scaled by in the y direction (float). If this + value is negative the image will be flipped vertically. Note that if ``x_scale`` is not specified + then it will match ``x_scale`` to maintain the aspect ratio. + + ``roi`` is the region-of-interest rectangle tuple (x, y, w, h) of the source image to draw. This + allows you to extract just the pixels in the ROI to scale and draw on the destination image. + + ``rgb_channel`` is the RGB channel (0=R, G=1, B=2) to extract from an RGB565 image (if passed) + and to render onto the destination image. For example, if you pass ``rgb_channel=1`` this will + extract the green channel of the source RGB565 image and draw that in grayscale on the + destination image. + + ``alpha`` controls how much of the source image to blend into the destination image. A value of + 256 draws an opaque source image while a value lower than 256 produces a blend between the source + and destination image. 0 results in no modification to the destination image. + + ``color_palette`` if not ``None`` can be `image.PALETTE_RAINBOW`, `image.PALETTE_IRONBOW`, or + a 256 pixel in total RGB565 image to use as a color lookup table on the grayscale value of + whatever the source image is. This is applied after ``rgb_channel`` extraction if used. 
+ + ``alpha_palette`` if not ``None`` can be a 256 pixel in total GRAYSCALE image to use as an alpha + palette which modulates the ``alpha`` value of the source image being drawn at a pixel + level allowing you to precisely control the alpha value of pixels based on their grayscale value. + A pixel value of 255 in the alpha lookup table is opaque while anything less than 255 becomes + more transparent until 0. This is applied after ``rgb_channel`` extraction if used. + + ``hint`` can be a logical OR of the flags: + + * `image.AREA`: Use area scaling when downscaling versus the default of nearest neighbor. + * `image.BILINEAR`: Use bilinear scaling versus the default of nearest neighbor scaling. + * `image.BICUBIC`: Use bicubic scaling versus the default of nearest neighbor scaling. + * `image.CENTER`: Center the image being drawn on the display. This is applied after scaling. + * `image.HMIRROR`: Horizontally mirror the image. + * `image.VFLIP`: Vertically flip the image. + * `image.TRANSPOSE`: Transpose the image (swap x/y). + * `image.EXTRACT_RGB_CHANNEL_FIRST`: Do rgb_channel extraction before scaling. + * `image.APPLY_COLOR_PALETTE_FIRST`: Apply color palette before scaling. + * `image.SCALE_ASPECT_KEEP`: Scale the image being drawn to fit inside the display. + * `image.SCALE_ASPECT_EXPAND`: Scale the image being drawn to fill the display (results in cropping) + * `image.SCALE_ASPECT_IGNORE`: Scale the image being drawn to fill the display (results in stretching). + * `image.ROTATE_90`: Rotate the image by 90 degrees (this is just VFLIP | TRANSPOSE). + * `image.ROTATE_180`: Rotate the image by 180 degrees (this is just HMIRROR | VFLIP). + * `image.ROTATE_270`: Rotate the image by 270 degrees (this is just HMIRROR | TRANSPOSE). + * `image.BLACK_BACKGROUND`: Assume the background image being drawn on is black speeding up blending. + + ``dssim`` if true will compute the structural dissimilarity index (DSSIM) instead of the SSIM. 
A + value of 0 means the images are identical. A value of 1 means the images are completely different. + + Returns a `image.Similarity` object. + + .. method:: get_histogram(thresholds:Optional[List[Tuple[int,int]]]=None, invert=False, roi:Optional[Tuple[int,int,int,int]]=None, bins=256, l_bins=256, a_bins=256, b_bins=256, difference:Optional[Image]=None) -> histogram Computes the normalized histogram on all color channels for an ``roi`` and returns a `image.histogram` object. Please see the `image.histogram` object for more @@ -3764,7 +3901,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: get_statistics([thresholds, [invert=False, [roi, [bins, [l_bins, [a_bins, [b_bins, [difference]]]]]]]]) + .. method:: get_statistics(thresholds:Optional[List[Tuple[int,int]]]=None, invert=False, roi:Optional[Tuple[int,int,int,int]]=None, bins=256, l_bins=256, a_bins=256, b_bins=256, difference:Optional[Image]=None) -> statistics Computes the mean, median, mode, standard deviation, min, max, lower quartile, and upper quartile for all color channels for an ``roi`` and @@ -3828,7 +3965,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: get_regression(thresholds, [invert=False, [roi, [x_stride=2, [y_stride=1, [area_threshold=10, [pixels_threshold=10, [robust=False]]]]]]]) + .. method:: get_regression(thresholds:List[Tuple[int,int]], invert=False, roi:Optional[Tuple[int,int,int,int]]=None, x_stride=2, y_stride=1, area_threshold=10, pixels_threshold=10, robust=False) -> line Computes a linear regression on all the thresholded pixels in the image. The linear regression is computed using least-squares normally which is fast but @@ -3888,7 +4025,10 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. 
method:: find_blobs(thresholds, [invert=False, [roi, [x_stride=2, [y_stride=1, [area_threshold=10, [pixels_threshold=10, [merge=False, [margin=0, [threshold_cb=None, [merge_cb=None, [x_hist_bins_max=0, [y_hist_bins_max=0]]]]]]]]]]]]) + Detection Methods + ~~~~~~~~~~~~~~~~~ + + .. method:: find_blobs(thresholds:List[Tuple[int,int]], invert=False, roi:Optional[Tuple[int,int,int,int]]=None, x_stride=2, y_stride=1, area_threshold=10, pixels_threshold=10, merge=False, margin=0, threshold_cb=None, merge_cb=None, x_hist_bins_max=0, y_hist_bins_max=0) -> List[blob] Finds all blobs (connected pixel regions that pass a threshold test) in the image and returns a list of `image.blob` objects which describe each blob. @@ -3984,7 +4124,7 @@ The image object is the basic object for machine vision operations. Not supported on compressed images or bayer images. - .. method:: find_lines([roi, [x_stride=2, [y_stride=1, [threshold=1000, [theta_margin=25, [rho_margin=25]]]]]]) + .. method:: find_lines(roi:Optional[Tuple[int,int,int,int]]=None, x_stride=2, y_stride=1, threshold=1000, theta_margin=25, rho_margin=25) -> List[line] Finds all infinite lines in the image using the hough transform. Returns a list of `image.line` objects. @@ -4020,7 +4160,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_line_segments([roi, [merge_distance=0, [max_theta_difference=15]]]) + .. method:: find_line_segments(roi:Optional[Tuple[int,int,int,int]]=None, merge_distance=0, max_theta_difference=15) -> List[line] Finds line segments in the image using the hough transform. Returns a list of `image.line` objects . @@ -4040,7 +4180,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_circles([roi, [x_stride=2, [y_stride=1, [threshold=2000, [x_margin=10, [y_margin=10, [r_margin=10, [r_min=2, [r_max, [r_step=2]]]]]]]]]]) + .. 
method:: find_circles(roi:Optional[Tuple[int,int,int,int]]=None, x_stride=2, y_stride=1, threshold=2000, x_margin=10, y_margin=10, r_margin=10, r_min=2, r_max:Optional[int]=None, r_step=2) -> List[circle] Finds circles in the image using the hough transform. Returns a list of `image.circle` objects. @@ -4080,7 +4220,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_rects([roi=Auto, [threshold=10000]]) + .. method:: find_rects(roi:Optional[Tuple[int,int,int,int]]=None, threshold=10000) -> List[rect] Find rectangles in the image using the same quad detection algorithm used to find apriltags. Works best of rectangles that have good contrast against the @@ -4098,7 +4238,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_qrcodes([roi]) + .. method:: find_qrcodes(roi:Optional[Tuple[int,int,int,int]]=None) -> List[qrcode] Finds all qrcodes within the ``roi`` and returns a list of `image.qrcode` objects. Please see the `image.qrcode` object for more information. @@ -4118,7 +4258,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_apriltags([roi, [families=image.TAG36H11, [fx, [fy, [cx, [cy]]]]]]) + .. method:: find_apriltags(roi:Optional[Tuple[int,int,int,int]]=None, families=TAG36H11, fx=0.0, fy=0.0, cx:Optional[int]=None, cy:Optional[int]=None) -> List[apriltag] Finds all apriltags within the ``roi`` and returns a list of `image.apriltag` objects. Please see the `image.apriltag` object for more information. @@ -4175,7 +4315,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_datamatrices([roi, [effort=200]]) + .. 
method:: find_datamatrices(roi:Optional[Tuple[int,int,int,int]]=None, effort=200) -> List[datamatrix] Finds all datamatrices within the ``roi`` and returns a list of `image.datamatrix` objects. Please see the `image.datamatrix` object for more information. @@ -4204,7 +4344,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_barcodes([roi]) + .. method:: find_barcodes(roi:Optional[Tuple[int,int,int,int]]=None) -> List[barcode] Finds all 1D barcodes within the ``roi`` and returns a list of `image.barcode` objects. Please see the `image.barcode` object for more information. @@ -4242,7 +4382,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_displacement(template, [roi, [template_roi, [logpolar=False]]]) + .. method:: find_displacement(template:Image, roi:Optional[Tuple[int,int,int,int]]=None, template_roi:Optional[Tuple[int,int,int,int]]=None, logpolar=False) -> List[displacement] Find the translation offset of the this image from the template. This method can be used to do optical flow. This method returns a `image.displacement` @@ -4273,7 +4413,7 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: find_template(template, threshold, [roi, [step=2, [search=image.SEARCH_EX]]]) + .. method:: find_template(template:Image, threshold:float, roi:Optional[Tuple[int,int,int,int]]=None, step=2, search=SEARCH_EX) -> Tuple[int,int,int,int] Tries to find the first location in the image where template matches using Normalized Cross Correlation. Returns a bounding box tuple (x, y, w, h) for @@ -4302,7 +4442,7 @@ The image object is the basic object for machine vision operations. Only works on grayscale images. - .. method:: find_features(cascade, [threshold=0.5, [scale=1.5, [roi]]]) + .. 
method:: find_features(cascade, threshold=0.5, scale=1.5, roi:Optional[Tuple[int,int,int,int]]=None) -> List[Tuple[int,int,int,int]] This method searches the image for all areas that match the passed in Haar Cascade and returns a list of bounding box rectangles tuples (x, y, w, h) @@ -4323,7 +4463,7 @@ The image object is the basic object for machine vision operations. specified, it is equal to the image rectangle. Only pixels within the ``roi`` are operated on. - .. method:: find_eye(roi) + .. method:: find_eye(roi:Tuple[int,int,int,int]) -> Tuple[int,int] Searches for the pupil in a region-of-interest (x, y, w, h) tuple around an eye. Returns a tuple with the (x, y) location of the pupil in the image. @@ -4341,7 +4481,7 @@ The image object is the basic object for machine vision operations. Only works on grayscale images. - .. method:: find_lbp(roi) + .. method:: find_lbp(roi:Tuple[int,int,int,int]) Extracts LBP (local-binary-patterns) keypoints from the region-of-interest (x, y, w, h) tuple. You can then use then use the `image.match_descriptor()` @@ -4353,7 +4493,7 @@ The image object is the basic object for machine vision operations. Only works on grayscale images. - .. method:: find_keypoints([roi, [threshold=20, [normalized=False, [scale_factor=1.5, [max_keypoints=100, [corner_detector=image.CORNER_AGAST]]]]]]) + .. method:: find_keypoints(roi:Optional[Tuple[int,int,int,int]]=None, threshold=20, normalized=False, scale_factor=1.5, max_keypoints=100, corner_detector=CORNER_AGAST) Extracts ORB keypoints from the region-of-interest (x, y, w, h) tuple. You can then use then use the `image.match_descriptor()` function to compare @@ -4387,7 +4527,7 @@ The image object is the basic object for machine vision operations. Only works on grayscale images. - .. method:: find_edges(edge_type, [threshold]) + .. method:: find_edges(edge_type, threshold=(100, 200)) Turns the image to black and white leaving only the edges as white pixels. 
@@ -4400,7 +4540,7 @@ The image object is the basic object for machine vision operations. Only works on grayscale images. - .. method:: find_hog([roi, [size=8]]) + .. method:: find_hog(roi:Optional[Tuple[int,int,int,int]]=None, size=8) Replaces the pixels in the ROI with HOG (histogram of orientated graidients) lines. @@ -4413,10 +4553,10 @@ The image object is the basic object for machine vision operations. This method is not available on the OpenMV Cam M4. - .. method:: stero_disparity([reversed=False, [max_disparity=64, [threshold=64]]]) + .. method:: stero_disparity(reversed:bool=False, max_disparity:int=64, threshold:int=64) Takes a double wide grayscale image that contains the output of two camera sensors - side-by-side and replaces one of the images int he double wide image with the stero-disparity + side-by-side and replaces one of the images in the double wide image with the stero-disparity image where each pixel reprsents depth. E.g. if you have two 320x240 cameras then this method takes a 640x240 image. @@ -4448,19 +4588,23 @@ Constants --------- .. data:: BINARY + :type: int BINARY (bitmap) pixel format. Each pixel is 1-bit. .. data:: GRAYSCALE + :type: int GRAYSCALE pixel format. Each pixel is 8-bits, 1-byte. .. data:: RGB565 + :type: int RGB565 pixel format. Each pixel is 16-bits, 2-bytes. 5-bits are used for red, 6-bits are used for green, and 5-bits are used for blue. .. data:: BAYER + :type: int RAW BAYER image pixel format. If you try to make the frame size too big to fit in the frame buffer your OpenMV Cam will set the pixel format @@ -4468,6 +4612,7 @@ Constants will be operational. .. data:: YUV422 + :type: int A pixel format that is very easy to jpeg compress. Each pixel is stored as a grayscale 8-bit Y value followed by alternating 8-bit U/V color values that are shared between two @@ -4475,28 +4620,34 @@ Constants methods work with YUV422. .. data:: JPEG + :type: int A JPEG image. .. data:: PNG + :type: int A PNG image. .. 
data:: PALETTE_RAINBOW + :type: int Default OpenMV Cam color palette for thermal images using a smooth color wheel. .. data:: PALETTE_IRONBOW + :type: int Makes images look like the FLIR Lepton thermal images using a very non-linear color palette. .. data:: AREA + :type: int Use area scaling when downscaling an image (Nearest Neighbor is used for upscaling). You should use area scaling when downscaling for the highest visual quality. .. data:: BILINEAR + :type: int Use bilinear scaling when upscaling an image. This produces a good quality scaled image output and is fast. @@ -4505,6 +4656,7 @@ Constants image. Use `image.AREA` for the higest quality downscaling if speed is not an issue. .. data:: BICUBIC + :type: int Use bicubic scaling when upscaling an image. This produces a high quality scaled image output, but is slow. @@ -4513,33 +4665,40 @@ Constants image. Use `image.AREA` for the higest quality downscaling if speed is not an issue. .. data:: VFLIP + :type: int Vertically flip the image being drawn by `draw_image`. .. data:: HMIRROR + :type: int Horizontally mirror the image being drawn by `draw_image`. .. data:: TRANSPOSE + :type: int Transpose (swap x/y) the image being draw by `draw_image`. .. data:: CENTER + :type: int Center the image being drawn to the center of the image/canvas it's being drawn on. Any x/y offsets passed will move the image being drawn from the center by that amount. .. data:: EXTRACT_RGB_CHANNEL_FIRST + :type: int When extracting an RGB channel from an RGB image using `draw_image` extract the channel first before scaling versus afterwards to prevent any artifacts. .. data:: APPLY_COLOR_PALETTE_FIRST + :type: int When applying a color lookup table to an image using `draw_image` apply the color look table first before scaling versus afterwards to prevent any artifacts. .. data:: SCALE_ASPECT_KEEP + :type: int Scale the image being drawn to fit inside of the image/canvas being drawn on while maintaining the aspect ratio. 
Unless the image aspect ratios match the image being drawn will not completley @@ -4547,143 +4706,198 @@ Constants the scaled image. .. data:: SCALE_ASPECT_EXPAND + :type: int Scale the image being drawn to fill image/canvas being drawn on while maintaining the aspect ratio. Unless the image aspect ratios match the image being drawn will be cropped. Any x_scale/y_scale values passed will additionally scale the scaled image. .. data:: SCALE_ASPECT_IGNORE + :type: int Scale the image being drawn to fill the image/canvas being drawn on. This does not maintain the aspect ratio of the image being drawn. Any x_scale/y_scale values passed will additionally scale the scaled image. +.. data:: BLACK_BACKGROUND + :type: int + + Speeds up `draw_image` when drawing on a black destination image when using alpha effects that + require reading both source and destination pixels. This skips reading the destination pixel. + .. data:: ROTATE_90 + :type: int Rotate the image by 90 degrees (this is just `image.VFLIP` ORed with `image.TRANSPOSE`). .. data:: ROTATE_180 + :type: int Rotate the image by 180 degrees (this is just `image.HMIRROR` ORed with `image.VFLIP`). .. data:: ROTATE_270 + :type: int Rotate the image by 270 degrees (this is just `image.HMIRROR` ORed with `image.TRANSPOSE`). -.. data:: BLACK_BACKGROUND +.. data:: JPEG_SUBSAMPLING_AUTO + :type: int - Speeds up `draw_image` when drawing on a black destination image when using alpha effects that - require reading both source and destination pixels. This skips reading the destination pixel. + Automatically select the best JPEG subsampling based on the image quality parameter. + +.. data:: JPEG_SUBSAMPLING_444 + :type: int + + Use 4:4:4 JPEG subsampling. + +.. data:: JPEG_SUBSAMPLING_422 + :type: int + + Use 4:2:2 JPEG subsampling. Note, you should force the jpeg subsampling to be 4:2:2 if you are + streaming video via MJPEG for the best compatibility with third-party video players. + +.. 
data:: JPEG_SUBSAMPLING_420 + :type: int + + Use 4:2:0 JPEG subsampling. .. data:: SEARCH_EX + :type: int Exhaustive template matching search. .. data:: SEARCH_DS + :type: int Faster template matching search. .. data:: EDGE_CANNY + :type: int Use the canny edge detection algorithm for doing edge detection on an image. .. data:: EDGE_SIMPLE + :type: int Use a simple thresholded high pass filter algorithm for doing edge detection on an image. .. data:: CORNER_FAST + :type: int Faster and less accurate corner detection algorithm for ORB keypoints. .. data:: CORNER_AGAST + :type: int Slower and more accurate corner detection algorithm for ORB keypoints. .. data:: TAG16H5 + :type: int TAG1H5 tag family bit mask enum. Used for AprilTags. .. data:: TAG25H7 + :type: int TAG25H7 tag family bit mask enum. Used for AprilTags. .. data:: TAG25H9 + :type: int TAG25H9 tag family bit mask enum. Used for AprilTags. .. data:: TAG36H10 + :type: int TAG36H10 tag family bit mask enum. Used for AprilTags. .. data:: TAG36H11 + :type: int TAG36H11 tag family bit mask enum. Used for AprilTags. .. data:: ARTOOLKIT + :type: int ARTOOLKIT tag family bit mask enum. Used for AprilTags. .. data:: EAN2 + :type: int EAN2 barcode type enum. .. data:: EAN5 + :type: int EAN5 barcode type enum. .. data:: EAN8 + :type: int EAN8 barcode type enum. .. data:: UPCE + :type: int UPCE barcode type enum. .. data:: ISBN10 + :type: int ISBN10 barcode type enum. .. data:: UPCA + :type: int UPCA barcode type enum. .. data:: EAN13 + :type: int EAN13 barcode type enum. .. data:: ISBN13 + :type: int ISBN13 barcode type enum. .. data:: I25 + :type: int I25 barcode type enum. .. data:: DATABAR + :type: int DATABAR barcode type enum. .. data:: DATABAR_EXP + :type: int DATABAR_EXP barcode type enum. .. data:: CODABAR + :type: int CODABAR barcode type enum. .. data:: CODE39 + :type: int CODE39 barcode type enum. .. data:: PDF417 + :type: int PDF417 barcode type enum - Future (e.g. doesn't work right now). .. 
data:: CODE93 + :type: int CODE93 barcode type enum. .. data:: CODE128 + :type: int CODE128 barcode type enum. diff --git a/docs/library/omv.imu.rst b/docs/library/omv.imu.rst index 3c2ae972548b..2de17eeaaf50 100644 --- a/docs/library/omv.imu.rst +++ b/docs/library/omv.imu.rst @@ -14,7 +14,7 @@ IMU sensor under the camera sensor. Functions --------- -.. function:: acceleration_mg() +.. function:: acceleration_mg() -> Tuple[float, float, float] Returns the acceleration for (x, y, z) in a float tuple in milli-g's. @@ -24,7 +24,7 @@ Functions Y points down below the camera sensor (towards the bottom on the board) Z points in the reverse direction of the camera sensor (into the table) -.. function:: angular_rate_mdps() +.. function:: angular_rate_mdps() -> Tuple[float, float, float] Returns the angular rate for (x, y, z) in a float tuple in milli-degrees-per-second. @@ -34,11 +34,11 @@ Functions Y points down below the camera sensor (towards the bottom on the board) Z points in the reverse direction of the camera sensor (into the table) -.. function:: temperature_c() +.. function:: temperature_c() -> float Returns the temperature in celsius (float). -.. function:: roll() +.. function:: roll() -> float Returns the rotation angle in degrees (float) of the camera module. @@ -47,7 +47,7 @@ Functions * 180 -> Camera is upside down. * 270 -> Camera is rotated right. -.. function:: pitch() +.. function:: pitch() -> float Returns the rotation angle in degrees (float) of the camera module. @@ -56,14 +56,14 @@ Functions * 180 -> Camera is upside down. * 270 -> Camera is pointing up. -.. function:: sleep(enable) +.. function:: sleep(enable:bool) -> None Pass ``True`` to put the IMU sensor to sleep. ``False`` to wake it back up (the default). -.. function:: __write_reg(addr, val) +.. function:: __write_reg(addr:int, val:int) -> None Set 8-bit LSM6DS3 register ``addr`` to 8-bit ``val``. -.. function:: __read_reg(addr) +.. 
function:: __read_reg(addr:int) -> int Get 8-bit LSM6DS3 register ``addr``. diff --git a/docs/library/omv.micro_speech.rst b/docs/library/omv.micro_speech.rst deleted file mode 100644 index 67bde9896f9b..000000000000 --- a/docs/library/omv.micro_speech.rst +++ /dev/null @@ -1,43 +0,0 @@ -:mod:`micro_speech` --- Micro Speech Audio Module Example -========================================================= - -.. module:: micro_speech - :synopsis: Example voice recognition module - -The `micro_speech` module runs Google's TensorFlow Lite for Microcontrollers Micro Speech framework -for voice recognition. - -Please see this `guide `__ for training a new model. - -Constructors ------------- - -.. class:: MicroSpeech() - - Creates a MicroSpeech voice recognition class. - - .. method:: audio_callback(buf_in) - - Pass this method to `audio.start_streaming()` to fill the `MicroSpeech` class with audio samples. - - `MicroSpeech` will compute the FFT of the audio samples and keep a sliding window internally - of the FFT the last 100ms or so of audio samples received as features for voice recognition. - - .. method:: listen(tf_model, [threshold=0.9, [timeout=1000, [filter=None]]]) - - Executes the tensor flow lite model ``tf_model``, which should be a path to a tensor flow lite - model on disk, on the audio stream. - - This method will continue to execute the model until it classifies a result that has a - confidence ratio above ``threshold`` and that's within the range specified by ``filter``. - - For example, if the model is designed to classify sounds into the four labels ['Silence', - 'Unknown', 'Yes', 'No'], then a ``threshold`` of 0.7 mean that listen() only returns when - the confidence score for one of those classes goes above 0.7. ``filter`` can then be ``[2, 3]`` - to specify that we only care about 'Yes' or 'No' going above 0.7. - - ``timeout`` is the amount of time to run the model on audio data. 
If zero then listen will - run forever until a result passes the threshold and filter criteria. - - Returns the index of the the label with the highest confidence score. E.g. for the example - above 0, 1, 2, or 3 for ['Silence', 'Unknown', 'Yes', 'No'] respectively. diff --git a/docs/library/omv.mjpeg.rst b/docs/library/omv.mjpeg.rst index 6c6d3ef6eb3f..12372ee6037d 100644 --- a/docs/library/omv.mjpeg.rst +++ b/docs/library/omv.mjpeg.rst @@ -38,7 +38,7 @@ Example usage:: Constructors ~~~~~~~~~~~~ -.. class:: Mjpeg(filename, [width, [height]]) +.. class:: Mjpeg(filename:str, width:Optional[int]=None, height:Optional[int]=None) Create a Mjpeg object which you can add frames to. ``filename`` is the path to save the mjpeg recording to. @@ -52,27 +52,27 @@ Constructors Methods ~~~~~~~ - .. method:: is_closed() + .. method:: is_closed() -> bool Return True if the file was closed. You cannot write more data to a closed file. - .. method:: width() + .. method:: width() -> int Returns the width (horizontal resolution) for the mjpeg file. - .. method:: height() + .. method:: height() -> int Returns the height (vertical resolution) for the mjpeg file. - .. method:: count() + .. method:: count() -> int Returns the number of frames in the mjpeg file. - .. method:: size() + .. method:: size() -> int Returns the file size in bytes of the mjpeg so far. This value is updated after adding frames. - .. method:: add_frame(image, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0, [quality=90]]]]]]]) + .. method:: add_frame(image:image.Image, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None, hint=0, quality=90) Add an image to the mjpeg recording. The added image is automatically scaled up/down while preserving the aspect-ratio to the resolution specified when the mjpeg file was created. @@ -124,7 +124,7 @@ Constructors Returns the object. - .. 
method:: write(image, [quality=90, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0]]]]]]]) + .. method:: write(image:image.Image, quality=90, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None, hint=0) Alias for `Mjpeg.add_frame()`. diff --git a/docs/library/omv.ml.apps.rst b/docs/library/omv.ml.apps.rst new file mode 100644 index 000000000000..e42e2140be9e --- /dev/null +++ b/docs/library/omv.ml.apps.rst @@ -0,0 +1,53 @@ +.. currentmodule:: ml.apps + +:mod:`ml.apps` --- ML Apps +========================== + +.. module:: ml.apps + :synopsis: ML Apps + +The `ml.apps` module contains various ML application classes. + +.. _apps.MicroSpeech: + +class MicroSpeech -- Speech Recognition +--------------------------------------- + +The MicroSpeech object is used to recognize simple spoken words using the MicroSpeech model from +TensorFlow Lite for Microcontrollers. The model recognizes the words "yes" and "no" by default. + +You can customize the model to recognize other words by training a new model. See the +`Micro Speech `__ +guide. + +Constructors +~~~~~~~~~~~~ + +.. class:: MicroSpeech(preprocessor:str=None, micro_speech:str=None, labels:list[str, ...]=None) -> MicroSpeech + + Creates a MicroSpeech object. If no preprocessor is provided, the default preprocessor is used. + If no micro_speech model is provided, the default model is used. If no labels are provided, the + default labels are used from the default model. + + Methods + ~~~~~~~ + + .. method:: listen(timeout:int=0, callback=None, threshold:float=0.65, filter:list[str, ...]=["Yes", "No"]) -> tuple[str, float] + + Listens for a spoken word and returns the word and confidence level as a tuple if the + confidence level is above the threshold and the word is in the filter list. + + ``timeout`` is the maximum time in milliseconds to listen for a word. 
If zero, the method + will listen indefinitely until a word is recognized. If -1 is passed, the method will not + block and will return immediately with the result tuple which may contain ``None`` if no + word is recognized. If a positive value is passed, the method will listen for that amount + of time in milliseconds and then return the result tuple. + + ``callback`` is a function that will be called with the word and confidence level instead + of returning the result. When combined with a timeout of zero, this allows you to listen + for words indefinitely and process them as they are recognized. + + ``threshold`` is the minimum confidence level required to return a result. + + ``filter`` is a list of words that the model should recognize. If the recognized word is + not in the filter list, the result is ignored. diff --git a/docs/library/omv.ml.preprocessing.rst b/docs/library/omv.ml.preprocessing.rst new file mode 100644 index 000000000000..9b5a0fe0b433 --- /dev/null +++ b/docs/library/omv.ml.preprocessing.rst @@ -0,0 +1,53 @@ +.. currentmodule:: ml.preprocessing + +:mod:`ml.preprocessing` --- ML Preprocessing +============================================ + +.. module:: ml.preprocessing + :synopsis: ML Preprocessing + +The `ml.preprocessing` module contains classes for preprocessing images for use with machine learning models. + +.. _preprocessing.Normalization: + +class Normalization -- Image Normalization +------------------------------------------ + +The `Normalization` object is used to convert image objects to numpy ``ndarray`` objects for use with the `Model` object. +It's automatically created by the `Model` object when an image object is passed to `predict()`. However, +you can also manually create a `Normalization` object to control the conversion process, select an ROI, etc. 
+ +For example:: + + model = ml.Model("model.tflite") + norm = ml.Normalization(scale=(0.0, 1.0), mean=(0.485, 0.456, 0.406), stdev=(0.229, 0.224, 0.225)) + outputs = model.predict([norm(image)]) + +Constructors +~~~~~~~~~~~~ + +.. class:: Normalization(scale:tuple[float, float]=(0.0, 1.0), mean:tuple[float, float, float]=(0.0, 0.0, 0.0), stdev:tuple[float, float, float]=(1.0, 1.0, 1.0), roi:tuple[int,int,int,int]=None) -> Normalization + + Creates a `Normalization` object which is used to convert image objects to numpy arrays for use with the + `predict()`. The object can also be used to select a region of interest (ROI) in the image to + convert to a numpy array. + + The Normalization object automatically converts any image type passed (including compressed images) + into either a single channel (grayscale) or three channel (RGB888) image which is passed to the tensor + input of the model. Images are centered, scaled up/down (using bilinear/area scaling), and cropped as + necessary to match the input tensor size of the model. + + For ``uint8`` input tensors the image is directly passed ignoring scale and mean/stdev. For ``int8`` + input tensors the image is shifted to be within the ``int8`` range from the ``uint8`` range and + then directly passed ignoring scale and mean/stdev. Tensors that accept either of these formats + can be processed more quickly than tensors that require floating point inputs. + + For floating point input tensors it's not possible to guess the correct range that the model + expects. While each input tensor encodes a scale and zero point value that can be used to + convert the input to the correct range, these values do not tell you what the range + of the input data should be in floating point. E.g. should image RGB values be within the range + of (0.0, 1.0), (-1.0, 1.0), (0.0, 255.0), and etc. before applying a scale and zero point? The + answer is that it depends on the model and how it was trained. 
So, the normalization object instead + allows you to directly specify the range of the input data, the mean, and the standard deviation. The + Grayscale or RGB888 image is then converted into a floating point tensor for the model to process + based on these values. diff --git a/docs/library/omv.ml.rst b/docs/library/omv.ml.rst new file mode 100644 index 000000000000..07a83f3f9c0b --- /dev/null +++ b/docs/library/omv.ml.rst @@ -0,0 +1,167 @@ +:mod:`ml` --- Machine Learning +============================== + +.. module:: ml + :synopsis: Machine Learning + +The `ml` module contains functionality for processing machine learning models on the OpenMV Cam. + +The heart of the `ml` module is the `Model()` object which is used to load and execute +TensorFlow Lite models. The `Model()` object accepts a list of up to 4D input tensors for +each model input tensor and returns a list of up to 4D output tensors for each model output +tensor. Each input/output tensor works using a numpy ``ndarray``. + +For TensorFlow Lite models, the `Model()` object handles all ops enabled +`here `_. The `Model()` +object will automatically leverage CMSIS-NN, Helium, and an Ethos NPU if available to speed up +inference. Availability of these accelerators is dependent on the OpenMV Cam model. + +For image processing support the `ml` module automatically converts passed image objects to numpy +``ndarray`` objects by wrapping them with the `Normalization()` object which handles this conversion. The +`Normalization()` object can also be manually created to control the conversion process, select an +ROI, and etc. + +For more information on ``ndarray`` objects see the +`ulab documentation `_. All OpenMV Cams support +ndarray objects up to rank 4 (meaning 4D tensors). + +.. note:: + + Complex number support and the `scipy special module `_ + are currently disabled on all OpenMV Cams at the moment to save flash space. + +Sub Modules +----------- + +.. 
toctree:: + :maxdepth: 2 + + omv.ml.apps.rst + omv.ml.preprocessing.rst + omv.ml.utils.rst + +class model -- Model Container +------------------------------ + +A model object is used to load and execute TensorFlow Lite models. The model object accepts a list +of up to 4D input tensors per model corresponding to the number of tensor inputs of the model +and returns a list of up to 4D output tensors corresponding to the number of tensor outputs of the +model. Each input/output tensor is a numpy ``ndarray``. + +Constructors +~~~~~~~~~~~~ + +.. class:: Model(path:str, load_to_fb:bool=False) -> Model + + Loads a model from ``path`` into memory and prepares it for being executed. ``path`` can either + be a file on disk or the name of a built-in model which will be loaded from internal flash. Models that are + built-in to the internal flash firmware image do not take up RAM to store the model weights when used. + + If the model you are trying to load is very large and doesn't fit in the MicroPython heap you + can set ``load_to_fb`` to True to load the model into the frame buffer stack instead. This allows + you to get around the heap size limitations. However, models loaded this way need to be deallocated + in-order with anything else that uses the frame buffer stack versus the MicroPython heap. Typically, + the frame buffer stack is much larger than the MicroPython heap so you can load much larger models + using this option, but, you need to be careful if you deallocate. + + Once a model is loaded you can execute it multiple times with different inputs using `predict()`. + The model will remember its internal state between calls to `predict()`. + + When deleted the model will automatically free up any memory it used from the heap or frame buffer stack. + + Methods + ~~~~~~~ + + .. method:: predict(inputs:list, callback=None) -> list + + Executes the model with the given inputs. 
The inputs should be a list of numpy ``ndarray`` objects corresponding + to the number of input tensors the model supports. The method returns a list of numpy ``ndarray`` objects + corresponding to the number of output tensors the model has. + + The model input tensors can be up to 4D tensors of uint8, int8, int16, or float32 values. The passed + numpy ``ndarray`` for an input tensor is then converted to floating point and scaled/offset based on + the input tensor's scale and zero point values before being passed to the model. For example, an ``ndarray`` + of uint8 values will be converted to float32s between 0.0-255.0, divided by the input tensor's scale, and + then have the input tensor's zero point added to it. The same process is done for int8 and int16 values + whereas float32 values are passed directly to the model ignoring the scale and zero point values. + + The model's output tensors can be up to 4D tensors of uint8, int8, or float32 values. For uint8 + and int8 tensors the returned numpy ndarray is created by subtracting the output tensor's zero + point value before multiplying by the output tensor's scale value. For float32 tensors, values are + passed directly to the output without any scaling or offset being applied. + + Note that `predict()` requires the shape of the input ``ndarray`` objects to match the shape of the model + input tensors exactly. You can use the ``reshape()`` method of an ndarray with the `input_shape` + attribute of the model to reshape the input data to the correct shape if necessary. + + If a ``callback`` is passed then it will receive the `Model`, ``inputs``, and ``outputs`` as arguments + which allows for custom post-processing of the model outputs. The callback may then return + whatever it likes which will be returned by `predict()`. The ``callback`` method allows for building + up a library of post-processing functions that can be used on demand for different models. 
+ + For custom pre-processing, `predict()` also accepts "callable" objects as inputs. Any object + implementing the ``__call__`` method can be passed to `predict()` as an input. `predict()` will + then call the object with a writeable bytearray representing the input tensor, the input tensor's shape tuple, + and the input tensor's data type value (as an int). The object should then set the input tensor data in the + bytearray to what the model expects. This is how `Normalization()` converts image objects to input tensors. + + Attributes + ~~~~~~~~~~ + + .. attribute:: len + :type: int + + The size of the loaded model in bytes. + + .. attribute:: ram + :type: int + + The amount of RAM used by the model for its tensor arena. + + .. attribute:: input_shape + :type: list[tuple[int, ...]] + + A list of tuples containing the shape of each input tensor. + + .. attribute:: input_dtype + :type: list[str] + + A list of strings containing the data type of each input tensor. + 'b', 'B', 'h', and 'f' respectively for uint8, int8, int16, and float32. + + .. attribute:: input_scale + :type: list[float] + + A list of floats containing the scale of each input tensor. + + .. attribute:: input_zero_point + :type: list[int] + + A list of integers containing the zero point of each input tensor. + + .. attribute:: output_shape + :type: list[tuple[int, ...]] + + A list of tuples containing the shape of each output tensor. + + .. attribute:: output_dtype + :type: list[str] + + A list of strings containing the data type of each output tensor. + 'b', 'B' and 'f' respectively for uint8, int8 and float32. + + .. attribute:: output_scale + :type: list[float] + + A list of floats containing the scale of each output tensor. + + .. attribute:: output_zero_point + :type: list[int] + + A list of integers containing the zero point of each output tensor. + + .. 
attribute:: labels + :type: list[str] + + A list of strings containing the labels for the model (if it was built-in to the firmware with labels, + otherwise, ``None``). diff --git a/docs/library/omv.ml.utils.rst b/docs/library/omv.ml.utils.rst new file mode 100644 index 000000000000..b3cf668cc9fd --- /dev/null +++ b/docs/library/omv.ml.utils.rst @@ -0,0 +1,56 @@ +.. currentmodule:: ml.utils + +:mod:`ml.utils` --- ML Utils +============================ + +.. module:: ml.utils + :synopsis: ML Utils + +The `ml.utils` module contains utility classes and functions for machine learning. + +.. _utils.NMS: + +class NMS - Soft-Non-Maximum Suppression +---------------------------------------- + +The `NMS` object is used to collect a list of bounding boxes and their associated scores and then filter +out overlapping bounding boxes with lower scores. Additionally, it remaps bounding boxes detected +in a sub-window back to the original image coordinates. + +Constructors +~~~~~~~~~~~~ + +.. class:: NMS(window_w:int, window_h:int, roi:tuple[int,int,int,int]) -> NMS + + Creates a `NMS` object with the given window size and region of interest (ROI). The window is + width/height of the input tensor of the image model. The ROI is the region of interest that is returned by the + `Normalization()` object which corresponds to the region of the image that the model was run on. + This allows the `NMS` object to remap bounding boxes detected in a sub-window back to the original + image coordinates. + + Methods + ~~~~~~~ + + .. method:: add_bounding_boxes(xmin:float, ymin:float, xmax:float, ymax:float, score:float, label_index:int) -> None + + Adds a bounding box to the `NMS` object with the given coordinates, score, and label index. + + ``xmin``, ``ymin``, ``xmax``, and ``ymax`` are the bounding box coordinates in the range of 0.0 to 1.0 + where (0.0, 0.0) is the top-left corner of the image and (1.0, 1.0) is the bottom-right corner of the image. 
+ + ``score`` is the confidence score of the bounding box (0.0-1.0). + + ``label_index`` is the index of the label associated with the bounding box. + + .. method:: get_bounding_boxes(threshold:float=0.1, sigma:float=0.1) -> list[tuple[int,int,int,int,float,int]] + + Returns a list of bounding boxes that have been filtered by the `NMS` object and remapped + to the original image coordinates. Bounding box tuples are + ``(x, y, w, h, score, label_index)``. After calling this method you should create a new + `NMS` object if you want to process a new set of bounding boxes. + + Bounding boxes must have a higher score than ``threshold`` to be kept. + + ``sigma`` controls the gaussian used to apply a score penalty to overlapping bounding boxes + using the Soft-Non-Maximum-Suppression algorithm. A higher ``sigma`` will result in a more + aggressive suppression of overlapping bounding boxes. diff --git a/docs/library/omv.omv.rst b/docs/library/omv.omv.rst index 0b4eb4aa03a9..8f06ace9549c 100644 --- a/docs/library/omv.omv.rst +++ b/docs/library/omv.omv.rst @@ -9,38 +9,38 @@ The ``omv`` module is used to get OpenMV Cam information. Functions --------- -.. function:: version_major() +.. function:: version_major() -> int Returns the major version number (int). -.. function:: version_minor() +.. function:: version_minor() -> int Returns the minor version number (int). -.. function:: version_patch() +.. function:: version_patch() -> int Returns the patch version number (int). -.. function:: version_string() +.. function:: version_string() -> str Returns the version string (e.g. "2.8.0"). -.. function:: arch() +.. function:: arch() -> str Returns the board architecture string. This string is really just meant for OpenMV IDE but you can get it with this function. -.. function:: board_type() +.. function:: board_type() -> str Returns the board type string. This string is really just meant for OpenMV IDE but you can get it with this function. -.. function:: board_id() +.. 
function:: board_id() -> str Returns the board id string. This string is really just meant for OpenMV IDE but you can get it with this function. -.. function:: disable_fb([disable]) +.. function:: disable_fb(disable: Optional[bool]=None) -> bool When ``disable`` is set to ``True`` the OpenMV Cam will no longer jpeg compress images and stream them to OpenMV IDE. The IDE may still poll for images unless ``Disable FB`` is checked in OpenMV diff --git a/docs/library/omv.rpc.rst b/docs/library/omv.rpc.rst index 0f6b76dbf702..e8a56c184373 100644 --- a/docs/library/omv.rpc.rst +++ b/docs/library/omv.rpc.rst @@ -91,7 +91,7 @@ Constructors Methods ~~~~~~~ - .. method:: get_bytes(buff, timeout_ms): + .. method:: get_bytes(buff, timeout_ms:int) This method is meant to be reimplemented by specific interface classes of `rpc_master` and `rpc_slave`. It should fill the ``buff`` argument which is either a `bytearray` or `memoryview` object of bytes from the @@ -100,13 +100,13 @@ Constructors complete in at least ``timeout_ms`` milliseconds and not faster as the `rpc_master` and `rpc_slave` objects will automatically increase the ``timeout_ms`` to synchronize. - .. method:: put_bytes(data, timeout_ms): + .. method:: put_bytes(data, timeout_ms:int) This method is meant to be reimplemented by specific interface classes of `rpc_master` and `rpc_slave`. It should send ``data`` bytes on the interface within ``timeout_ms`` milliseconds. If it completes faster than the timeout that is okay. No return value is expected. - .. method:: stream_reader(call_back, queue_depth=1, read_timeout_ms=5000): + .. method:: stream_reader(call_back, queue_depth=1, read_timeout_ms=5000) This method is meant to be called directly. 
After synchronization of the master and slave on return of a callback ``stream_reader`` may be called to receive data as fast as possible from the master or @@ -124,7 +124,7 @@ Constructors If you need to cancel the ``stream_reader`` just raise an exception in the ``call_back`` and catch it. The remote side will automatically timeout. - .. method:: stream_writer(call_back, write_timeout_ms=5000): + .. method:: stream_writer(call_back, write_timeout_ms=5000) This method is meant to be called directly. After synchronization of the master and slave on return of a ``callback`` ``stream_writer`` may be called to send data as fast as possible from the master or slave @@ -155,7 +155,7 @@ Constructors Methods ~~~~~~~ - .. method:: call(name, data=bytes(), send_timeout=1000, recv_timeout=1000): + .. method:: call(name, data=bytes(), send_timeout=1000, recv_timeout=1000) Executes a remote call on the slave device. ``name`` is a string name of the remote function or method to execute. ``data`` is the ``bytes`` like object that will be sent as the argument of the remote function @@ -187,13 +187,13 @@ Constructors Methods ~~~~~~~ - .. method:: register_callback(cb): + .. method:: register_callback(cb) Registers a call back that can be executed by the master device. The call back should take one argument which will be a ``memoryview`` object and it should return a ``bytes()`` like object as the result. The call back should return in less than 1 second if possible. - .. method:: schedule_callback(cb): + .. method:: schedule_callback(cb) After you execute ``rpc_slave.loop()`` it is not possible to execute long running operations outside of the ``rpc`` library. ``schedule_callback`` allows you to break out of the ``rpc`` library temporarily after completion @@ -221,7 +221,7 @@ Constructors limits the size of the data moved inside the ``rpc`` library without running out of memory on the OpenMV Cam. - .. method:: setup_loop_callback(cb): + .. 
method:: setup_loop_callback(cb) The loop call back is called every loop iteration of ``rpc_slave.loop()``. Unlike the ``rpc.schedule_callback()`` call back this call back stays registered after being registered once. You can use the loop call back to @@ -236,7 +236,7 @@ Constructors at a fixed frequency. Please see how to Write Interrupt Handlers for more information. Note: The `Mutex` library is installed on your OpenMV Cam along with the ``rpc`` library. - .. method:: loop(recv_timeout=1000, send_timeout=1000): + .. method:: loop(recv_timeout=1000, send_timeout=1000) Starts execution of the ``rpc`` library on the slave to receive data. This method does not return (except via an exception from a call back). You should register all call backs first before @@ -256,7 +256,7 @@ Control another ``rpc`` device over CAN. Constructors ~~~~~~~~~~~~ -.. class:: rpc_can_master(message_if=0x7FF, bit_rate=250000, sample_point=75, can_bus=2): +.. class:: rpc_can_master(message_if=0x7FF, bit_rate=250000, sample_point=75, can_bus=2) Creates a CAN ``rpc`` master. This interface can move up to 1 Mb/s. @@ -276,7 +276,7 @@ Be controlled by another ``rpc`` device over CAN. Constructors ~~~~~~~~~~~~ -.. class:: rpc_can_slave(message_id=0x7FF, bit_rate=250000, sample_point=75, can_bus=2): +.. class:: rpc_can_slave(message_id=0x7FF, bit_rate=250000, sample_point=75, can_bus=2) Creates a CAN ``rpc`` slave. This interface can move up to 1 Mb/s. diff --git a/docs/library/omv.sensor.rst b/docs/library/omv.sensor.rst index a7fb190f471a..f8c18eb30daa 100644 --- a/docs/library/omv.sensor.rst +++ b/docs/library/omv.sensor.rst @@ -23,19 +23,19 @@ Example usage:: Functions --------- -.. function:: reset() +.. function:: reset() -> None Initializes the camera sensor. -.. function:: sleep(enable) +.. function:: sleep(enable:bool) -> None Puts the camera to sleep if enable is True. Otherwise, wakes it back up. -.. function:: shutdown(enable) +.. 
function:: shutdown(enable:bool) -> None Puts the camera into a lower power mode than sleep (but the camera must be reset on being woken up). -.. function:: flush() +.. function:: flush() -> None Copies whatever was in the frame buffer to the IDE. You should call this method to display the last image your OpenMV Cam takes if it's not running @@ -43,7 +43,7 @@ Functions of about a second after your script finishes for the IDE to grab the image from your camera. Otherwise, this method will have no effect. -.. function:: snapshot() +.. function:: snapshot() -> image.Image Takes a picture using the camera and returns an ``image`` object. @@ -69,7 +69,7 @@ Functions RAM the pixformat, framesize, windowing, and framebuffers. The cropping parameters will be applied to maintain the aspect ratio and will stay until `sensor.set_framesize()` or `sensor.set_windowing()` are called. -.. function:: skip_frames([n, time]) +.. function:: skip_frames(n:Optional[int]=None, time:Optional[int]=None) -> None Takes ``n`` number of snapshots to let the camera image stabilize after changing camera settings. ``n`` is passed as normal argument, e.g. @@ -92,21 +92,21 @@ Functions RAM given the pixformat, framesize, windowing, and framebuffers. The cropping parameters will be applied to maintain the aspect ratio and will stay until `sensor.set_framesize()` or `sensor.set_windowing()` are called. -.. function:: width() +.. function:: width() -> int Returns the sensor resolution width. -.. function:: height() +.. function:: height() -> int Returns the sensor resolution height. -.. function:: get_fb() +.. function:: get_fb() -> Optional[image.Image] (Get Frame Buffer) Returns the image object returned by a previous call of `sensor.snapshot()`. If `sensor.snapshot()` had not been called before then ``None`` is returned. -.. function:: get_id() +.. function:: get_id() -> int Returns the camera module ID. @@ -122,7 +122,7 @@ Functions * `sensor.GC2145`: Arduino Nicla Vision H7 sensor module. 
* `sensor.PAJ6100`: PixArt Imaging sensor Module. -.. function:: alloc_extra_fb(width, height, pixformat) +.. function:: alloc_extra_fb(width:int, height:int, pixformat:int) -> image.Image Allocates another frame buffer for image storage from the frame buffer stack and returns an ``image`` object of ``width``, ``height``, and ``pixformat``. @@ -144,7 +144,7 @@ Functions memory more easily if you try to execute more memory intensive machine vision algorithms like `Image.find_apriltags()`. -.. function:: dealloc_extra_fb() +.. function:: dealloc_extra_fb() -> None Deallocates the last previously allocated extra frame buffer. Extra frame buffers are stored in a stack like structure. @@ -161,7 +161,7 @@ Functions left over. This memory allocation method is extremely efficent for computer vision on microcontrollers. -.. function:: set_pixformat(pixformat) +.. function:: set_pixformat(pixformat:int) -> None Sets the pixel format for the camera module. @@ -175,11 +175,11 @@ Functions resolutions you should set the pixformat to `sensor.JPEG`. You can control the image quality then with `sensor.set_quality()`. -.. function:: get_pixformat() +.. function:: get_pixformat() -> int Returns the pixformat for the camera module. -.. function:: set_framesize(framesize) +.. function:: set_framesize(framesize:int) -> None Sets the frame size for the camera module. @@ -222,11 +222,11 @@ Functions * `sensor.WQXGA`: 2560x1600 (only for the OV5640 sensor) * `sensor.WQXGA2`: 2592x1944 (only for the OV5640 sensor) -.. function:: get_framesize() +.. function:: get_framesize() -> int Returns the frame size for the camera module. -.. function:: set_framerate(rate) +.. function:: set_framerate(rate:int) -> None Sets the frame rate in hz for the camera module. @@ -238,11 +238,11 @@ Functions the camera sensor frame rate internally to save power and improve image quality by increasing the sensor exposure. `set_framerate` may conflict with `set_auto_exposure` on some cameras. -.. 
function:: get_framerate() +.. function:: get_framerate() -> int Returns the frame rate in hz for the camera module. -.. function:: set_windowing(roi) +.. function:: set_windowing(roi:Union[Tuple[int,int],Tuple[int,int,int,int]]) -> None Sets the resolution of the camera to a sub resolution inside of the current resolution. For example, setting the resolution to `sensor.VGA` and then @@ -256,27 +256,27 @@ Functions This function will automatically handle cropping the passed roi to the framesize. -.. function:: get_windowing() +.. function:: get_windowing() -> Tuple[int, int, int, int] Returns the ``roi`` tuple (x, y, w, h) previously set with `sensor.set_windowing()`. -.. function:: set_gainceiling(gainceiling) +.. function:: set_gainceiling(gainceiling:int) -> None Set the camera image gainceiling. 2, 4, 8, 16, 32, 64, or 128. -.. function:: set_contrast(constrast) +.. function:: set_contrast(constrast:int) -> None Set the camera image contrast. -3 to +3. -.. function:: set_brightness(brightness) +.. function:: set_brightness(brightness:int) -> None Set the camera image brightness. -3 to +3. -.. function:: set_saturation(saturation) +.. function:: set_saturation(saturation:int) -> None Set the camera image saturation. -3 to +3. -.. function:: set_quality(quality) +.. function:: set_quality(quality:int) -> None Set the camera image JPEG compression quality. 0 - 100. @@ -284,11 +284,11 @@ Functions Only for the OV2640/OV5640 cameras. -.. function:: set_colorbar(enable) +.. function:: set_colorbar(enable:bool) -> None Turns color bar mode on (True) or off (False). Defaults to off. -.. function:: set_auto_gain(enable, [gain_db=-1, [gain_db_ceiling]]) +.. function:: set_auto_gain(enable:bool, gain_db=-1, gain_db_ceiling:Optional[int]=None) -> None ``enable`` turns auto gain control on (True) or off (False). The camera will startup with auto gain control on. @@ -302,11 +302,11 @@ Functions You need to turn off white balance too if you want to track colors. -.. 
function:: get_gain_db() +.. function:: get_gain_db() -> float Returns the current camera gain value in decibels (float). -.. function:: set_auto_exposure(enable, [exposure_us]) +.. function:: set_auto_exposure(enable:bool, exposure_us:Optional[int]=None) -> None ``enable`` turns auto exposure control on (True) or off (False). The camera will startup with auto exposure control on. @@ -321,11 +321,11 @@ Functions exposure value by much. Instead, they change the gain value alot of deal with changing lighting. -.. function:: get_exposure_us() +.. function:: get_exposure_us() -> int Returns the current camera exposure value in microseconds (int). -.. function:: set_auto_whitebal(enable, [rgb_gain_db]) +.. function:: set_auto_whitebal(enable:bool, rgb_gain_db:Optional[Tuple[float,float,float]]=None) -> None ``enable`` turns auto white balance on (True) or off (False). The camera will startup with auto white balance on. @@ -337,12 +337,12 @@ Functions You need to turn off gain control too if you want to track colors. -.. function:: get_rgb_gain_db() +.. function:: get_rgb_gain_db() -> Tuple[float, float, float] Returns a tuple with the current camera red, green, and blue gain values in decibels ((float, float, float)). -.. function:: set_auto_blc([enable, [regs]]) +.. function:: set_auto_blc(enable:bool, regs:Optional[Any]=None) Sets the auto black line calibration (blc) control on the camera. @@ -351,27 +351,27 @@ Functions ``regs`` if disabled then you can manually set the blc register values via the values you got previously from `get_blc_regs()`. -.. function:: get_blc_regs() +.. function:: get_blc_regs() -> Any Returns the sensor blc registers as an opaque tuple of integers. For use with `set_auto_blc`. -.. function:: set_hmirror(enable) +.. function:: set_hmirror(enable:bool) -> None Turns horizontal mirror mode on (True) or off (False). Defaults to off. -.. function:: get_hmirror() +.. function:: get_hmirror() -> bool Returns if horizontal mirror mode is enabled. 
-.. function:: set_vflip(enable) +.. function:: set_vflip(enable:bool) -> None Turns vertical flip mode on (True) or off (False). Defaults to off. -.. function:: get_vflip() +.. function:: get_vflip() -> bool Returns if vertical flip mode is enabled. -.. function:: set_transpose(enable) +.. function:: set_transpose(enable:bool) -> None Turns transpose mode on (True) or off (False). Defaults to off. @@ -380,11 +380,11 @@ Functions * vflip=True, hmirror=True, transpose=False -> 180 degree rotation * vflip=False, hmirror=True, transpose=True -> 270 degree rotation -.. function:: get_transpose() +.. function:: get_transpose() -> bool Returns if transpose mode is enabled. -.. function:: set_auto_rotation(enable) +.. function:: set_auto_rotation(enable:bool) -> None Turns auto rotation mode on (True) or off (False). Defaults to off. @@ -392,7 +392,7 @@ Functions This function only works when the OpenMV Cam has an `imu` installed and is enabled automatically. -.. function:: get_auto_rotation() +.. function:: get_auto_rotation() -> bool Returns if auto rotation mode is enabled. @@ -400,7 +400,7 @@ Functions This function only works when the OpenMV Cam has an `imu` installed and is enabled automatically. -.. function:: set_framebuffers(count) +.. function:: set_framebuffers(count:int) -> None Sets the number of frame buffers used to receive image data. By default your OpenMV Cam will automatically try to allocate the maximum number of frame buffers it can possibly allocate @@ -458,11 +458,11 @@ Functions slow-mo video just record video normally to the SD card and then play the video back on a desktop machine slower than it was recorded. -.. function:: get_framebuffers() +.. function:: get_framebuffers() -> int Returns the current number of frame buffers allocated. -.. function:: disable_delays([disable]) +.. function:: disable_delays(disable:Optional[bool]=None) -> bool If ``disable`` is ``True`` then disable all settling time delays in the sensor module. 
Whenever you reset the camera module, change modes, etc. the sensor driver delays to prevent @@ -472,7 +472,7 @@ Functions If this function is called with no arguments it returns if delays are disabled. -.. function:: disable_full_flush([disable]) +.. function:: disable_full_flush(disable:Optional[bool]=None) -> bool If ``disable`` is ``True`` then automatic framebuffer flushing mentioned in `set_framebuffers` is disabled. This removes any time limit on frames in the frame buffer fifo. For example, if @@ -488,20 +488,20 @@ Functions there is no space to hold a frame at which point the frame capture process stops. The process always stops when there is no space to hold the next frame. -.. function:: set_lens_correction(enable, radi, coef) +.. function:: set_lens_correction(enable:bool, radi:int, coef:int) -> None ``enable`` True to enable and False to disable (bool). ``radi`` integer radius of pixels to correct (int). ``coef`` power of correction (int). -.. function:: set_vsync_callback(cb) +.. function:: set_vsync_callback(cb) -> None Registers callback ``cb`` to be executed (in interrupt context) whenever the camera module generates a new frame (but, before the frame is received). ``cb`` takes one argument and is passed the current state of the vsync pin after changing. -.. function:: set_frame_callback(cb) +.. function:: set_frame_callback(cb) -> None Registers callback ``cb`` to be executed (in interrupt context) whenever the camera module generates a new frame and the frame is ready to be read via `sensor.snapshot()`. @@ -510,11 +510,11 @@ Functions Use this to get an interrupt to schedule reading a frame later with `micropython.schedule()`. -.. function:: get_frame_available() +.. function:: get_frame_available() -> bool Returns True if a frame is available to read by calling `sensor.snapshot()`. -.. function:: ioctl(...) +.. 
function:: ioctl(*args, **kwargs) -> Any Executes a sensor specific method: @@ -560,21 +560,21 @@ Functions * `sensor.IOCTL_HIMAX_MD_THRESHOLD` - Pass this enum followed by a threshold value (0-255) to set the motion detection threshold on the HM01B0. * `sensor.IOCTL_HIMAX_OSC_ENABLE` - Pass this enum followed by ``True``/``False`` to enable/disable the oscillator HM01B0 to save power. -.. function:: set_color_palette(palette) +.. function:: set_color_palette(palette:int) -> None Sets the color palette to use for FLIR Lepton grayscale to RGB565 conversion. -.. function:: get_color_palette() +.. function:: get_color_palette() -> int Returns the current color palette setting. Defaults to `image.PALETTE_RAINBOW`. -.. function:: __write_reg(address, value) +.. function:: __write_reg(address:int, value:int) -> None Write ``value`` (int) to camera register at ``address`` (int). .. note:: See the camera data sheet for register info. -.. function:: __read_reg(address) +.. function:: __read_reg(address:int) -> int Read camera register at ``address`` (int). @@ -584,13 +584,15 @@ Constants --------- .. data:: BINARY + :type: int BINARY (bitmap) pixel format. Each pixel is 1-bit. - This format is usful for mask storage. Can be used with `image.Image()` and + This format is usful for mask storage. Can be used with `Image()` and `sensor.alloc_extra_fb()`. .. data:: GRAYSCALE + :type: int GRAYSCALE pixel format (Y from YUV422). Each pixel is 8-bits, 1-byte. @@ -598,6 +600,7 @@ Constants RGB565 images. .. data:: RGB565 + :type: int RGB565 pixel format. Each pixel is 16-bits, 2-bytes. 5-bits are used for red, 6-bits are used for green, and 5-bits are used for blue. @@ -606,6 +609,7 @@ Constants grayscale images. .. data:: BAYER + :type: int RAW BAYER image pixel format. If you try to make the frame size too big to fit in the frame buffer your OpenMV Cam will set the pixel format @@ -613,6 +617,7 @@ Constants will be operational. .. 
data:: YUV422 + :type: int A pixel format that is very easy to jpeg compress. Each pixel is stored as a grayscale 8-bit Y value followed by alternating 8-bit U/V color values that are shared between two @@ -620,371 +625,466 @@ Constants methods work with YUV422. .. data:: JPEG + :type: int JPEG mode. The camera module outputs compressed jpeg images. Use `sensor.set_quality()` to control the jpeg quality. Only works for the OV2640/OV5640 cameras. .. data:: OV2640 + :type: int `sensor.get_id()` returns this for the OV2640 camera. .. data:: OV5640 + :type: int `sensor.get_id()` returns this for the OV5640 camera. .. data:: OV7690 + :type: int `sensor.get_id()` returns this for the OV7690 camera. .. data:: OV7725 + :type: int `sensor.get_id()` returns this for the OV7725 camera. .. data:: OV9650 + :type: int `sensor.get_id()` returns this for the OV9650 camera. .. data:: MT9V022 + :type: int `sensor.get_id()` returns this for the MT9V022 camera. .. data:: MT9V024 + :type: int `sensor.get_id()` returns this for the MT9V024 camera. .. data:: MT9V032 + :type: int `sensor.get_id()` returns this for the MT9V032 camera. .. data:: MT9V034 + :type: int `sensor.get_id()` returns this for the MT9V034 camera. .. data:: MT9M114 + :type: int `sensor.get_id()` returns this for the MT9M114 camera. .. data:: LEPTON + :type: int `sensor.get_id()` returns this for the LEPTON1/2/3 cameras. .. data:: HM01B0 + :type: int `sensor.get_id()` returns this for the HM01B0 camera. .. data:: HM0360 + :type: int `sensor.get_id()` returns this for the HM01B0 camera. .. data:: GC2145 + :type: int `sensor.get_id()` returns this for the GC2145 camera. +.. data:: PAG7920 + :type: int + + `sensor.get_id()` returns this for the PAG7920 camera. + .. data:: PAJ6100 + :type: int `sensor.get_id()` returns this for the PAJ6100 camera. .. data:: FROGEYE2020 + :type: int `sensor.get_id()` returns this for the FROGEYE2020 camera. .. data:: QQCIF + :type: int 88x72 resolution for the camera sensor. .. 
data:: QCIF + :type: int 176x144 resolution for the camera sensor. .. data:: CIF + :type: int 352x288 resolution for the camera sensor. .. data:: QQSIF + :type: int 88x60 resolution for the camera sensor. .. data:: QSIF + :type: int 176x120 resolution for the camera sensor. .. data:: SIF + :type: int 352x240 resolution for the camera sensor. .. data:: QQQQVGA + :type: int 40x30 resolution for the camera sensor. .. data:: QQQVGA + :type: int 80x60 resolution for the camera sensor. .. data:: QQVGA + :type: int 160x120 resolution for the camera sensor. .. data:: QVGA + :type: int 320x240 resolution for the camera sensor. .. data:: VGA + :type: int 640x480 resolution for the camera sensor. .. data:: HQQQQVGA + :type: int 30x20 resolution for the camera sensor. .. data:: HQQQVGA + :type: int 60x40 resolution for the camera sensor. .. data:: HQQVGA + :type: int 120x80 resolution for the camera sensor. .. data:: HQVGA + :type: int 240x160 resolution for the camera sensor. .. data:: HVGA + :type: int 480x320 resolution for the camera sensor. .. data:: B64X32 + :type: int 64x32 resolution for the camera sensor. For use with `Image.find_displacement()` and any other FFT based algorithm. .. data:: B64X64 + :type: int 64x64 resolution for the camera sensor. For use with `Image.find_displacement()` and any other FFT based algorithm. .. data:: B128X64 + :type: int 128x64 resolution for the camera sensor. For use with `Image.find_displacement()` and any other FFT based algorithm. .. data:: B128X128 + :type: int 128x128 resolution for the camera sensor. For use with `Image.find_displacement()` and any other FFT based algorithm. .. data:: B160X160 + :type: int 160x160 resolution for the HM01B0 camera sensor. .. data:: B320X320 + :type: int 320x320 resolution for the HM01B0 camera sensor. .. data:: LCD + :type: int 128x160 resolution for the camera sensor (for use with the lcd shield). .. 
data:: QQVGA2 + :type: int 128x160 resolution for the camera sensor (for use with the lcd shield). .. data:: WVGA + :type: int 720x480 resolution for the MT9V034 camera sensor. .. data:: WVGA2 + :type: int 752x480 resolution for the MT9V034 camera sensor. .. data:: SVGA + :type: int 800x600 resolution for the camera sensor. .. data:: XGA + :type: int 1024x768 resolution for the camera sensor. .. data:: WXGA + :type: int 1280x768 resolution for the MT9M114 camera sensor. .. data:: SXGA + :type: int 1280x1024 resolution for the camera sensor. Only works for the OV2640/OV5640 cameras. .. data:: SXGAM + :type: int 1280x960 resolution for the MT9M114 camera sensor. .. data:: UXGA + :type: int 1600x1200 resolution for the camera sensor. Only works for the OV2640/OV5640 cameras. .. data:: HD + :type: int 1280x720 resolution for the camera sensor. .. data:: FHD + :type: int 1920x1080 resolution for the camera sensor. Only works for the OV5640 camera. .. data:: QHD + :type: int 2560x1440 resolution for the camera sensor. Only works for the OV5640 camera. .. data:: QXGA + :type: int 2048x1536 resolution for the camera sensor. Only works for the OV5640 camera. .. data:: WQXGA + :type: int 2560x1600 resolution for the camera sensor. Only works for the OV5640 camera. .. data:: WQXGA2 + :type: int 2592x1944 resolution for the camera sensor. Only works for the OV5640 camera. .. data:: IOCTL_SET_READOUT_WINDOW + :type: int Lets you set the readout window for the OV5640. .. data:: IOCTL_GET_READOUT_WINDOW + :type: int Lets you get the readout window for the OV5640. .. data:: IOCTL_SET_TRIGGERED_MODE + :type: int Lets you set the triggered mode for the MT9V034. .. data:: IOCTL_GET_TRIGGERED_MODE + :type: int Lets you get the triggered mode for the MT9V034. .. data:: IOCTL_SET_FOV_WIDE + :type: int Enable `sensor.set_framesize()` to optimize for the field-of-view over FPS. .. 
data:: IOCTL_GET_FOV_WIDE + :type: int Return if `sensor.set_framesize()` is optimizing for field-of-view over FPS. .. data:: IOCTL_TRIGGER_AUTO_FOCUS + :type: int Used to trigger auto focus for the OV5640 FPC camera module. .. data:: IOCTL_PAUSE_AUTO_FOCUS + :type: int Used to pause auto focus (while running) for the OV5640 FPC camera module. .. data:: IOCTL_RESET_AUTO_FOCUS + :type: int Used to reset auto focus back to the default for the OV5640 FPC camera module. .. data:: IOCTL_WAIT_ON_AUTO_FOCUS + :type: int Used to wait on auto focus to finish after being triggered for the OV5640 FPC camera module. .. data:: IOCTL_SET_NIGHT_MODE + :type: int Used to turn night mode on or off on a sensor. Nightmode reduces the frame rate to increase exposure dynamically. .. data:: IOCTL_GET_NIGHT_MODE + :type: int Gets the current value of if night mode is enabled or disabled for your sensor. .. data:: IOCTL_LEPTON_GET_WIDTH + :type: int Lets you get the FLIR Lepton image resolution width in pixels. .. data:: IOCTL_LEPTON_GET_HEIGHT + :type: int Lets you get the FLIR Lepton image resolution height in pixels. .. data:: IOCTL_LEPTON_GET_RADIOMETRY + :type: int Lets you get the FLIR Lepton type (radiometric or not). .. data:: IOCTL_LEPTON_GET_REFRESH + :type: int Lets you get the FLIR Lepton refresh rate in hertz. .. data:: IOCTL_LEPTON_GET_RESOLUTION + :type: int Lets you get the FLIR Lepton ADC resolution in bits. .. data:: IOCTL_LEPTON_RUN_COMMAND + :type: int Executes a 16-bit command given the FLIR Lepton SDK. .. data:: IOCTL_LEPTON_SET_ATTRIBUTE + :type: int Sets a FLIR Lepton Attribute given the FLIR Lepton SDK. .. data:: IOCTL_LEPTON_GET_ATTRIBUTE + :type: int Gets a FLIR Lepton Attribute given the FLIR Lepton SDK. .. data:: IOCTL_LEPTON_GET_FPA_TEMPERATURE + :type: int Gets the FLIR Lepton FPA temp in celsius. .. data:: IOCTL_LEPTON_GET_AUX_TEMPERATURE + :type: int Gets the FLIR Lepton AUX temp in celsius. .. 
data:: IOCTL_LEPTON_SET_MEASUREMENT_MODE + :type: int Lets you set the FLIR Lepton driver into a mode where you can get a valid temperature value per pixel. See `sensor.ioctl()` for more information. .. data:: IOCTL_LEPTON_GET_MEASUREMENT_MODE + :type: int Lets you get if measurement mode is enabled or not for the FLIR Lepton sensor. See `sensor.ioctl()` for more information. .. data:: IOCTL_LEPTON_SET_MEASUREMENT_RANGE + :type: int Lets you set the temperature range you want to map pixels in the image to when in measurement mode. See `sensor.ioctl()` for more information. .. data:: IOCTL_LEPTON_GET_MEASUREMENT_RANGE + :type: int Lets you get the temperature range used for measurement mode. See `sensor.ioctl()` for more information. .. data:: IOCTL_HIMAX_MD_ENABLE + :type: int Lets you control the motion detection interrupt on the HM01B0. See `sensor.ioctl()` for more information. .. data:: IOCTL_HIMAX_MD_CLEAR + :type: int Lets you control the motion detection interrupt on the HM01B0. See `sensor.ioctl()` for more information. .. data:: IOCTL_HIMAX_MD_WINDOW + :type: int Lets you control the motion detection interrupt on the HM01B0. See `sensor.ioctl()` for more information. .. data:: IOCTL_HIMAX_MD_THRESHOLD + :type: int Lets you control the motion detection interrupt on the HM01B0. See `sensor.ioctl()` for more information. .. data:: IOCTL_HIMAX_OSC_ENABLE + :type: int Lets you control the internal oscillator on the HM01B0. See `sensor.ioctl()` for more information. .. data:: SINGLE_BUFFER + :type: int Pass to `sensor.set_framebuffers()` to set single buffer mode (1 buffer). .. data:: DOUBLE_BUFFER + :type: int Pass to `sensor.set_framebuffers()` to set double buffer mode (2 buffers). .. data:: TRIPLE_BUFFER + :type: int Pass to `sensor.set_framebuffers()` to set triple buffer mode (3 buffers). .. data:: VIDEO_FIFO + :type: int Pass to `sensor.set_framebuffers()` to set video FIFO mode (4 buffers). 
diff --git a/docs/library/omv.tf.rst b/docs/library/omv.tf.rst deleted file mode 100644 index a87174e93c8d..000000000000 --- a/docs/library/omv.tf.rst +++ /dev/null @@ -1,366 +0,0 @@ -:mod:`tf` --- Tensor Flow -========================= - -.. module:: tf - :synopsis: Tensor Flow - -The ``tf`` module is capable of executing Quantized TensorFlow Lite Models -on the OpenMV Cam (not supported on the OpenMV Cam M4). - -You can read more about how to create your own models that can run on the -OpenMV Cam `here `__. In -particular: - - * Supported operations are listed `here `__. - - * Note that tensorflow lite operations are versioned. If no version numbers - are listed after the operation then the min and max version supported are - 1. If there are numbers after an operation those numbers represent the - minimum and maximum operation version supported. - * If you are using Keras to generate your model be careful about only using - operators that are supported by tensorflow lite for microcontrollers. Otherwise, - your model will not be runnable by your OpenMV Cam. - - * Convert your model to a FlatBuffer by following the instructions `here `__. - * Finally, quantize your model by following the instructions `here `__. - -Alternatively, just follow Google's in-depth guide `here `__. -If you have problems with Google's in-depth guide please contact Google for help. - -The final output ``.tflite`` model can be directly loaded and run by your -OpenMV Cam. That said, the model and the model's required sratch RAM must -fit within the available frame buffer stack RAM on your OpenMV Cam. - - * The OpenMV Cam M7 has about 384KB of frame buffer RAM. Please try - to keep your model and it's required scratch buffer under 320 KB. - * The OpenMV Cam H7 has about 496KB of frame buffer RAM. Please try - to keep your model and it's required scratch buffer under 400 KB. - * The OpenMV Cam H7 Plus has about 31MB of frame buffer RAM. 
That - said, running a model anywhere near the that size will be extremely slow. - -Alternatively, you can also load a model onto the MicroPython Heap or the OpenMV Cam frame buffer. -However, this significantly limits the model size on all OpenMV Cams. - -Functions ---------- - -.. function:: classify(path, img, [roi, [min_scale=1.0, [scale_mul=0.5, [x_overlap=0, [y_overlap=0]]]]]) - - Executes the TensorFlow Lite image classification model on the ``img`` - object and returns a list of `tf_classification` objects. This method - executes the network multiple times on the image in a controllable sliding - window type manner (by default the algorithm only executes the network once - on the whole image frame). - - ``path`` a path to a ``.tflite`` model to execute on your OpenMV Cam's - disk. The model is loaded into memory, executed, and released all in - one function call to save from having to load the model in the - MicroPython heap. Pass ``"person_detection"`` to load the built-in - person detection model from your OpenMV Cam's internal flash. - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - - ``min_scale`` controls how much scaling is applied to the network. At the - default value the network is not scaled. However, a value of 0.5 would allow - for detecting objects 50% in size of the image roi size... - - ``scale_mul`` controls how many different scales are tested out. The sliding - window method works by multiplying a default scale of 1 by ``scale_mul`` - while the result is over ``min_scale``. The default value of ``scale_mul``, - 0.5, tests out a 50% size reduction per scale change. However, a value of - 0.95 would only be a 5% size reductioin. - - ``x_overlap`` controls the percentage of overlap with the next detector - area of the sliding window. A value of zero means no overlap. A value of - 0.95 would mean 95% overlap. 
- - ``y_overlap`` controls the percentage of overlap with the next detector - area of the sliding window. A value of zero means no overlap. A value of - 0.95 would mean 95% overlap. - -.. function:: segment(path, img, [roi]) - - Executes the TensorFlow Lite image segmentation model on the ``img`` - object and returns a list of grayscale `image` objects for each - segmentation class output channel. - - ``path`` a path to a ``.tflite`` model to execute on your OpenMV Cam's - disk. The model is loaded into memory, executed, and released all in - one function call to save from having to load the model in the - MicroPython heap. - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - -.. function:: detect(path, img, [roi, [thresholds, [invert]]]) - - Executes the TensorFlow Lite image segmentation model on the ``img`` - object and returns a list of `image.blob` objects for each segmentation - class output. E.g. if you have an image that's segmented into two classes - this method will return a list of two lists of blobs that match the requested - thresholds. - - ``path`` a path to a ``.tflite`` model to execute on your OpenMV Cam's - disk. The model is loaded into memory, executed, and released all in - one function call to save from having to load the model in the - MicroPython heap. - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - - ``thresholds`` must be a list of tuples - ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you - want to track. You may pass up to 32 threshold tuples in one call. Each tuple - needs to contain two values - a min grayscale value and a max grayscale value. - Only pixel regions that fall between these thresholds will be considered. 
- For easy usage this function will automatically fix swapped min and max values. - If the tuple is too short the rest of the thresholds are assumed to be at maximum - range. If no thresholds are specified they are assumed to be (128, 255) which - will detect "active" pixel regions in the segmented images. - - ``invert`` inverts the thresholding operation such that instead of matching - pixels inside of some known color bounds pixels are matched that are outside - of the known color bounds. - -.. function:: regression(path, array) - - Executes the TensorFlow Lite regression model on the passed array of floats and returns - a new array of floats as the result. This method accepts 1D/2D/3D arrays which must match - the input shape of the network. Arrays should be organized in [height][width][channel] order. - - ``path`` a path to a ``.tflite`` model to execute on your OpenMV Cam's - disk. The model is loaded into memory, executed, and released all in - one function call to save from having to load the model in the - MicroPython heap. - -.. function:: load(path, [load_to_fb=False]) - - ``path`` a path to a ``.tflite`` model to load into memory on the MicroPython heap by default. - - NOTE! The MicroPython heap is only ~50 KB on the OpenMV Cam M7 and ~256 KB on the OpenMV Cam H7. - - Pass ``"person_detection"`` to load the built-in person detection model from your - OpenMV Cam's internal flash. This built-in model does not use any Micropython Heap - as all the weights are stored in flash which is accessible in the same way as RAM. - - ``load_to_fb`` if passed as True will instead reserve part of the OpenMV Cam frame buffer - stack for storing the TensorFlow Lite model. You will get the most efficent execution - performance for large models that do not fit on the heap by loading them into frame buffer - memory once from disk and then repeatedly executing the model. That said, the frame buffer - space used will not be available anymore for other algorithms. 
- - Returns a `tf_model` object which can operate on an image. - -.. function:: free_from_fb() - - Deallocates a previously allocated `tf_model` object created with ``load_to_fb`` set to True. - - Note that deallocations happen in the reverse order of allocation. - -class tf_classification -- tf classification dection result ------------------------------------------------------------ - -The tf_classification object is returned by `tf.classify()` or `tf_model.classify()`. - -Constructors -~~~~~~~~~~~~ - -.. class:: tf_classification() - - Please call `tf.classify()` or `tf_model.classify()` to create this object. - - Methods - ~~~~~~~ - - .. method:: rect() - - Returns a rectangle tuple (x, y, w, h) for use with `image` methods - like `Image.draw_rectangle()` of the tf_classification's bounding box. - - .. method:: x() - - Returns the tf_classification's bounding box x coordinate (int). - - You may also get this value doing ``[0]`` on the object. - - .. method:: y() - - Returns the tf_classification's bounding box y coordinate (int). - - You may also get this value doing ``[1]`` on the object. - - .. method:: w() - - Returns the tf_classification's bounding box w coordinate (int). - - You may also get this value doing ``[2]`` on the object. - - .. method:: h() - - Returns the tf_classification's bounding box h coordinate (int). - - You may also get this value doing ``[3]`` on the object. - - .. method:: classification_output() - - Returns a list of the classification label scores. The size of this - list is determined by your model output channel size. For example, - mobilenet outputs a list of 1000 classification scores for all 1000 - classes understood by mobilenet. Use ``zip`` in python to combine - the classification score results with classification labels. - - You may also get this value doing ``[4]`` on the object. 
- -class tf_model -- TensorFlow Model ----------------------------------- - -If your model size is small enough and you have enough heap or frame buffer space you may wish -to directly load the model into memory to save from having to load it from disk -each time you wish to execute it. - -Constructors -~~~~~~~~~~~~ - -.. class:: tf_model() - - Please call `tf.load()` to create the TensorFlow Model object. TensorFlow Model objects allow - you to execute a model from RAM versus having to load it from disk repeatedly. - - Methods - ~~~~~~~ - - .. method:: len() - - Returns the size in bytes of the model. - - .. method:: ram() - - Returns the model's required free RAM in bytes. - - .. method:: input_height() - - Returns the input height of the model. You can use this to size your input - image height appropriately. - - .. method:: input_width() - - Returns the input width of the model. You can use this to size your input - image width appropriately. - - .. method:: input_channels() - - Returns the number of input color channels in the model. - - .. method:: input_datatype() - - Returns the model's input datatype (this is a string of "uint8", "int8", or "float"). - - .. method:: input_scale() - - Returns the input scale for the model. - - .. method:: input_zero_point() - - Returns the output zero point for the model. - - .. method:: output_height() - - Returns the output height of the model. You can use this to size your output - image height appropriately. - - .. method:: output_width() - - Returns the output width of the model. You can use this to size your output - image width appropriately. - - .. method:: output_channels() - - Returns the number of output color channels in the model. - - .. method:: output_datatype() - - Returns the model's output datatype (this is a string of "uint8", "int8", or "float"). - - .. method:: output_scale() - - Returns the output scale for the model. - - .. method:: output_zero_point() - - Returns the output zero point for the model. 
- - .. method:: classify(img, [roi, [min_scale=1.0, [scale_mul=0.5, [x_overlap=0, [y_overlap=0]]]]]) - - Executes the TensorFlow Lite image classification model on the ``img`` - object and returns a list of `tf_classification` objects. This method - executes the network multiple times on the image in a controllable sliding - window type manner (by default the algorithm only executes the network once - on the whole image frame). - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - - ``min_scale`` controls how much scaling is applied to the network. At the - default value the network is not scaled. However, a value of 0.5 would allow - for detecting objects 50% in size of the image roi size... - - ``scale_mul`` controls how many different scales are tested out. The sliding - window method works by multiplying a default scale of 1 by ``scale_mul`` - while the result is over ``min_scale``. The default value of ``scale_mul``, - 0.5, tests out a 50% size reduction per scale change. However, a value of - 0.95 would only be a 5% size reductioin. - - ``x_overlap`` controls the percentage of overlap with the next detector - area of the sliding window. A value of zero means no overlap. A value of - 0.95 would mean 95% overlap. - - ``y_overlap`` controls the percentage of overlap with the next detector - area of the sliding window. A value of zero means no overlap. A value of - 0.95 would mean 95% overlap. - - .. method:: segment(img, [roi]) - - Executes the TensorFlow Lite image segmentation model on the ``img`` - object and returns a list of grayscale `image` objects for each - segmentation class output channel. - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - - .. 
method:: detect(img, [roi, [thresholds, [invert]]]) - - Executes the TensorFlow Lite image segmentation model on the ``img`` - object and returns a list of `image.blob` objects for each segmentation - class output. E.g. if you have an image that's segmented into two classes - this method will return a list of two lists of blobs that match the requested - thresholds. - - ``roi`` is the region-of-interest rectangle tuple (x, y, w, h). If not - specified, it is equal to the image rectangle. Only pixels within the - ``roi`` are operated on. - - ``thresholds`` must be a list of tuples - ``[(lo, hi), (lo, hi), ..., (lo, hi)]`` defining the ranges of color you - want to track. You may pass up to 32 threshold tuples in one call. Each tuple - needs to contain two values - a min grayscale value and a max grayscale value. - Only pixel regions that fall between these thresholds will be considered. - For easy usage this function will automatically fix swapped min and max values. - If the tuple is too short the rest of the thresholds are assumed to be at maximum - range. If no thresholds are specified they are assumed to be (128, 255) which - will detect "active" pixel regions in the segmented images. - - ``invert`` inverts the thresholding operation such that instead of matching - pixels inside of some known color bounds pixels are matched that are outside - of the known color bounds. - - .. method:: regression(array) - - Executes the TensorFlow Lite regression model on the passed array of floats and returns - a new array of floats as the result. This method accepts 1D/2D/3D arrays which must match - the input shape of the network. Arrays should be organized in [height][width][channel] order. diff --git a/docs/library/omv.tfp410.rst b/docs/library/omv.tfp410.rst index bd11b75c7fde..9acdbf5d771e 100644 --- a/docs/library/omv.tfp410.rst +++ b/docs/library/omv.tfp410.rst @@ -13,7 +13,7 @@ DVI/HDMI Controller for the OpenMV Pure Thermal. Constructors ------------ -.. 
class:: tfp410.TFP410([i2c_addr=0x3F]) +.. class:: tfp410.TFP410(i2c_addr=0x3F) Initializes the TFP410 DVI/HDMI controller chip to drive an external DVI/HDMI display via a 24-bit parallel LCD bus. You just need to create this object to initialize the display. @@ -21,11 +21,11 @@ Constructors Methods ------- -.. method:: TFP410.isconnected() +.. method:: TFP410.isconnected() -> bool Returns if an external display is connected. -.. method:: TFP410.hotplug_callback(callback) +.. method:: TFP410.hotplug_callback(callback) -> None Registers a ``callback`` function that be called whenever the state of an external display being connected changes. The new state will be passed as an argument. diff --git a/docs/library/omv.tv.rst b/docs/library/omv.tv.rst index 60f8d11de49a..99addf0fe6e3 100644 --- a/docs/library/omv.tv.rst +++ b/docs/library/omv.tv.rst @@ -24,7 +24,7 @@ Example usage:: Functions --------- -.. function:: init([type=tv.TV_SHIELD, [triple_buffer=False]]) +.. function:: init(type=TV_SHIELD, triple_buffer=False) -> None Initializes an attached tv output module. @@ -36,36 +36,36 @@ Functions ``triple_buffer`` If True then makes updates to the screen non-blocking in `tv.TV_SHIELD` mode at the cost of 3X the display RAM (495 KB). -.. function:: deinit() +.. function:: deinit() -> None Deinitializes the tv module, internal/external hardware, and I/O pins. -.. function:: width() +.. function:: width() -> int Returns 352 pixels. This is the `sensor.SIF` resolution. -.. function:: height() +.. function:: height() -> int Returns 240 pixels. This is the `sensor.SIF` resolution. -.. function:: type() +.. function:: type() -> int Returns the type of the screen that was set during `tv.init()`. -.. function:: triple_buffer() +.. function:: triple_buffer() -> bool Returns if triple buffering is enabled that was set during `tv.init()`. -.. function:: refresh() +.. function:: refresh() -> None Returns 60 Hz. -.. function:: channel([channel]) +.. 
function:: channel(channel:Optional[int]=None) -> int For the wireless TV shield this sets the broadcast channel between 1-8. If passed without a channel argument then this method returns the previously set channel (1-8). Default is channel 8. -.. function:: display(image, [x=0, [y=0, [x_scale=1.0, [y_scale=1.0, [roi=None, [rgb_channel=-1, [alpha=256, [color_palette=None, [alpha_palette=None, [hint=0]]]]]]]]]]) +.. function:: display(image:image.Image, x=0, y=0, x_scale=1.0, y_scale=1.0, roi:Optional[Tuple[int,int,int,int]]=None, rgb_channel=-1, alpha=256, color_palette=None, alpha_palette=None, hint=0) Displays an ``image`` whose top-left corner starts at location x, y. @@ -119,9 +119,11 @@ Constants --------- .. data:: TV_NONE + :type: int Returned by `tv.type()` when the this module is not initialized. .. data:: TV_SHIELD + :type: int Used to initialize the TV module. diff --git a/docs/library/time.rst b/docs/library/time.rst index 88ec60db7f6e..fe805e340560 100644 --- a/docs/library/time.rst +++ b/docs/library/time.rst @@ -252,31 +252,31 @@ Functions Constructors ------------ -.. class:: clock() +.. class:: clock() -> clock Returns a clock object. Methods ------- - .. method:: tick() + .. method:: tick() -> None Starts tracking elapsed time. - .. method:: fps() + .. method:: fps() -> float Stops tracking the elapsed time and returns the current FPS (frames per second). Always call ``tick`` first before calling this function. - .. method:: avg() + .. method:: avg() -> float Stops tracking the elapsed time and returns the current average elapsed time in milliseconds. Always call ``tick`` first before calling this function. - .. method:: reset() + .. method:: reset() -> None Resets the clock object.