micropython: add micropython component

This commit is contained in:
KY-zhang-X
2022-09-29 12:10:37 +08:00
parent 1514f1cb9b
commit dd76146324
2679 changed files with 354110 additions and 0 deletions


@@ -0,0 +1,191 @@
# Makefile for Sphinx documentation
#
# You can set these variables from the command line.
PYTHON = python3
SPHINXOPTS = -W --keep-going
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = build/$(MICROPY_PORT)
CPYDIFFDIR = ../tools
CPYDIFF = gen-cpydiff.py
GENRSTDIR = genrst
# Run "make FORCE= ..." to avoid rebuilding from scratch (and risk
# producing incorrect docs).
FORCE = -E
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
@echo " cpydiff to generate the MicroPython differences from CPython"
clean:
rm -rf $(BUILDDIR)/*
rm -f $(GENRSTDIR)/*
cpydiff:
@echo "Generating MicroPython Differences."
rm -f $(GENRSTDIR)/*
cd $(CPYDIFFDIR) && $(PYTHON) $(CPYDIFF)
html: cpydiff
$(SPHINXBUILD) $(FORCE) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/MicroPython.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/MicroPython.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/MicroPython"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/MicroPython"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex: cpydiff
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf: cpydiff
$(SPHINXBUILD) $(FORCE) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja: cpydiff
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."


@@ -0,0 +1,53 @@
MicroPython Documentation
=========================
The MicroPython documentation can be found at:
http://docs.micropython.org/en/latest/
The documentation you see there is generated from the files in the docs tree:
https://github.com/micropython/micropython/tree/master/docs
Building the documentation locally
----------------------------------
If you're making changes to the documentation, you may want to build the
documentation locally so that you can preview your changes.
Install Sphinx, and optionally (for the RTD-styling), sphinx_rtd_theme,
preferably in a virtualenv:
pip install sphinx
pip install sphinx_rtd_theme
In `micropython/docs`, build the docs:
make html
You'll find the index page at `micropython/docs/build/html/index.html`.
Having readthedocs.org build the documentation
----------------------------------------------
If you would like to have docs for forks/branches hosted on GitHub, GitLab or
BitBucket, an alternative to building the docs locally is to sign up for a free
https://readthedocs.org account. The rough steps to follow are:
1. sign-up for an account, unless you already have one
2. in your account settings: add GitHub as a connected service (assuming
you have forked this repo on github)
3. in your account projects: import your forked/cloned micropython repository
into readthedocs
4. in the project's versions: add the branches you are developing on or
for which you'd like readthedocs to auto-generate docs whenever you
push a change
PDF manual generation
---------------------
This can be achieved with:
make latexpdf
but requires a rather complete install of LaTeX with various extensions. On
Debian/Ubuntu, try (500MB+ download):
apt-get install texlive-latex-recommended texlive-latex-extra


@@ -0,0 +1,301 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'cur_version':micropy_version,
'all_versions':[
(ver, url_pattern % ver) for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % micropy_version + '/micropython-docs.pdf'),
],
}
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '- The MicroPython Documentation is Copyright © 2014-2022, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '1.19'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', '.venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# of rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
'preamble': '\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('https://docs.python.org/3.5', None)}


@@ -0,0 +1,266 @@
.. _cmodules:
MicroPython external C modules
==============================
When developing modules for use with MicroPython you may find you run into
limitations with the Python environment, often due to an inability to access
certain hardware resources or Python speed limitations.
If your limitations can't be resolved with suggestions in :ref:`speed_python`,
writing some or all of your module in C (and/or C++ if implemented for your port)
is a viable option.
If your module is designed to access or work with commonly available
hardware or libraries please consider implementing it inside the MicroPython
source tree alongside similar modules and submitting it as a pull request.
If however you're targeting obscure or proprietary systems it may make
more sense to keep this external to the main MicroPython repository.
This chapter describes how to compile such external modules into the
MicroPython executable or firmware image. Both Make and CMake build
tools are supported, and when writing an external module it's a good idea to
add the build files for both of these tools so the module can be used on all
ports. But when compiling a particular port you will only need to use one
method of building, either Make or CMake.
An alternative approach is to use :ref:`natmod` which allows writing custom C
code that is placed in a .mpy file, which can be imported dynamically into
a running MicroPython system without the need to recompile the main firmware.
Structure of an external C module
---------------------------------
A MicroPython user C module is a directory with the following files:
* ``*.c`` / ``*.cpp`` / ``*.h`` source code files for your module.
These will typically include the low level functionality being implemented and
the MicroPython binding functions to expose the functions and module(s).
Currently the best reference for writing these functions/modules is
to find similar modules within the MicroPython tree and use them as examples.
* ``micropython.mk`` contains the Makefile fragment for this module.
``$(USERMOD_DIR)`` is available in ``micropython.mk`` as the path to your
module directory. As it's redefined for each C module, it should be expanded
in your ``micropython.mk`` to a local make variable,
e.g. ``EXAMPLE_MOD_DIR := $(USERMOD_DIR)``
Your ``micropython.mk`` must add your module's source files relative to your
expanded copy of ``$(USERMOD_DIR)`` to ``SRC_USERMOD``, e.g.
``SRC_USERMOD += $(EXAMPLE_MOD_DIR)/example.c``
If you have custom compiler options (like ``-I`` to add directories to search
for header files), these should be added to ``CFLAGS_USERMOD`` for C code
and to ``CXXFLAGS_USERMOD`` for C++ code.
* ``micropython.cmake`` contains the CMake configuration for this module.
In ``micropython.cmake``, you may use ``${CMAKE_CURRENT_LIST_DIR}`` as the path to
the current module.
Your ``micropython.cmake`` should define an ``INTERFACE`` library and associate
your source files, compile definitions and include directories with it.
The library should then be linked to the ``usermod`` target.
.. code-block:: cmake
add_library(usermod_cexample INTERFACE)
target_sources(usermod_cexample INTERFACE
${CMAKE_CURRENT_LIST_DIR}/examplemodule.c
)
target_include_directories(usermod_cexample INTERFACE
${CMAKE_CURRENT_LIST_DIR}
)
target_link_libraries(usermod INTERFACE usermod_cexample)
See below for a full usage example.
Basic example
-------------
This simple module named ``cexample`` provides a single function
``cexample.add_ints(a, b)`` which adds the two integer args together and returns
the result. It can be found in the MicroPython source tree
`in the examples directory <https://github.com/micropython/micropython/tree/master/examples/usercmodule/cexample>`_
and has a source file and a Makefile fragment with content as described above::
micropython/
└──examples/
└──usercmodule/
└──cexample/
├── examplemodule.c
├── micropython.mk
└── micropython.cmake
Refer to the comments in these files for additional explanation.
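For orientation, the binding code in ``examplemodule.c`` looks roughly like the
condensed sketch below; refer to the actual file in the MicroPython tree for the
authoritative, fully commented version.
.. code-block:: c

    // Condensed sketch of examplemodule.c (see the real file for full comments).
    #include "py/runtime.h"

    // Called from Python as cexample.add_ints(a, b).
    STATIC mp_obj_t example_add_ints(mp_obj_t a_obj, mp_obj_t b_obj) {
        // Extract the integers from the MicroPython input objects.
        int a = mp_obj_get_int(a_obj);
        int b = mp_obj_get_int(b_obj);
        // Compute the sum and convert it back to a MicroPython object.
        return mp_obj_new_int(a + b);
    }
    STATIC MP_DEFINE_CONST_FUN_OBJ_2(example_add_ints_obj, example_add_ints);

    // The module's globals table: its name and the functions it exports.
    STATIC const mp_rom_map_elem_t example_module_globals_table[] = {
        { MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_cexample) },
        { MP_ROM_QSTR(MP_QSTR_add_ints), MP_ROM_PTR(&example_add_ints_obj) },
    };
    STATIC MP_DEFINE_CONST_DICT(example_module_globals, example_module_globals_table);

    // The module object, referencing the globals table above.
    const mp_obj_module_t example_user_cmodule = {
        .base = { &mp_type_module },
        .globals = (mp_obj_dict_t *)&example_module_globals,
    };

    // Register the module so it becomes importable as cexample.
    MP_REGISTER_MODULE(MP_QSTR_cexample, example_user_cmodule);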
Next to the ``cexample`` module there's also ``cppexample`` which
works in the same way but shows one way of mixing C and C++ code
in MicroPython.
Compiling the cmodule into MicroPython
--------------------------------------
To build such a module, compile MicroPython (see `getting started
<https://github.com/micropython/micropython/wiki/Getting-Started>`_),
applying 2 modifications:
1. Set the build-time flag ``USER_C_MODULES`` to point to the modules
you want to include. For ports that use Make this variable should be a
directory which is searched automatically for modules. For ports that
use CMake this variable should be a file which includes the modules to
build. See below for details.
2. Enable the modules by setting the corresponding C preprocessor macro to
1. This is only needed if the modules you are building are not
automatically enabled.
For building the example modules which come with MicroPython,
set ``USER_C_MODULES`` to the ``examples/usercmodule`` directory for Make,
or to ``examples/usercmodule/micropython.cmake`` for CMake.
For example, here's how to build the unix port with the example modules:
.. code-block:: bash
cd micropython/ports/unix
make USER_C_MODULES=../../examples/usercmodule
You may need to run ``make clean`` once at the start when including new
user modules in the build. The build output will show the modules found::
...
Including User C Module from ../../examples/usercmodule/cexample
Including User C Module from ../../examples/usercmodule/cppexample
...
For a CMake-based port such as rp2, this will look a little different (note
that CMake is actually invoked by ``make``):
.. code-block:: bash
cd micropython/ports/rp2
make USER_C_MODULES=../../examples/usercmodule/micropython.cmake
Again, you may need to run ``make clean`` first for CMake to pick up the
user modules. The CMake build output lists the modules by name::
...
Including User C Module(s) from ../../examples/usercmodule/micropython.cmake
Found User C Module(s): usermod_cexample, usermod_cppexample
...
The contents of the top-level ``micropython.cmake`` can be used to control which
modules are enabled.
For your own projects it's more convenient to keep custom code out of the main
MicroPython source tree, so a typical project directory structure will look
like this::
my_project/
├── modules/
│ ├── example1/
│ │ ├── example1.c
│ │ ├── micropython.mk
│ │ └── micropython.cmake
│ ├── example2/
│ │ ├── example2.c
│ │ ├── micropython.mk
│ │ └── micropython.cmake
│ └── micropython.cmake
└── micropython/
├──ports/
... ├──stm32/
...
When building with Make set ``USER_C_MODULES`` to the ``my_project/modules``
directory. For example, building the stm32 port:
.. code-block:: bash
cd my_project/micropython/ports/stm32
make USER_C_MODULES=../../../modules
When building with CMake the top level ``micropython.cmake`` -- found directly
in the ``my_project/modules`` directory -- should ``include`` all of the modules
you want to have available:
.. code-block:: cmake
include(${CMAKE_CURRENT_LIST_DIR}/example1/micropython.cmake)
include(${CMAKE_CURRENT_LIST_DIR}/example2/micropython.cmake)
Then build with:
.. code-block:: bash
cd my_project/micropython/ports/esp32
make USER_C_MODULES=../../../../modules/micropython.cmake
Note that the esp32 port needs the extra ``..`` for relative paths due to the
location of its main ``CMakeLists.txt`` file. You can also specify absolute
paths to ``USER_C_MODULES``.
All modules specified by the ``USER_C_MODULES`` variable (either found in this
directory when using Make, or added via ``include`` when using CMake) will be
compiled, but only those which are enabled will be available for importing.
User modules are usually enabled by default (this is decided by the developer
of the module), in which case there is nothing more to do than set ``USER_C_MODULES``
as described above.
If a module is not enabled by default then the corresponding C preprocessor macro
must be enabled. This macro name can be found by searching for the ``MP_REGISTER_MODULE``
line in the module's source code (it usually appears at the end of the main source file).
This macro should be surrounded by a ``#if X`` / ``#endif`` pair, and the configuration
option ``X`` must be set to 1 using ``CFLAGS_EXTRA`` to make the module available. If
there is no ``#if X`` / ``#endif`` pair then the module is enabled by default.
For example, the ``examples/usercmodule/cexample`` module is enabled by default so
has the following line in its source code:
.. code-block:: c
MP_REGISTER_MODULE(MP_QSTR_cexample, example_user_cmodule);
Alternatively, to make this module disabled by default but selectable through
a preprocessor configuration option, it would be:
.. code-block:: c
#if MODULE_CEXAMPLE_ENABLED
MP_REGISTER_MODULE(MP_QSTR_cexample, example_user_cmodule);
#endif
In this case the module is enabled by adding ``CFLAGS_EXTRA=-DMODULE_CEXAMPLE_ENABLED=1``
to the ``make`` command, or editing ``mpconfigport.h`` or ``mpconfigboard.h`` to add
.. code-block:: c
#define MODULE_CEXAMPLE_ENABLED (1)
Note that the exact method depends on the port, as ports have different
structures. If not done correctly, it will compile but importing will
fail to find the module.
Module usage in MicroPython
---------------------------
Once built into your copy of MicroPython, the module
can now be accessed in Python just like any other builtin module, e.g.
.. code-block:: python
import cexample
print(cexample.add_ints(1, 3))
# should display 4


@@ -0,0 +1,317 @@
.. _compiler:
The Compiler
============
The compilation process in MicroPython involves the following steps:
* The lexer converts the stream of text that makes up a MicroPython program into tokens.
* The parser then converts the tokens into an abstract syntax tree (parse tree).
* Then bytecode or native code is emitted based on the parse tree.
For purposes of this discussion we are going to add a simple language feature ``add1``
that can be used in Python as:
.. code-block:: bash
>>> add1 3
4
>>>
The ``add1`` statement takes an integer as argument and adds ``1`` to it.
Adding a grammar rule
----------------------
MicroPython's grammar is based on the `CPython grammar <https://docs.python.org/3.5/reference/grammar.html>`_
and is defined in `py/grammar.h <https://github.com/micropython/micropython/blob/master/py/grammar.h>`_.
This grammar is what is used to parse MicroPython source files.
There are two macros you need to know to define a grammar rule: ``DEF_RULE`` and ``DEF_RULE_NC``.
``DEF_RULE`` allows you to define a rule with an associated compile function,
while ``DEF_RULE_NC`` has no compile (NC) function for it.
A simple grammar definition with a compile function for our new ``add1`` statement
looks like the following:
.. code-block:: c
DEF_RULE(add1_stmt, c(add1_stmt), and(2), tok(KW_ADD1), rule(testlist))
The second argument ``c(add1_stmt)`` is the corresponding compile function that should be implemented
in ``py/compile.c`` to turn this rule into executable code.
The third required argument can be ``or`` or ``and``. This specifies the number of nodes associated
with a statement. For example, in this case, our ``add1`` statement is similar to ADD1 in assembly
language. It takes one numeric argument. Therefore, the ``add1_stmt`` has two nodes associated with it.
One node is for the statement itself, i.e. the literal ``add1`` corresponding to ``KW_ADD1``,
and the other for its argument, a ``testlist`` rule which is the top-level expression rule.
.. note::
The ``add1`` rule here is just an example and not part of the standard
MicroPython grammar.
The fourth argument in this example is the token associated with the rule, ``KW_ADD1``. This token should be
defined in the lexer by editing ``py/lexer.h``.
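Since ``add1`` is only an example, there is no real compile function for it in
``py/compile.c``. Purely to illustrate what the ``c(add1_stmt)`` argument refers to,
a minimal compile function might look like the sketch below; it assumes the emitter's
``load_const_small_int`` and ``binary_op`` methods invoked via the ``EMIT_ARG`` macro,
and it glosses over what the statement should do with its result.
.. code-block:: c

    // Hypothetical compile function for the add1_stmt rule (illustrative only).
    STATIC void compile_add1_stmt(compiler_t *comp, mp_parse_node_struct_t *pns) {
        // Compile the testlist argument; this leaves its value on the stack.
        compile_node(comp, pns->nodes[0]);
        // Push the constant 1 and add it to the argument.
        EMIT_ARG(load_const_small_int, 1);
        EMIT_ARG(binary_op, MP_BINARY_OP_INPLACE_ADD);
        // A real implementation would also decide what to do with the result
        // left on the stack (e.g. print it at the REPL, or pop it).
    }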
Defining the same rule without a compile function is achieved by using the ``DEF_RULE_NC`` macro
and omitting the compile function argument:
.. code-block:: c
DEF_RULE_NC(add1_stmt, and(2), tok(KW_ADD1), rule(testlist))
The remaining arguments take on the same meaning. A rule without a compile function must
be handled explicitly by all rules that may have this rule as a node. Such NC-rules are usually
used to express sub-parts of a complicated grammar structure that cannot be expressed in a
single rule.
.. note::
The macros ``DEF_RULE`` and ``DEF_RULE_NC`` take other arguments. For an in-depth understanding of
supported parameters, see `py/grammar.h <https://github.com/micropython/micropython/blob/master/py/grammar.h>`_.
Adding a lexical token
----------------------
Every rule defined in the grammar should have a token associated with it that is defined in ``py/lexer.h``.
Add this token by editing the ``_mp_token_kind_t`` enum:
.. code-block:: c
:emphasize-lines: 12
typedef enum _mp_token_kind_t {
...
MP_TOKEN_KW_OR,
MP_TOKEN_KW_PASS,
MP_TOKEN_KW_RAISE,
MP_TOKEN_KW_RETURN,
MP_TOKEN_KW_TRY,
MP_TOKEN_KW_WHILE,
MP_TOKEN_KW_WITH,
MP_TOKEN_KW_YIELD,
MP_TOKEN_KW_ADD1,
...
} mp_token_kind_t;
Then also edit ``py/lexer.c`` to add the new keyword literal text:
.. code-block:: c
:emphasize-lines: 12
STATIC const char *const tok_kw[] = {
...
"or",
"pass",
"raise",
"return",
"try",
"while",
"with",
"yield",
"add1",
...
};
Notice that the keyword text is whatever you choose the feature's name to be. For consistency,
follow the naming convention of the existing keywords.
.. note::
The order of these keywords in ``py/lexer.c`` must match the order of tokens in the enum
defined in ``py/lexer.h``.
Parsing
-------
In the parsing stage the parser takes the tokens produced by the lexer and converts them to an abstract syntax tree (AST) or
*parse tree*. The implementation for the parser is defined in `py/parse.c <https://github.com/micropython/micropython/blob/master/py/parse.c>`_.
The parser also maintains a table of constants for use in different aspects of parsing, similar to what a
`symbol table <https://steemit.com/programming/@drifter1/writing-a-simple-compiler-on-my-own-symbol-table-basic-structure>`_
does.
Several optimizations like `constant folding <http://compileroptimizations.com/category/constant_folding.htm>`_
on integers for most operations, e.g. logical, binary, unary, etc., and optimizing enhancements on parentheses
around expressions are performed during this phase, along with some optimizations on strings.
It's worth noting that *docstrings* are discarded and not accessible to the compiler.
Even optimizations like `string interning <https://en.wikipedia.org/wiki/String_interning>`_ are
not applied to *docstrings*.
Compiler passes
---------------
Like many compilers, MicroPython compiles all code to MicroPython bytecode or native code. The functionality
that achieves this is implemented in `py/compile.c <https://github.com/micropython/micropython/blob/master/py/compile.c>`_.
The most relevant method you should know about is this:
.. code-block:: c
mp_obj_t mp_compile(mp_parse_tree_t *parse_tree, qstr source_file, bool is_repl) {
// Compile the input parse_tree to a raw-code structure.
mp_raw_code_t *rc = mp_compile_to_raw_code(parse_tree, source_file, is_repl);
// Create and return a function object that executes the outer module.
return mp_make_function_from_raw_code(rc, MP_OBJ_NULL, MP_OBJ_NULL);
}
The compiler compiles the code in four passes: scope, stack size, code size and emit.
Each pass runs the same C code over the same AST data structure, with different things
being computed each time based on the results of the previous pass.
First pass
~~~~~~~~~~
In the first pass, the compiler learns about the known identifiers (variables) and
their scope, being global, local, closed over, etc. In the same pass the emitter
(bytecode or native code) also computes the number of labels needed for the emitted
code.
.. code-block:: c
// Compile pass 1.
comp->emit = emit_bc;
comp->emit_method_table = &emit_bc_method_table;
uint max_num_labels = 0;
for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
if (s->emit_options == MP_EMIT_OPT_ASM) {
compile_scope_inline_asm(comp, s, MP_PASS_SCOPE);
} else {
compile_scope(comp, s, MP_PASS_SCOPE);
// Check if any implicitly declared variables should be closed over.
for (size_t i = 0; i < s->id_info_len; ++i) {
id_info_t *id = &s->id_info[i];
if (id->kind == ID_INFO_KIND_GLOBAL_IMPLICIT) {
scope_check_to_close_over(s, id);
}
}
}
...
}
Second and third passes
~~~~~~~~~~~~~~~~~~~~~~~
The second and third passes involve computing the Python stack size and code size
for the bytecode or native code. After the third pass the code size cannot change,
otherwise jump labels will be incorrect.
.. code-block:: c
for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
...
// Pass 2: Compute the Python stack size.
compile_scope(comp, s, MP_PASS_STACK_SIZE);
// Pass 3: Compute the code size.
if (comp->compile_error == MP_OBJ_NULL) {
compile_scope(comp, s, MP_PASS_CODE_SIZE);
}
...
}
Just before pass two there is a selection for the type of code to be emitted, which can
either be native or bytecode.
.. code-block:: c
// Choose the emitter type.
switch (s->emit_options) {
case MP_EMIT_OPT_NATIVE_PYTHON:
case MP_EMIT_OPT_VIPER:
if (emit_native == NULL) {
emit_native = NATIVE_EMITTER(new)(&comp->compile_error, &comp->next_label, max_num_labels);
}
comp->emit_method_table = NATIVE_EMITTER_TABLE;
comp->emit = emit_native;
break;
default:
comp->emit = emit_bc;
comp->emit_method_table = &emit_bc_method_table;
break;
}
The bytecode option is the default, but note that the native code option has a
further variant, ``VIPER``. See the
:ref:`Emitting native code <emitting_native_code>` section for more details on
viper annotations.
There is also support for *inline assembly code*, where assembly instructions are
written as Python function calls but are emitted directly as the corresponding
machine code. This assembler has only three passes (scope, code size, emit)
and uses a different implementation, not the ``compile_scope`` function.
See the `inline assembler tutorial <https://docs.micropython.org/en/latest/pyboard/tutorial/assembler.html#pyboard-tutorial-assembler>`_
for more details.
Fourth pass
~~~~~~~~~~~
The fourth pass emits the final code that can be executed, either bytecode in
the virtual machine, or native code directly by the CPU.
.. code-block:: c
for (scope_t *s = comp->scope_head; s != NULL && comp->compile_error == MP_OBJ_NULL; s = s->next) {
...
// Pass 4: Emit the compiled bytecode or native code.
if (comp->compile_error == MP_OBJ_NULL) {
compile_scope(comp, s, MP_PASS_EMIT);
}
}
Emitting bytecode
-----------------
Statements in Python code usually correspond to emitted bytecode, for example ``a + b``
generates "push a" then "push b" then "binary op add". Some statements do not emit
anything but instead affect other things like the scope of variables, for example
``global a``.
The implementation of a function that emits bytecode looks similar to this:
.. code-block:: c
void mp_emit_bc_unary_op(emit_t *emit, mp_unary_op_t op) {
emit_write_bytecode_byte(emit, 0, MP_BC_UNARY_OP_MULTI + op);
}
We use the unary operator expressions for an example here but the implementation
details are similar for other statements/expressions. The method ``emit_write_bytecode_byte()``
is a wrapper around the main function ``emit_get_cur_to_write_bytecode()`` that all
functions must call to emit bytecode.
.. _emitting_native_code:
Emitting native code
---------------------
Similar to how bytecode is generated, there should be a corresponding function in ``py/emitnative.c`` for each
code statement:
.. code-block:: c
STATIC void emit_native_unary_op(emit_t *emit, mp_unary_op_t op) {
vtype_kind_t vtype;
emit_pre_pop_reg(emit, &vtype, REG_ARG_2);
if (vtype == VTYPE_PYOBJ) {
emit_call_with_imm_arg(emit, MP_F_UNARY_OP, op, REG_ARG_1);
emit_post_push_reg(emit, VTYPE_PYOBJ, REG_RET);
} else {
adjust_stack(emit, 1);
EMIT_NATIVE_VIPER_TYPE_ERROR(emit,
MP_ERROR_TEXT("unary op %q not implemented"), mp_unary_op_method_name[op]);
}
}
The difference here is that we have to handle *viper typing*. Viper annotations allow
us to handle more than one type of variable. By default all variables are Python objects,
but with viper a variable can also be declared as a machine-typed variable like a native
integer or pointer. Viper can be thought of as a superset of Python, where normal Python
objects are handled as usual, while native machine variables are handled in an optimised
way by using direct machine instructions for the operations. Viper typing may break
Python equivalence because, for example, integers become native integers and can overflow
(unlike Python integers which extend automatically to arbitrary precision).


@@ -0,0 +1,18 @@
.. _extendingmicropython:
Extending MicroPython in C
==========================
This chapter describes options for implementing additional functionality in C, but from code
written outside of the main MicroPython repository. The first approach is useful for building
your own custom firmware with some project-specific additional modules or functions that can
be accessed from Python. The second approach is for building modules that can be loaded at runtime.
Please see the :ref:`library section <internals_library>` for more information on building core modules that
live in the main MicroPython repository.
.. toctree::
:maxdepth: 3
cmodules.rst
natmod.rst


@@ -0,0 +1,329 @@
.. _gettingstarted:
Getting Started
===============
This guide covers a step-by-step process on setting up version control, obtaining and building
a copy of the source code for a port, building the documentation, running tests, and a description of the
directory structure of the MicroPython code base.
Source control with git
-----------------------
MicroPython is hosted on `GitHub <https://github.com/micropython/micropython>`_ and uses
`Git <https://git-scm.com>`_ for source control. The workflow is such that
code is pulled and pushed to and from the main repository. Install the respective version
of Git for your operating system to follow through the rest of the steps.
.. note::
For a reference on the installation instructions, please refer to
the `Git installation instructions <https://git-scm.com/book/en/v2/Getting-Started-Installing-Git>`_.
Learn about the basic git commands in this `Git Handbook <https://guides.github.com/introduction/git-handbook/>`_
or any other sources on the internet.
.. note::
A .git-blame-ignore-revs file is included which avoids the output of git blame getting cluttered
by commits which are only for formatting code but have no functional changes. See `git blame documentation
<https://git-scm.com/docs/git-blame#Documentation/git-blame.txt---ignore-revltrevgt>`_ on how to use this.
Get the code
------------
It is recommended that you maintain a fork of the MicroPython repository for your development purposes.
The process of obtaining the source code includes the following:
#. Fork the repository https://github.com/micropython/micropython
#. You will now have a fork at https://github.com/<your-user-name>/micropython.
#. Clone the forked repository using the following command:
.. code-block:: bash
$ git clone https://github.com/<your-user-name>/micropython
Then, `configure the remote repositories <https://git-scm.com/book/en/v2/Git-Basics-Working-with-Remotes>`_ to be able to
collaborate on the MicroPython project.
Configure remote upstream:
.. code-block:: bash
$ cd micropython
$ git remote add upstream https://github.com/micropython/micropython
It is common to configure ``upstream`` and ``origin`` on a forked repository
to assist with sharing code changes. You can maintain your own mapping but
it is recommended that ``origin`` maps to your fork and ``upstream`` to the main
MicroPython repository.
After the above configuration, your setup should be similar to this:
.. code-block:: bash
$ git remote -v
origin https://github.com/<your-user-name>/micropython (fetch)
origin https://github.com/<your-user-name>/micropython (push)
upstream https://github.com/micropython/micropython (fetch)
upstream https://github.com/micropython/micropython (push)
You should now have a copy of the source code. By default, you are pointing
to the master branch. To prepare for further development, it is recommended
to work on a development branch.
.. code-block:: bash
$ git checkout -b dev-branch
You can give it any name. You will have to compile MicroPython whenever you change
to a different branch.
Compile and build the code
--------------------------
When compiling MicroPython, you compile a specific :term:`port`, usually
targeting a specific :ref:`board <glossary>`. Start by installing the required dependencies.
Then build the MicroPython cross-compiler, which is needed before you can compile and build a port.
The instructions below apply specifically when compiling on Linux;
the Windows instructions are provided in a later section.
.. _required_dependencies:
Required dependencies
~~~~~~~~~~~~~~~~~~~~~
Install the required dependencies for Linux:
.. code-block:: bash
$ sudo apt-get install build-essential libffi-dev git pkg-config
For the stm32 port, the ARM cross-compiler is required:
.. code-block:: bash
$ sudo apt-get install arm-none-eabi-gcc arm-none-eabi-binutils arm-none-eabi-newlib
See the `ARM GCC
toolchain <https://developer.arm.com/tools-and-software/open-source-software/developer-tools/gnu-toolchain/gnu-rm>`_
for the latest details.
Python is also required. Python 2 is supported for now, but we recommend using Python 3.
Check that you have Python available on your system:
.. code-block:: bash
$ python3
Python 3.5.0 (default, Jul 17 2020, 14:04:10)
[GCC 5.4.0 20160609] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>>
All supported ports have different dependency requirements, see their respective
`readme files <https://github.com/micropython/micropython/tree/master/ports>`_.
Building the MicroPython cross-compiler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Almost all ports require building ``mpy-cross`` first to perform pre-compilation
of Python code that will be included in the port firmware:
.. code-block:: bash
$ cd mpy-cross
$ make
.. note::
Note that ``mpy-cross`` must be built for the host architecture
and not the target architecture.
If it built successfully, you should see a message similar to this:
.. code-block:: bash
LINK mpy-cross
text data bss dec hex filename
279328 776 880 280984 44998 mpy-cross
.. note::
Use ``make -C mpy-cross`` to build the cross-compiler in one statement
without moving to the ``mpy-cross`` directory; otherwise, you will need
to do ``cd ..`` for the next steps.
Building the Unix port of MicroPython
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Unix port is a version of MicroPython that runs on Linux, macOS, and other Unix-like operating systems.
It's extremely useful for developing MicroPython as it avoids having to deploy your code to a device to test it.
In many ways, it works a lot like CPython's python binary.
To build the Unix port, make sure all Linux-related dependencies are installed as detailed
in the :ref:`required_dependencies` section. Also, make sure you have a working
environment for ``gcc`` and ``GNU make``. Ubuntu 20.04 has been used for the example
below but other unixes ought to work with little modification:
.. code-block:: bash
$ gcc --version
gcc (Ubuntu 9.3.0-10ubuntu2) 9.3.0
Copyright (C) 2019 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
Then build:
.. code-block:: bash
$ cd ports/unix
$ make submodules
$ make
If MicroPython built correctly, you should see the following:
.. code-block:: bash
LINK micropython
text data bss dec hex filename
412033 5680 2496 420209 66971 micropython
Now run it:
.. code-block:: bash
$ ./micropython
MicroPython v1.13-38-gc67012d-dirty on 2020-09-13; linux version
Use Ctrl-D to exit, Ctrl-E for paste mode
>>> print("hello world")
hello world
>>>
Building the Windows port
~~~~~~~~~~~~~~~~~~~~~~~~~
The Windows port includes a Visual Studio project file micropython.vcxproj that you can use to build micropython.exe.
It can be opened in Visual Studio or built from the command line using msbuild. Alternatively, it can be built using mingw,
either in Windows with Cygwin, or on Linux.
See `windows port documentation <https://github.com/micropython/micropython/tree/master/ports/windows>`_ for more information.
Building the STM32 port
~~~~~~~~~~~~~~~~~~~~~~~
Like the Unix port, you need to install some required dependencies
as detailed in the :ref:`required_dependencies` section, then build:
.. code-block:: bash
$ cd ports/stm32
$ make submodules
$ make
Please refer to the `stm32 documentation <https://github.com/micropython/micropython/tree/master/ports/stm32>`_
for more details on flashing the firmware.
.. note::
See the :ref:`required_dependencies` to make sure that all dependencies are installed for this port.
The cross-compiler is needed. ``arm-none-eabi-gcc`` should also be in the $PATH or specified manually
via CROSS_COMPILE, either by setting the environment variable or in the ``make`` command line arguments.
You can also specify which board to use:
.. code-block:: bash
$ cd ports/stm32
$ make submodules
$ make BOARD=<board>
See `ports/stm32/boards <https://github.com/micropython/micropython/tree/master/ports/stm32/boards>`_
for the available boards, e.g. "PYBV11" or "NUCLEO_WB55".
Building the documentation
--------------------------
MicroPython documentation is created using ``Sphinx``. If you have already
installed Python, then install ``Sphinx`` using ``pip``. It is recommended
that you use a virtual environment:
.. code-block:: bash
$ python3 -m venv env
$ source env/bin/activate
$ pip install sphinx
Navigate to the ``docs`` directory:
.. code-block:: bash
$ cd docs
Build the docs:
.. code-block:: bash
$ make html
Open ``docs/build/html/index.html`` in your browser to view the docs locally. Refer to the
documentation on `importing your documentation
<https://docs.readthedocs.io/en/stable/intro/import-guide.html>`_ to use Read the Docs.
Running the tests
-----------------
To run all tests in the test suite on the Unix port use:
.. code-block:: bash
$ cd ports/unix
$ make test
To run a selection of tests on a board/device connected over USB use:
.. code-block:: bash
$ cd tests
$ ./run-tests.py --target minimal --device /dev/ttyACM0
See also :ref:`writingtests`.
Folder structure
----------------
There are a couple of directories to take note of in terms of where certain implementation details
are. The following is a breakdown of the top-level folders in the source code.
py
Contains the compiler, runtime, and core library implementation.
mpy-cross
Has the MicroPython cross-compiler which pre-compiles the Python scripts to bytecode.
ports
Code for all the versions of MicroPython for the supported ports.
lib
Low-level C libraries used by any port which are mostly 3rd-party libraries.
drivers
Has drivers for specific hardware and intended to work across multiple ports.
extmod
Contains a C implementation of more non-core modules.
docs
Has the standard documentation found at https://docs.micropython.org/.
tests
An implementation of the test suite.
tools
Contains helper tools, including ``upip`` and the ``pyboard.py`` module.
examples
Example code for building MicroPython as a library as well as native modules.

Binary image files added (not shown): 6.2 KiB, 3.4 KiB, 3.6 KiB.


@@ -0,0 +1,26 @@
MicroPython Internals
=====================
This chapter covers a tour of MicroPython from the perspective of a developer contributing
to MicroPython. It acts as a comprehensive resource on the implementation details of MicroPython
for both novice and expert contributors.
Development around MicroPython usually involves modifying the core runtime, porting or
maintaining a new library. This guide describes in depth the implementation
details of MicroPython including a getting started guide, compiler internals, porting
MicroPython to a new platform and implementing a core MicroPython library.
.. toctree::
:maxdepth: 3
gettingstarted.rst
writingtests.rst
compiler.rst
memorymgt.rst
library.rst
optimizations.rst
qstr.rst
maps.rst
publiccapi.rst
extendingmicropython.rst
porting.rst


@@ -0,0 +1,86 @@
.. _internals_library:
Implementing a Module
=====================
This chapter details how to implement a core module in MicroPython.
MicroPython modules can be one of the following:
- Built-in module: A general module that is part of the MicroPython repository.
- User module: A module that is useful for your specific project that you maintain
in your own repository or private codebase.
- Dynamic module: A module that can be deployed and imported at runtime to your device.
A module in MicroPython can be implemented in one of the following locations:
- py/: A core library that mirrors core CPython functionality.
- extmod/: A CPython or MicroPython-specific module that is shared across multiple ports.
- ports/<port>/: A port-specific module.
.. note::
This chapter describes modules implemented in ``py/``, i.e. core modules.
See :ref:`extendingmicropython` for details on implementing an external module.
For details on port-specific modules, see :ref:`porting_to_a_board`.
Implementing a core module
--------------------------
Like CPython, MicroPython has core builtin modules that can be accessed through import statements.
An example is the ``gc`` module discussed in :ref:`memorymanagement`.
.. code-block:: bash
>>> import gc
>>> gc.enable()
>>>
MicroPython has several other builtin standard/core modules like ``io``, ``array`` etc.
Adding a new core module involves several modifications.
First, create the ``C`` file in the ``py/`` directory. In this example we are adding a
hypothetical new module ``subsystem`` in the file ``modsubsystem.c``:
.. code-block:: c
#include "py/builtin.h"
#include "py/runtime.h"
#if MICROPY_PY_SUBSYSTEM
// info()
STATIC mp_obj_t py_subsystem_info(void) {
return MP_OBJ_NEW_SMALL_INT(42);
}
MP_DEFINE_CONST_FUN_OBJ_0(subsystem_info_obj, py_subsystem_info);
STATIC const mp_rom_map_elem_t mp_module_subsystem_globals_table[] = {
{ MP_ROM_QSTR(MP_QSTR___name__), MP_ROM_QSTR(MP_QSTR_subsystem) },
{ MP_ROM_QSTR(MP_QSTR_info), MP_ROM_PTR(&subsystem_info_obj) },
};
STATIC MP_DEFINE_CONST_DICT(mp_module_subsystem_globals, mp_module_subsystem_globals_table);
const mp_obj_module_t mp_module_subsystem = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t *)&mp_module_subsystem_globals,
};
MP_REGISTER_MODULE(MP_QSTR_subsystem, mp_module_subsystem);
#endif
The implementation includes a definition of all functions related to the module and adds the
functions to the module's global table in ``mp_module_subsystem_globals_table``. It also
creates the module object with ``mp_module_subsystem``. The module is then registered with
the wider system via the ``MP_REGISTER_MODULE`` macro.
After building and running the modified MicroPython, the module should now be importable:
.. code-block:: bash
>>> import subsystem
>>> subsystem.info()
42
>>>
Our ``info()`` function currently returns just a single number but can be extended
to do anything. Similarly, more functions can be added to this new module.
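As a sketch of how such an extension might look (the ``double`` function and its names
here are hypothetical, not part of MicroPython), a function that takes an argument is
defined with ``MP_DEFINE_CONST_FUN_OBJ_1`` and added to the globals table:
.. code-block:: c

    // Hypothetical extension: subsystem.double(x) returns x * 2.
    STATIC mp_obj_t py_subsystem_double(mp_obj_t x_obj) {
        // mp_obj_get_int raises TypeError if x_obj is not an integer.
        mp_int_t x = mp_obj_get_int(x_obj);
        return mp_obj_new_int(x * 2);
    }
    STATIC MP_DEFINE_CONST_FUN_OBJ_1(subsystem_double_obj, py_subsystem_double);

    // Then add a corresponding entry to mp_module_subsystem_globals_table:
    //     { MP_ROM_QSTR(MP_QSTR_double), MP_ROM_PTR(&subsystem_double_obj) },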


@@ -0,0 +1,63 @@
.. _maps:
Maps and Dictionaries
=====================
MicroPython dictionaries and maps use techniques called open addressing and linear probing.
This chapter details both of these methods.
Open addressing
---------------
`Open addressing <https://en.wikipedia.org/wiki/Open_addressing>`_ is used to resolve collisions.
Collisions are common and happen when two items hash to the same
slot or location. For example, given a hash table set up as this:
.. image:: img/collision.png
If there is a request to fill slot ``0`` with ``70``, since the slot ``0`` is not empty, open addressing
finds the next available slot in the dictionary to service this request. This sequential search for an alternate
location is called *probing*. There are several sequence probing algorithms but MicroPython uses
linear probing that is described in the next section.
Linear probing
--------------
Linear probing is one of the methods for finding an available address or slot in a dictionary. In MicroPython,
it is used with open addressing. To service the request described above, unlike other probing algorithms,
linear probing assumes a fixed interval of ``1`` between probes. The request will therefore be serviced by
placing the item in the next free slot which is slot ``4`` in our example:
.. image:: img/linprob.png
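Insertion with linear probing can be sketched as follows. This is an illustration only,
not MicroPython's actual implementation (which lives in ``py/map.c``):
.. code-block:: c

    #include <stddef.h>

    #define EMPTY 0   // sentinel for an unused slot

    // Toy linear-probing insert: start at the hashed slot and walk forward one
    // slot at a time (wrapping around) until a free slot is found. A real table
    // must also track fullness and rehash when it runs out of free slots.
    static void probe_insert(int *table, size_t alloc, size_t hash, int value) {
        size_t pos = hash % alloc;
        while (table[pos] != EMPTY) {
            pos = (pos + 1) % alloc;   // fixed probe interval of 1
        }
        table[pos] = value;
    }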
The same methods, i.e. open addressing and linear probing, are used to search for an item in a dictionary.
Assume we want to search for the data item ``33``. The computed hash value will be ``2``, and looking at
slot ``2`` reveals ``33``, so we return ``True``. Searching for ``70`` is quite different, as there was a
collision at the time of insertion: its computed hash value is ``0``, but slot ``0`` currently holds ``44``.
Instead of simply returning ``False``, we perform a sequential search starting at slot ``1`` until the item
``70`` is found or we encounter a free slot. This is the general way of performing look-ups in hashes:
.. code-block:: c
// not yet found, keep searching in this table
pos = (pos + 1) % set->alloc;
if (pos == start_pos) {
// search got back to starting position, so index is not in table
if (lookup_kind & MP_MAP_LOOKUP_ADD_IF_NOT_FOUND) {
if (avail_slot != NULL) {
// there was an available slot, so use that
set->used++;
*avail_slot = index;
return index;
} else {
// not enough room in table, rehash it
mp_set_rehash(set);
// restart the search for the new element
start_pos = pos = hash % set->alloc;
}
}
} else {
return MP_OBJ_NULL;
}

View File

@@ -0,0 +1,141 @@
.. _memorymanagement:
Memory Management
=================
Unlike programming languages such as C/C++, MicroPython hides memory management
details from the developer by supporting automatic memory management.
Automatic memory management is a technique used by operating systems or applications to manage
the allocation and deallocation of memory on the programmer's behalf. This eliminates challenges such as forgetting to
free the memory allocated to an object, and avoids the critical issue of using memory
that has already been released. Automatic memory management takes many forms, one of them being
garbage collection (GC).
The garbage collector usually has two responsibilities:
#. Allocate new objects in available memory.
#. Free unused memory.
There are many GC algorithms but MicroPython uses the
`Mark and Sweep <https://en.wikipedia.org/wiki/Tracing_garbage_collection#Basic_algorithm>`_
policy for managing memory. This algorithm has a mark phase that traverses the heap marking all
live objects while the sweep phase goes through the heap reclaiming all unmarked objects.
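The following toy example sketches the two phases over a tiny fixed pool of objects. It is purely illustrative (all names are invented) and bears no relation to how ``py/gc.c`` is actually written:
.. code-block:: c
    #include <stdbool.h>
    #include <stdio.h>
    // One heap "object": a single outgoing reference plus GC bookkeeping flags.
    typedef struct node { struct node *ref; bool marked; bool in_use; } node_t;
    static node_t pool[3];
    // Mark phase: follow references from a root and flag everything reachable.
    static void mark(node_t *n) {
        while (n != NULL && !n->marked) {
            n->marked = true;
            n = n->ref;
        }
    }
    // Sweep phase: walk the whole pool and reclaim anything left unmarked.
    static void sweep(void) {
        for (int i = 0; i < 3; i++) {
            if (pool[i].in_use && !pool[i].marked) {
                pool[i].in_use = false;              // unreachable, so reclaim it
            }
            pool[i].marked = false;                  // reset for the next collection
        }
    }
    int main(void) {
        pool[0] = (node_t){ .ref = &pool[1], .in_use = true };  // root object
        pool[1] = (node_t){ .ref = NULL, .in_use = true };      // reachable via pool[0]
        pool[2] = (node_t){ .ref = NULL, .in_use = true };      // unreachable garbage
        mark(&pool[0]);
        sweep();
        printf("pool[2] still in use: %d\n", pool[2].in_use);   // prints 0
        return 0;
    }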
Garbage collection functionality in MicroPython is available through the ``gc`` built-in
module:
.. code-block:: bash
>>> x = 5
>>> x
5
>>> import gc
>>> gc.enable()
>>> gc.mem_alloc()
1312
>>> gc.mem_free()
2071392
>>> gc.collect()
19
>>> gc.disable()
>>>
Even when ``gc.disable()`` is invoked, collection can be triggered with ``gc.collect()``.
The object model
----------------
All MicroPython objects are referred to by the ``mp_obj_t`` data type.
This is usually word-sized (i.e. the same size as a pointer on the target architecture),
and is typically 32 bits (STM32, nRF, ESP32, Unix x86) or 64 bits (Unix x64).
It can also be greater than a word-size for certain object representations, for
example ``OBJ_REPR_D`` has a 64-bit sized ``mp_obj_t`` on a 32-bit architecture.
An ``mp_obj_t`` represents a MicroPython object, for example an integer, float, type, dict or
class instance. Some objects, like booleans and small integers, have their value stored directly
in the ``mp_obj_t`` value and do not require additional memory. Other objects have their value
stored elsewhere in memory (for example on the garbage-collected heap), and their ``mp_obj_t`` contains
a pointer to that memory. A portion of ``mp_obj_t`` is the tag which tells what type of object it is.
See ``py/mpconfig.h`` for the specific details of the available representations.
**Pointer tagging**
Because pointers are word-aligned, when they are stored in an ``mp_obj_t`` the
lower bits of this object handle will be zero. For example on a 32-bit architecture
the lower 2 bits will be zero:
``********|********|********|******00``
These bits are reserved for storing a tag. The tag stores extra information directly in the
handle, as opposed to introducing a new field in the object to store that information, which
would be less efficient. In MicroPython the tag tells if we are dealing with a small integer, an interned
(small) string or a concrete object, and different semantics apply to each of these.
For small integers the mapping is this:
``********|********|********|*******1``
Where the asterisks hold the actual integer value. For an interned string or an immediate
object (e.g. ``True``) the layout of the ``mp_obj_t`` value is, respectively:
``********|********|********|*****010``
``********|********|********|*****110``
While a concrete object that is none of the above takes the form:
``********|********|********|******00``
The stars here correspond to the address of the concrete object in memory.
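As an illustration of this kind of tagging, a word-sized handle could encode a small integer as follows. This is only a sketch with invented names; MicroPython's actual macros live in ``py/obj.h`` and depend on the chosen object representation:
.. code-block:: c
    #include <stdbool.h>
    #include <stdint.h>
    typedef uintptr_t obj_t;   // word-sized object handle (illustrative)
    // The lowest bit acts as the tag: 1 means "small integer stored inline".
    static inline bool is_small_int(obj_t o) { return (o & 1) != 0; }
    static inline obj_t new_small_int(intptr_t v) { return ((uintptr_t)v << 1) | 1; }
    static inline intptr_t small_int_value(obj_t o) { return (intptr_t)o >> 1; }
    // A handle with the low bits clear is simply a pointer to a heap object.
    static inline void *as_pointer(obj_t o) { return (void *)o; }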
Allocation of objects
----------------------
The value of a small integer is stored directly in the ``mp_obj_t`` and will be
allocated in-place, not on the heap or elsewhere. As such, creation of small
integers does not affect the heap. Similarly for interned strings that already have
their textual data stored elsewhere, and immediate values like ``None``, ``False``
and ``True``.
Everything else that is a concrete object is allocated on the heap, and its object structure is such that
a field is reserved in the object header to store the type of the object.
.. code-block:: bash
+++++++++++
+ +
+ type + object header
+ +
+++++++++++
+ + object items
+ +
+ +
+++++++++++
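In C terms, a concrete object therefore typically starts with a base field holding the type pointer, followed by its payload. A minimal sketch (the struct name and payload field are invented):
.. code-block:: c
    #include "py/obj.h"
    // Sketch of a heap-allocated object: the header stores the type, the rest
    // of the struct is the object's own data.
    typedef struct _example_obj_t {
        mp_obj_base_t base;   // object header: points to the object's type
        mp_int_t value;       // object items: payload specific to this object
    } example_obj_t;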
The heap's smallest unit of allocation is a block, which is four machine words in
size (16 bytes on a 32-bit machine, 32 bytes on a 64-bit machine).
Another structure also allocated on the heap tracks the allocation of
objects in each block. This structure is called a *bitmap*.
.. image:: img/bitmap.png
The bitmap tracks whether a block is "free" or "in use", using two bits per block to record
this state.
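As a rough sketch of the bookkeeping cost, derived only from the block and bitmap sizes described above (these constants are not taken from ``py/gc.c``):
.. code-block:: c
    #include <stdint.h>
    #include <stdio.h>
    #define WORDS_PER_BLOCK        4
    #define BYTES_PER_BLOCK        (WORDS_PER_BLOCK * sizeof(uintptr_t))  // 16 on a 32-bit build
    #define BLOCKS_PER_BITMAP_BYTE (8 / 2)                                // 2 bits of state per block
    int main(void) {
        size_t heap_bytes = 64 * 1024;                         // example 64 KiB heap
        size_t blocks = heap_bytes / BYTES_PER_BLOCK;          // 4096 blocks on 32-bit
        size_t bitmap_bytes = blocks / BLOCKS_PER_BITMAP_BYTE; // 1024 bytes of bitmap
        printf("%zu blocks, %zu bitmap bytes\n", blocks, bitmap_bytes);
        return 0;
    }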
The mark-sweep garbage collector manages the objects allocated on the heap, and also
utilises the bitmap to mark objects that are still in use.
See `py/gc.c <https://github.com/micropython/micropython/blob/master/py/gc.c>`_
for the full implementation of these details.
**Allocation: heap layout**
The heap is arranged such that it consists of blocks in pools. A block
can have different properties:
- *ATB (allocation table byte):* If set, then the block is a normal block
- *FREE:* Free block
- *HEAD:* Head of a chain of blocks
- *TAIL:* In the tail of a chain of blocks
- *MARK:* Marked head block
- *FTB (finaliser table byte):* If set, then the block has a finaliser

View File

@@ -0,0 +1,225 @@
.. _natmod:
Native machine code in .mpy files
=================================
This section describes how to build and work with .mpy files that contain native
machine code from a language other than Python. This allows you to
write code in a language like C, compile and link it into a .mpy file, and then
import this file like a normal Python module. This can be used for implementing
functionality which is performance critical, or for including an existing
library written in another language.
One of the main advantages of using native .mpy files is that native machine code
can be imported by a script dynamically, without the need to rebuild the main
MicroPython firmware. This is in contrast to :ref:`cmodules`, which also allows defining custom
modules in C, but those modules must be compiled into the main firmware image.
The focus here is on using C to build native modules, but in principle any
language which can be compiled to stand-alone machine code can be put into a
.mpy file.
A native .mpy module is built using the ``mpy_ld.py`` tool, which is found in the
``tools/`` directory of the project. This tool takes a set of object files
(.o files) and links them together to create a native .mpy file. It requires
CPython 3 and the library pyelftools v0.25 or greater.
Supported features and limitations
----------------------------------
A .mpy file can contain MicroPython bytecode and/or native machine code. If it
contains native machine code then the .mpy file has a specific architecture
associated with it. Current supported architectures are (these are the valid
options for the ``ARCH`` variable, see below):
* ``x86`` (32 bit)
* ``x64`` (64 bit x86)
* ``armv6m`` (ARM Thumb, eg Cortex-M0)
* ``armv7m`` (ARM Thumb 2, eg Cortex-M3)
* ``armv7emsp`` (ARM Thumb 2, single precision float, eg Cortex-M4F, Cortex-M7)
* ``armv7emdp`` (ARM Thumb 2, double precision float, eg Cortex-M7)
* ``xtensa`` (non-windowed, eg ESP8266)
* ``xtensawin`` (windowed with window size 8, eg ESP32)
When compiling and linking the native .mpy file the architecture must be chosen
and the corresponding file can only be imported on that architecture. For more
details about .mpy files see :ref:`mpy_files`.
Native code must be compiled as position independent code (PIC) and use a global
offset table (GOT), although the details of this vary from architecture to
architecture. When importing .mpy files with native code the import machinery
is able to do some basic relocation of the native code. This includes
relocating text, rodata and BSS sections.
Supported features of the linker and dynamic loader are:
* executable code (text)
* read-only data (rodata), including strings and constant data (arrays, structs, etc)
* zeroed data (BSS)
* pointers in text to text, rodata and BSS
* pointers in rodata to text, rodata and BSS
The known limitations are:
* data sections are not supported; workaround: use BSS data and initialise the
data values explicitly
* static BSS variables are not supported; workaround: use global BSS variables
So, if your C code has writable data, make sure the data is defined globally,
without an initialiser, and only written to within functions.
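A minimal sketch of this workaround (names are invented): keep the writable buffer as an uninitialised global so it lands in BSS, and fill it in at run time, for example from ``mpy_init``:
.. code-block:: c
    #include <stdint.h>
    // OK: global and uninitialised, so it ends up in BSS, which the loader supports.
    uint32_t lookup_table[16];
    // Not supported in a native .mpy: initialised data such as
    //     uint32_t lookup_table[16] = {1, 2, 3};
    // Instead, fill the table in at run time (e.g. call this from mpy_init).
    void init_lookup_table(void) {
        for (int i = 0; i < 16; i++) {
            lookup_table[i] = (uint32_t)(i * i);
        }
    }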
Linker limitation: the native module is not linked against the symbol table of the
full MicroPython firmware. Rather, it is linked against an explicit table of exported
symbols found in ``mp_fun_table`` (in ``py/nativeglue.h``), that is fixed at firmware
build time. It is thus not possible to simply call some arbitrary HAL/OS/RTOS/system
function, for example.
New symbols can be added to the end of the table and the firmware rebuilt.
The symbols also need to be added to ``tools/mpy_ld.py``'s ``fun_table`` dict in the
same location. This allows ``mpy_ld.py`` to be able to pick the new symbols up and
provide relocations for them when the mpy is imported. Finally, if the symbol is a
function, a macro or stub should be added to ``py/dynruntime.h`` to make it easy to
call the function.
Defining a native module
------------------------
A native .mpy module is defined by a set of files that are used to build the .mpy.
The filesystem layout consists of two main parts, the source files and the Makefile:
* In the simplest case only a single C source file is required, which contains all
the code that will be compiled into the .mpy module. This C source code must
include the ``py/dynruntime.h`` file to access the MicroPython dynamic API, and
must at least define a function called ``mpy_init``. This function will be the
entry point of the module, called when the module is imported.
The module can be split into multiple C source files if desired. Parts of the
module can also be implemented in Python. All source files should be listed in
the Makefile, by adding them to the ``SRC`` variable (see below). This includes
both C source files as well as any Python files which will be included in the
resulting .mpy file.
* The ``Makefile`` contains the build configuration for the module and lists the
source files used to build the .mpy module. It should define ``MPY_DIR`` as the
location of the MicroPython repository (to find header files, the relevant Makefile
fragment, and the ``mpy_ld.py`` tool), ``MOD`` as the name of the module, ``SRC``
as the list of source files, optionally specify the machine architecture via ``ARCH``,
and then include ``py/dynruntime.mk``.
Minimal example
---------------
This section provides a fully working example of a simple module named ``factorial``.
This module provides a single function ``factorial.factorial(x)`` which computes the
factorial of the input and returns the result.
Directory layout::
factorial/
├── factorial.c
└── Makefile
The file ``factorial.c`` contains:
.. code-block:: c
// Include the header file to get access to the MicroPython API
#include "py/dynruntime.h"
// Helper function to compute factorial
STATIC mp_int_t factorial_helper(mp_int_t x) {
if (x == 0) {
return 1;
}
return x * factorial_helper(x - 1);
}
// This is the function which will be called from Python, as factorial(x)
STATIC mp_obj_t factorial(mp_obj_t x_obj) {
// Extract the integer from the MicroPython input object
mp_int_t x = mp_obj_get_int(x_obj);
// Calculate the factorial
mp_int_t result = factorial_helper(x);
// Convert the result to a MicroPython integer object and return it
return mp_obj_new_int(result);
}
// Define a Python reference to the function above
STATIC MP_DEFINE_CONST_FUN_OBJ_1(factorial_obj, factorial);
// This is the entry point and is called when the module is imported
mp_obj_t mpy_init(mp_obj_fun_bc_t *self, size_t n_args, size_t n_kw, mp_obj_t *args) {
// This must be first, it sets up the globals dict and other things
MP_DYNRUNTIME_INIT_ENTRY
// Make the function available in the module's namespace
mp_store_global(MP_QSTR_factorial, MP_OBJ_FROM_PTR(&factorial_obj));
// This must be last, it restores the globals dict
MP_DYNRUNTIME_INIT_EXIT
}
The file ``Makefile`` contains:
.. code-block:: make
# Location of top-level MicroPython directory
MPY_DIR = ../../..
# Name of module
MOD = factorial
# Source files (.c or .py)
SRC = factorial.c
# Architecture to build for (x86, x64, armv6m, armv7m, xtensa, xtensawin)
ARCH = x64
# Include to get the rules for compiling and linking the module
include $(MPY_DIR)/py/dynruntime.mk
Compiling the module
--------------------
The prerequisite tools needed to build a native .mpy file are:
* The MicroPython repository (at least the ``py/`` and ``tools/`` directories).
* CPython 3, and the library pyelftools (eg ``pip install 'pyelftools>=0.25'``).
* GNU make.
* A C compiler for the target architecture (if C source is used).
* Optionally ``mpy-cross``, built from the MicroPython repository (if .py source is used).
Be sure to select the correct ``ARCH`` for the target you are going to run on.
Then build with::
$ make
Without modifying the Makefile you can specify the target architecture via::
$ make ARCH=armv7m
Module usage in MicroPython
---------------------------
Once the module is built there should be a file called ``factorial.mpy``. Copy
this so it is accessible on the filesystem of your MicroPython system and can be
found in the import path. The module can now be accessed in Python just like any
other module, for example::
import factorial
print(factorial.factorial(10))
# should display 3628800
Further examples
----------------
See ``examples/natmod/`` for further examples which show many of the available
features of native .mpy modules. Such features include:
* using multiple C source files
* including Python code alongside C code
* rodata and BSS data
* memory allocation
* use of floating point
* exception handling
* including external C libraries

View File

@@ -0,0 +1,72 @@
.. _optimizations:
Optimizations
=============
MicroPython uses several optimizations to save RAM while also ensuring the efficient
execution of programs. This chapter discusses some of these optimizations.
.. note::
:ref:`qstr` and :ref:`maps` describe other optimizations applied to strings and
dictionaries.
Frozen bytecode
---------------
When MicroPython loads Python code from the filesystem, it first has to parse the file into
a temporary in-memory representation, and then generate bytecode for execution, both of which
are stored in the heap (in RAM). This can lead to significant amounts of memory being used.
The MicroPython cross compiler can be used to generate
a ``.mpy`` file, containing the pre-compiled bytecode for a Python module. This will still
be loaded into RAM, but it avoids the additional overhead of the parsing stage.
As a further optimisation, the pre-compiled bytecode from a ``.mpy`` file can be "frozen"
into the firmware image as part of the main firmware compilation process, which means that
the bytecode will be executed from ROM. This can lead to a significant memory saving, and
reduce heap fragmentation.
Variables
---------
MicroPython processes local and global variables differently. Global variables
are stored and looked up from a global dictionary that is allocated on the heap
(note that each module has its own separate dict, and so its own namespace).
Local variables, on the other hand, are stored on the Python value stack, which may
live on the C stack or on the heap. They are accessed directly by their offset
within the Python stack, which is more efficient than a global lookup in a dict.
The length of global variable names also affects how much RAM is used as identifiers
are stored in RAM. The shorter the identifier, the less memory is used.
The other aspect is that constants declared with ``const()`` from the ``micropython`` module,
and whose names start with an underscore, are treated as proper constants: they are not
allocated or added to a dictionary, hence saving some memory. Therefore:
.. code-block:: python
from micropython import const
X = const(1)
_Y = const(2)
foo(X, _Y)
Compiles to:
.. code-block:: python
X = 1
foo(1, 2)
Allocation of memory
--------------------
Most of the common MicroPython constructs are not allocated on the heap.
However, the following are:
- Dynamic data structures like lists, mappings, etc.;
- Functions, classes and object instances;
- Imports; and
- First-time assignment of global variables (to create the slot in the global dict).
For a more user-centric perspective on optimization,
see `Maximising MicroPython speed <https://docs.micropython.org/en/latest/reference/speed_python.html>`_.

View File

@@ -0,0 +1,306 @@
.. _porting_to_a_board:
Porting MicroPython
===================
The MicroPython project contains several ports to different microcontroller families and
architectures. The project repository has a `ports <https://github.com/micropython/micropython/tree/master/ports>`_
directory containing a subdirectory for each supported port.
A port will typically contain definitions for multiple "boards", each of which is a specific piece of
hardware that the port can run on, e.g. a development kit or device.
The `minimal port <https://github.com/micropython/micropython/tree/master/ports/minimal>`_ is
available as a simplified reference implementation of a MicroPython port. It can run on both the
host system and an STM32F4xx MCU.
In general, starting a port requires:
- Setting up the toolchain (configuring Makefiles, etc).
- Implementing boot configuration and CPU initialization.
- Initialising basic drivers required for development and debugging (e.g. GPIO, UART).
- Performing the board-specific configurations.
- Implementing the port-specific modules.
Minimal MicroPython firmware
----------------------------
The best way to start porting MicroPython to a new board is by integrating a minimal
MicroPython interpreter. For this walkthrough, create a subdirectory for the new
port in the ``ports`` directory:
.. code-block:: bash
$ cd ports
$ mkdir example_port
The basic MicroPython firmware is implemented in the main port file, e.g. ``main.c``:
.. code-block:: c
#include "py/compile.h"
#include "py/gc.h"
#include "py/mperrno.h"
#include "py/stackctrl.h"
#include "shared/runtime/gchelper.h"
#include "shared/runtime/pyexec.h"
// Allocate memory for the MicroPython GC heap.
static char heap[4096];
int main(int argc, char **argv) {
// Initialise the MicroPython runtime.
mp_stack_ctrl_init();
gc_init(heap, heap + sizeof(heap));
mp_init();
// Start a normal REPL; will exit when ctrl-D is entered on a blank line.
pyexec_friendly_repl();
// Deinitialise the runtime.
gc_sweep_all();
mp_deinit();
return 0;
}
// Handle uncaught exceptions (should never be reached in a correct C implementation).
void nlr_jump_fail(void *val) {
for (;;) {
}
}
// Do a garbage collection cycle.
void gc_collect(void) {
gc_collect_start();
gc_helper_collect_regs_and_stack();
gc_collect_end();
}
// There is no filesystem so stat'ing returns nothing.
mp_import_stat_t mp_import_stat(const char *path) {
return MP_IMPORT_STAT_NO_EXIST;
}
// There is no filesystem so opening a file raises an exception.
mp_lexer_t *mp_lexer_new_from_file(const char *filename) {
mp_raise_OSError(MP_ENOENT);
}
We also need a Makefile at this point for the port:
.. code-block:: Makefile
# Include the core environment definitions; this will set $(TOP).
include ../../py/mkenv.mk
# Include py core make definitions.
include $(TOP)/py/py.mk
# Set CFLAGS and libraries.
CFLAGS = -I. -I$(BUILD) -I$(TOP)
LIBS = -lm
# Define the required source files.
SRC_C = \
main.c \
mphalport.c \
shared/readline/readline.c \
shared/runtime/gchelper_generic.c \
shared/runtime/pyexec.c \
shared/runtime/stdout_helpers.c \
# Define the required object files.
OBJ = $(PY_CORE_O) $(addprefix $(BUILD)/, $(SRC_C:.c=.o))
# Define the top-level target, the main firmware.
all: $(BUILD)/firmware.elf
# Define how to build the firmware.
$(BUILD)/firmware.elf: $(OBJ)
$(ECHO) "LINK $@"
$(Q)$(CC) $(LDFLAGS) -o $@ $^ $(LIBS)
$(Q)$(SIZE) $@
# Include remaining core make rules.
include $(TOP)/py/mkrules.mk
Remember to use proper tabs to indent the Makefile.
MicroPython Configurations
--------------------------
After integrating the minimal code above, the next step is to create the MicroPython
configuration files for the port. The compile-time configurations are specified in
``mpconfigport.h`` and additional hardware-abstraction functions, such as time keeping,
in ``mphalport.h``.
The following is an example of an ``mpconfigport.h`` file:
.. code-block:: c
#include <stdint.h>
// Python internal features.
#define MICROPY_ENABLE_GC (1)
#define MICROPY_HELPER_REPL (1)
#define MICROPY_ERROR_REPORTING (MICROPY_ERROR_REPORTING_TERSE)
#define MICROPY_FLOAT_IMPL (MICROPY_FLOAT_IMPL_FLOAT)
// Enable u-modules to be imported with their standard name, like sys.
#define MICROPY_MODULE_WEAK_LINKS (1)
// Fine control over Python builtins, classes, modules, etc.
#define MICROPY_PY_ASYNC_AWAIT (0)
#define MICROPY_PY_BUILTINS_SET (0)
#define MICROPY_PY_ATTRTUPLE (0)
#define MICROPY_PY_COLLECTIONS (0)
#define MICROPY_PY_MATH (0)
#define MICROPY_PY_IO (0)
#define MICROPY_PY_STRUCT (0)
// Type definitions for the specific machine.
typedef intptr_t mp_int_t; // must be pointer size
typedef uintptr_t mp_uint_t; // must be pointer size
typedef long mp_off_t;
// We need to provide a declaration/definition of alloca().
#include <alloca.h>
// Define the port's name and hardware.
#define MICROPY_HW_BOARD_NAME "example-board"
#define MICROPY_HW_MCU_NAME "unknown-cpu"
#define MP_STATE_PORT MP_STATE_VM
#define MICROPY_PORT_ROOT_POINTERS \
const char *readline_hist[8];
This configuration file contains machine-specific configurations, including whether different
MicroPython features should be enabled, e.g. ``#define MICROPY_ENABLE_GC (1)``. Setting this to
``(0)`` disables the feature.
Other configurations include type definitions, root pointers, board name, microcontroller name
etc.
Similarly, a minimal example ``mphalport.h`` file looks like this:
.. code-block:: c
static inline void mp_hal_set_interrupt_char(char c) {}
Support for standard input/output
---------------------------------
MicroPython requires at least a way to output characters, and to have a REPL it also
requires a way to input characters. Functions for this can be implemented in the file
``mphalport.c``, for example:
.. code-block:: c
#include <unistd.h>
#include "py/mpconfig.h"
// Receive single character, blocking until one is available.
int mp_hal_stdin_rx_chr(void) {
unsigned char c = 0;
int r = read(STDIN_FILENO, &c, 1);
(void)r;
return c;
}
// Send the string of given length.
void mp_hal_stdout_tx_strn(const char *str, mp_uint_t len) {
int r = write(STDOUT_FILENO, str, len);
(void)r;
}
These input and output functions have to be modified depending on the
specific board API. This example uses the standard input/output stream.
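On a bare-metal target the same two functions would instead talk to a UART. The following sketch assumes a hypothetical board driver providing ``uart_rx_ready()``, ``uart_rx_byte()`` and ``uart_tx_byte()``; these are not MicroPython APIs:
.. code-block:: c
    #include "py/mpconfig.h"
    // Assumed to be provided by the board's own UART driver.
    extern int uart_rx_ready(void);
    extern int uart_rx_byte(void);
    extern void uart_tx_byte(char c);
    // Receive single character, blocking until one is available.
    int mp_hal_stdin_rx_chr(void) {
        while (!uart_rx_ready()) {
            // busy-wait; a real port might sleep or service other work here
        }
        return uart_rx_byte();
    }
    // Send the string of given length.
    void mp_hal_stdout_tx_strn(const char *str, mp_uint_t len) {
        for (mp_uint_t i = 0; i < len; i++) {
            uart_tx_byte(str[i]);
        }
    }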
Building and running
--------------------
At this stage the directory of the new port should contain::
ports/example_port/
├── main.c
├── Makefile
├── mpconfigport.h
├── mphalport.c
└── mphalport.h
The port can now be built by running ``make`` (or otherwise, depending on your system).
If you are using the default compiler settings in the Makefile given above then this
will create an executable called ``build/firmware.elf`` which can be executed directly.
To get a functional REPL you may need to first configure the terminal to raw mode:
.. code-block:: bash
$ stty raw opost -echo
$ ./build/firmware.elf
That should give a MicroPython REPL. You can then run commands like:
.. code-block:: bash
MicroPython v1.13 on 2021-01-01; example-board with unknown-cpu
>>> import sys
>>> sys.implementation
('micropython', (1, 13, 0))
>>>
Use Ctrl-D to exit, and then run ``reset`` to reset the terminal.
Adding a module to the port
---------------------------
To add a custom module like ``myport``, first add the module definition in a file
``modmyport.c``:
.. code-block:: c
#include "py/runtime.h"
STATIC mp_obj_t myport_info(void) {
mp_printf(&mp_plat_print, "info about my port\n");
return mp_const_none;
}
STATIC MP_DEFINE_CONST_FUN_OBJ_0(myport_info_obj, myport_info);
STATIC const mp_rom_map_elem_t myport_module_globals_table[] = {
{ MP_OBJ_NEW_QSTR(MP_QSTR___name__), MP_OBJ_NEW_QSTR(MP_QSTR_myport) },
{ MP_ROM_QSTR(MP_QSTR_info), MP_ROM_PTR(&myport_info_obj) },
};
STATIC MP_DEFINE_CONST_DICT(myport_module_globals, myport_module_globals_table);
const mp_obj_module_t myport_module = {
.base = { &mp_type_module },
.globals = (mp_obj_dict_t *)&myport_module_globals,
};
MP_REGISTER_MODULE(MP_QSTR_myport, myport_module);
You will also need to edit the Makefile to add ``modmyport.c`` to the ``SRC_C`` list, and
a new line adding the same file to ``SRC_QSTR`` (so qstrs are searched for in this new file),
like this:
.. code-block:: Makefile
SRC_C = \
main.c \
modmyport.c \
mphalport.c \
...
SRC_QSTR += modmyport.c
If all went correctly then, after rebuilding, you should be able to import the new module:
.. code-block:: bash
>>> import myport
>>> myport.info()
info about my port
>>>

View File

@@ -0,0 +1,25 @@
.. _publiccapi:
The public C API
================
The public C-API comprises functions defined in all C header files in the ``py/``
directory. Most of the important core runtime C APIs are exposed in ``runtime.h`` and
``obj.h``.
The following is an example of public API functions from ``obj.h``:
.. code-block:: c
mp_obj_t mp_obj_new_list(size_t n, mp_obj_t *items);
mp_obj_t mp_obj_list_append(mp_obj_t self_in, mp_obj_t arg);
mp_obj_t mp_obj_list_remove(mp_obj_t self_in, mp_obj_t value);
void mp_obj_list_get(mp_obj_t self_in, size_t *len, mp_obj_t **items);
At its core, any functions and macros in header files make up the public
API and can be used to access very low-level details of MicroPython. Static
inline functions in header files are fine too; such functions will be
inlined in the code when used.
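For instance, the list functions shown above can be combined to build a list from C. A small sketch (the ``make_squares`` helper is invented for illustration):
.. code-block:: c
    #include "py/obj.h"
    #include "py/runtime.h"
    // Build the MicroPython list [0, 1, 4, ...] of n squares using the public API.
    static mp_obj_t make_squares(size_t n) {
        mp_obj_t list = mp_obj_new_list(0, NULL);   // start with an empty list
        for (size_t i = 0; i < n; i++) {
            mp_obj_list_append(list, mp_obj_new_int((mp_int_t)(i * i)));
        }
        return list;
    }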
Header files in the ``ports`` directory only expose functionality specific to a given port.

View File

@@ -0,0 +1,115 @@
.. _qstr:
MicroPython string interning
============================
MicroPython uses `string interning`_ to save both RAM and ROM. This avoids
having to store duplicate copies of the same string. Primarily, this applies to
identifiers in your code, as something like a function or variable name is very
likely to appear in multiple places in the code. In MicroPython an interned
string is called a QSTR (uniQue STRing).
A QSTR value (with type ``qstr``) is an index into a linked list of QSTR pools.
QSTRs store their length and a hash of their contents for fast comparison during
the de-duplication process. All bytecode operations that work with strings use
a QSTR argument.
Compile-time QSTR generation
----------------------------
In the MicroPython C code, any strings that should be interned in the final
firmware are written as ``MP_QSTR_Foo``. At compile time this will evaluate to
a ``qstr`` value that points to the index of ``"Foo"`` in the QSTR pool.
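For example, a hypothetical helper could refer to the string ``"Foo"`` purely through its qstr; the build system interns ``"Foo"`` simply because ``MP_QSTR_Foo`` appears in the source:
.. code-block:: c
    #include "py/obj.h"
    // MP_QSTR_Foo evaluates at compile time to the index of "Foo" in the QSTR pool.
    static mp_obj_t get_foo_name(void) {
        qstr q = MP_QSTR_Foo;               // just an integer index into the pool
        return MP_OBJ_NEW_QSTR(q);          // wrap it as a MicroPython string object
    }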
A multi-step process in the ``Makefile`` makes this work. In summary this
process has three parts:
1. Find all ``MP_QSTR_Foo`` tokens in the code.
2. Generate a static QSTR pool containing all the string data (including lengths
and hashes).
3. Replace all ``MP_QSTR_Foo`` (via the preprocessor) with their corresponding
index.
``MP_QSTR_Foo`` tokens are searched for in two sources:
1. All files referenced in ``$(SRC_QSTR)``. This is all C code (i.e. ``py``,
``extmod``, ``ports/stm32``) but not including third-party code such as
``lib``.
2. Additional ``$(QSTR_GLOBAL_DEPENDENCIES)`` (which includes ``mpconfig*.h``).
*Note:* ``frozen_mpy.c`` (generated by mpy-tool.py) has its own QSTR generation
and pool.
Some additional strings that can't be expressed using the ``MP_QSTR_Foo`` syntax
(e.g. they contain non-alphanumeric characters) are explicitly provided in
``qstrdefs.h`` and ``qstrdefsport.h`` via the ``$(QSTR_DEFS)`` variable.
Processing happens in the following stages:
1. ``qstr.i.last`` is the concatenation of putting every single input file
through the C pre-processor. This means that any conditionally disabled code
will be removed, and macros expanded. As a result, we don't add strings to the
pool that won't be used in the final firmware. Because at this stage (thanks
to the ``NO_QSTR`` macro added by ``QSTR_GEN_CFLAGS``) there is no
definition for ``MP_QSTR_Foo`` it passes through this stage unaffected. This
file also includes comments from the preprocessor that include line number
information. Note that this step only uses files that have changed, which
means that ``qstr.i.last`` will only contain data from files that have
changed since the last compile.
2. ``qstr.split`` is an empty file created after running ``makeqstrdefs.py split``
on qstr.i.last. It's just used as a dependency to indicate that the step ran.
This script outputs one file per input C file, ``genhdr/qstr/...file.c.qstr``,
which contains only the matched QSTRs. Each QSTR is printed as ``Q(Foo)``.
This step is necessary to combine the existing files with the new data
generated from the incremental update in ``qstr.i.last``.
3. ``qstrdefs.collected.h`` is the output of concatenating ``genhdr/qstr/*``
using ``makeqstrdefs.py cat``. This is now the full set of ``MP_QSTR_Foo``'s
found in the code, now formatted as ``Q(Foo)``, one-per-line, with duplicates.
This file is only updated if the set of qstrs has changed. A hash of the QSTR
data is written to another file (``qstrdefs.collected.h.hash``) which allows
it to track changes across builds.
4. Generate an enumeration, each entry of which maps a ``MP_QSTR_Foo`` to its corresponding index.
It concatenates ``qstrdefs.collected.h`` with ``qstrdefs*.h``, then it transforms
each line from ``Q(Foo)`` to ``"Q(Foo)"`` so they pass through the preprocessor
unchanged. Then the preprocessor is used to deal with any conditional
compilation in ``qstrdefs*.h``. Then the transformation is undone back to
``Q(Foo)``, and saved as ``qstrdefs.preprocessed.h``.
5. ``qstrdefs.generated.h`` is the output of ``makeqstrdata.py``. For each
``Q(Foo)`` in qstrdefs.preprocessed.h (plus some extra hard-coded ones), it outputs
``QDEF(MP_QSTR_Foo, (const byte*)"hash" "Foo")``.
Then in the main compile, two things happen with ``qstrdefs.generated.h``:
1. In qstr.h, each QDEF becomes an entry in an enum, which makes ``MP_QSTR_Foo``
available to code and equal to the index of that string in the QSTR table.
2. In qstr.c, the actual QSTR data table is generated as elements of the
``mp_qstr_const_pool->qstrs``.
.. _`string interning`: https://en.wikipedia.org/wiki/String_interning
Run-time QSTR generation
------------------------
Additional QSTR pools can be created at runtime so that strings can be added to
them. For example, the code::
foo[x] = 3
will need to create a QSTR for the value of ``x`` so it can be used by the
"load attr" bytecode.
Also, when compiling Python code, identifiers and literals need to have QSTRs
created. Note: only literals shorter than 10 characters become QSTRs. This is
because a regular string on the heap always takes up a minimum of 16 bytes (one
GC block), whereas QSTRs allow them to be packed more efficiently into the pool.
QSTR pools (and the underlying "chunks" that store the string data) are allocated
on-demand on the heap with a minimum size.

View File

@@ -0,0 +1,70 @@
.. _writingtests:
Writing tests
=============
Tests in MicroPython are located at the path ``tests/``. The following is a listing of
key directories and the ``run-tests.py`` runner script:
.. code-block:: bash
.
├── basics
├── extmod
├── float
├── micropython
├── run-tests.py
...
There are subfolders maintained to categorize the tests. Add a test by creating a new file in one of the
existing folders or in a new folder. It's also possible to make custom tests outside this tests folder,
which would be recommended for a custom port.
For example, add the following code in a file ``print.py`` in the ``tests/unix/`` subdirectory:
.. code-block:: python
def print_one():
print(1)
print_one()
If you run your tests, this test should appear in the test output:
.. code-block:: bash
$ cd ports/unix
$ make tests
skip unix/extra_coverage.py
pass unix/ffi_callback.py
pass unix/ffi_float.py
pass unix/ffi_float2.py
pass unix/print.py
pass unix/time.py
pass unix/time2.py
Tests are run by comparing the output from the test target against the output from CPython.
So any test should use print statements to indicate test results.
For tests that can't be compared to CPython (i.e. MicroPython-specific functionality),
you can provide a ``.py.exp`` file which will be used as the truth for comparison.
The other way to run tests, which is useful when running on targets other than the Unix port, is:
.. code-block:: bash
$ cd tests
$ ./run-tests.py
Then to run on a board:
.. code-block:: bash
$ ./run-tests.py --target minimal --device /dev/ttyACM0
And to run only a certain set of tests (eg a directory):
.. code-block:: bash
$ ./run-tests.py -d basics
$ ./run-tests.py float/builtin*.py

View File

@@ -0,0 +1,24 @@
.. _cpython_diffs:
MicroPython differences from CPython
====================================
MicroPython implements Python 3.4 and some select features of Python 3.5 and
above. The sections below describe the current status of these features.
.. toctree::
../differences/python_35.rst
../differences/python_36.rst
../differences/python_37.rst
../differences/python_38.rst
../differences/python_39.rst
For the features of Python that are implemented by MicroPython, there are
sometimes differences in their behaviour compared to standard Python. The
operations listed in the sections below produce results in MicroPython that
differ from those of standard Python.
.. toctree::
:maxdepth: 2

View File

@@ -0,0 +1,181 @@
.. _python_35:
Python 3.5
==========
Below is a list of finalised/accepted PEPs for Python 3.5, grouped by their impact on MicroPython.
+----------------------------------------------------------------------------------------------------------+---------------+
| **Extensions to the syntax:** | **Status** |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 448 <https://www.python.org/dev/peps/pep-0448/>`_ | additional unpacking generalizations | Partial |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 465 <https://www.python.org/dev/peps/pep-0465/>`_ | a new matrix multiplication operator | Completed |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 492 <https://www.python.org/dev/peps/pep-0492/>`_ | coroutines with async and await syntax | Completed |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| **Extensions and changes to runtime:** |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 461 <https://www.python.org/dev/peps/pep-0461/>`_ | % formatting for binary strings | Completed |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 475 <https://www.python.org/dev/peps/pep-0475/>`_ | retrying system calls that fail with EINTR | Completed |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 479 <https://www.python.org/dev/peps/pep-0479/>`_ | change StopIteration handling inside generators | Completed |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| **Standard library changes:** |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 471 <https://www.python.org/dev/peps/pep-0471/>`_ | os.scandir() | |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 485 <https://www.python.org/dev/peps/pep-0485/>`_ | math.isclose(), a function for testing | Completed |
| | approximate equality | |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| **Miscellaneous changes:** |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 441 <https://www.python.org/dev/peps/pep-0441/>`_ | improved Python zip application support | |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 486 <https://www.python.org/dev/peps/pep-0486/>`_ | make the Python Launcher aware of virtual | Not relevant |
| | environments | |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 484 <https://www.python.org/dev/peps/pep-0484/>`_ | type hints (advisory only) | In Progress |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 488 <https://www.python.org/dev/peps/pep-0488/>`_ | elimination of PYO files | Not relevant |
+--------------------------------------------------------+-------------------------------------------------+---------------+
| `PEP 489 <https://www.python.org/dev/peps/pep-0489/>`_ | redesigning extension module loading | |
+--------------------------------------------------------+-------------------------------------------------+---------------+
Other Language Changes:
+-----------------------------------------------------------------------------------------------------------+---------------+
| Added the *namereplace* error handlers. The *backslashreplace* error handlers now work with decoding and | |
| translating. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Property docstrings are now writable. This is especially useful for collections.namedtuple() docstrings | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Circular imports involving relative imports are now supported. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
New Modules:
* `typing <https://docs.python.org/3/whatsnew/3.5.html#typing>`_
* `zipapp <https://docs.python.org/3/whatsnew/3.5.html#zipapp>`_
Changes to built-in modules:
+-----------------------------------------------------------------------------------------------------------+---------------+
| `collections <https://docs.python.org/3/whatsnew/3.5.html#collections>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *OrderedDict* class is now implemented in C, which makes it 4 to 100 times faster. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| *OrderedDict.items()* , *OrderedDict.keys()* , *OrderedDict.values()* views now support reversed() | |
| iteration. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The deque class now defines *index()*, *insert()*, and *copy()*, and supports the + and * operators. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Docstrings produced by namedtuple() can now be updated. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The UserString class now implements the *__getnewargs__()*, *__rmod__()*, *casefold()*, *format_map()*, | |
| *isprintable()*, and *maketrans()* methods to match the corresponding methods of str. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `heapq <https://docs.python.org/3/whatsnew/3.5.html#heapq>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Element comparison in *merge()* can now be customized by passing a key function in a new optional key | |
| keyword argument, and a new optional *reverse* keyword argument can be used to reverse element comparison | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `io <https://docs.python.org/3/whatsnew/3.5.html#io>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new *BufferedIOBase.readinto1()* method, that uses at most one call to the underlying raw stream's | |
| *RawIOBase.read()* or *RawIOBase.readinto()* methods | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `json <https://docs.python.org/3/whatsnew/3.5.html#json>`_ | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| JSON decoder now raises JSONDecodeError instead of ValueError to provide better context information about | |
| the error. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `math <https://docs.python.org/3/whatsnew/3.5.html#math>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Two new constants have been added to the math module: *inf* and *nan*. | Completed |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new function *isclose()* provides a way to test for approximate equality. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new *gcd()* function has been added. The *fractions.gcd()* function is now deprecated. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `os <https://docs.python.org/3/whatsnew/3.5.html#os>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The new *scandir()* function returning an iterator of DirEntry objects has been added. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *urandom()* function now uses the *getrandom()* syscall on Linux 3.17 or newer, and *getentropy()* on | |
| OpenBSD 5.6 and newer, removing the need to use /dev/urandom and avoiding failures due to potential file | |
| descriptor exhaustion. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| New *get_blocking()* and *set_blocking()* functions allow getting and setting a file descriptor's blocking| |
| mode (O_NONBLOCK.) | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| There is a new *os.path.commonpath()* function returning the longest common sub-path of each passed | |
| pathname | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `re <https://docs.python.org/3/whatsnew/3.5.html#re>`_ | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| References and conditional references to groups with fixed length are now allowed in lookbehind assertions| |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The number of capturing groups in regular expressions is no longer limited to 100. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *sub()* and *subn()* functions now replace unmatched groups with empty strings instead of raising an | |
| exception. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *re.error* exceptions have new attributes, msg, pattern, pos, lineno, and colno, that provide better | |
| context information about the error | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `socket <https://docs.python.org/3/whatsnew/3.5.html#socket>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Functions with timeouts now use a monotonic clock, instead of a system clock. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new *socket.sendfile()* method allows sending a file over a socket by using the high-performance | |
| *os.sendfile()* function on UNIX, resulting in uploads being from 2 to 3 times faster than when using | |
| plain *socket.send()* | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *socket.sendall()* method no longer resets the socket timeout every time bytes are received or sent. | |
| The socket timeout is now the maximum total duration to send all data. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The backlog argument of the *socket.listen()* method is now optional. By default it is set to SOMAXCONN or| Completed |
| to 128, whichever is less. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `ssl <https://docs.python.org/3/whatsnew/3.5.html#ssl>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Memory BIO Support | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| Application-Layer Protocol Negotiation Support | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| There is a new *SSLSocket.version()* method to query the actual protocol version in use. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The SSLSocket class now implements a *SSLSocket.sendfile()* method. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *SSLSocket.send()* method now raises either the *ssl.SSLWantReadError* or *ssl.SSLWantWriteError* | |
| exception on a non-blocking socket if the operation would block. Previously, it would return 0. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *cert_time_to_seconds()* function now interprets the input time as UTC and not as local time, per RFC | |
| 5280. Additionally, the return value is always an int. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| New *SSLObject.shared_ciphers()* and *SSLSocket.shared_ciphers()* methods return the list of ciphers sent | |
| by the client during the handshake. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *SSLSocket.do_handshake()*, *SSLSocket.read()*, *SSLSocket.shutdown()*, and *SSLSocket.write()* | |
| methods of the SSLSocket class no longer reset the socket timeout every time bytes are received or sent. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *match_hostname()* function now supports matching of IP addresses. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `sys <https://docs.python.org/3/whatsnew/3.5.html#sys>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new *set_coroutine_wrapper()* function allows setting a global hook that will be called whenever a | |
| coroutine object is created by an async def function. A corresponding *get_coroutine_wrapper()* can be | |
| used to obtain a currently set wrapper. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| A new *is_finalizing()* function can be used to check if the Python interpreter is shutting down. | |
+-----------------------------------------------------------------------------------------------------------+---------------+
| `time <https://docs.python.org/3/whatsnew/3.5.html#time>`_ |
+-----------------------------------------------------------------------------------------------------------+---------------+
| The *monotonic()* function is now always available | |
+-----------------------------------------------------------------------------------------------------------+---------------+

View File

@@ -0,0 +1,191 @@
.. _python_36:
Python 3.6
==========
Python 3.6 beta 1 was released on 12 Sep 2016, and a summary of the new features can be found below:
+-----------------------------------------------------------------------------------------------------------+--------------+
| **New Syntax Features:** | **Status** |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 498 <https://www.python.org/dev/peps/pep-0498/>`_ | Literal String Formatting | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 515 <https://www.python.org/dev/peps/pep-0515/>`_ | Underscores in Numeric Literals | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 525 <https://www.python.org/dev/peps/pep-0525/>`_ | Asynchronous Generators | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 526 <https://www.python.org/dev/peps/pep-0526/>`_ | Syntax for Variable Annotations (provisional) | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 530 <https://www.python.org/dev/peps/pep-0530/>`_ | Asynchronous Comprehensions | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| **New Built-in Features:** |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 468 <https://www.python.org/dev/peps/pep-0468/>`_ | Preserving the order of *kwargs* in a function | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 487 <https://www.python.org/dev/peps/pep-0487/>`_ | Simpler customization of class creation | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 520 <https://www.python.org/dev/peps/pep-0520/>`_ | Preserving Class Attribute Definition Order | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| **Standard Library Changes:** |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 495 <https://www.python.org/dev/peps/pep-0495/>`_ | Local Time Disambiguation | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 506 <https://www.python.org/dev/peps/pep-0506/>`_ | Adding A Secrets Module To The Standard Library | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 519 <https://www.python.org/dev/peps/pep-0519/>`_ | Adding a file system path protocol | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| **CPython internals:** |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 509 <https://www.python.org/dev/peps/pep-0509/>`_ | Add a private version to dict | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 523 <https://www.python.org/dev/peps/pep-0523/>`_ | Adding a frame evaluation API to CPython | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| **Linux/Window Changes** |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 524 <https://www.python.org/dev/peps/pep-0524/>`_ | Make os.urandom() blocking on Linux | |
| | (during system startup) | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 528 <https://www.python.org/dev/peps/pep-0528/>`_ | Change Windows console encoding to UTF-8 | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
| `PEP 529 <https://www.python.org/dev/peps/pep-0529/>`_ | Change Windows filesystem encoding to UTF-8 | |
+--------------------------------------------------------+--------------------------------------------------+--------------+
Other Language Changes:
+-------------------------------------------------------------------------------------------------------------+---------------+
| A *global* or *nonlocal* statement must now textually appear before the first use of the affected name in | |
| the same scope. Previously this was a SyntaxWarning. | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| It is now possible to set a special method to None to indicate that the corresponding operation is not | |
| available. For example, if a class sets *__iter__()* to *None* , the class is not iterable. | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Long sequences of repeated traceback lines are now abbreviated as *[Previous line repeated {count} more | |
| times]* | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Import now raises the new exception *ModuleNotFoundError* when it cannot find a module. Code that currently | |
| checks for ImportError (in try-except) will still work. | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Class methods relying on zero-argument *super()* will now work correctly when called from metaclass methods | |
| during class creation. | |
+-------------------------------------------------------------------------------------------------------------+---------------+
Changes to built-in modules:
+--------------------------------------------------------------------------------------------------------------+----------------+
| `array <https://docs.python.org/3.6/whatsnew/3.6.html#array>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Exhausted iterators of *array.array* will now stay exhausted even if the iterated array is extended. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `binascii <https://docs.python.org/3.6/whatsnew/3.6.html#binascii>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The b2a_base64() function now accepts an optional newline keyword argument to control whether the newline | Completed |
| character is appended to the return value | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `cmath <https://docs.python.org/3.6/whatsnew/3.6.html#cmath>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new cmath.tau (τ) constant has been added | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| New constants: *cmath.inf* and *cmath.nan* to match *math.inf* and *math.nan* , and also *cmath.infj* and | |
| *cmath.nanj* to match the format used by complex repr | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `collections <https://docs.python.org/3.6/whatsnew/3.6.html#collections>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new Collection abstract base class has been added to represent sized iterable container classes | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new *Reversible* abstract base class represents iterable classes that also provide the *__reversed__()* | |
| method. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new *AsyncGenerator* abstract base class represents asynchronous generators. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The *namedtuple()* function now accepts an optional keyword argument module, which, when specified, is used | |
| for the *__module__* attribute of the returned named tuple class. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The verbose and rename arguments for *namedtuple()* are now keyword-only. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Recursive *collections.deque* instances can now be pickled. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `hashlib <https://docs.python.org/3.6/whatsnew/3.6.html#hashlib>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| BLAKE2 hash functions were added to the module. *blake2b()* and *blake2s()* are always available and support | |
| the full feature set of BLAKE2. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The SHA-3 hash functions *sha3_224()*, *sha3_256()*, *sha3_384()*, *sha3_512()*, and *SHAKE* hash functions | |
| *shake_128()* and *shake_256()* were added. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The password-based key derivation function *scrypt()* is now available with OpenSSL 1.1.0 and newer. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `json <https://docs.python.org/3.6/whatsnew/3.6.html#json>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| *json.load()* and *json.loads()* now support binary input. Encoded JSON should be represented using either | |
| UTF-8, UTF-16, or UTF-32. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `math <https://docs.python.org/3.6/whatsnew/3.6.html#math>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new math.tau (τ) constant has been added | Completed |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `os <https://docs.python.org/3.6/whatsnew/3.6.html#os>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| A new *close()* method allows explicitly closing a *scandir()* iterator. The *scandir()* iterator now | |
| supports the context manager protocol. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| On Linux, *os.urandom()* now blocks until the system urandom entropy pool is initialized to increase the | |
| security. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The Linux *getrandom()* syscall (get random bytes) is now exposed as the new *os.getrandom()* function. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `re <https://docs.python.org/3.6/whatsnew/3.6.html#re>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Added support of modifier spans in regular expressions. Examples: *'(?i:p)ython'* matches 'python' and | |
| 'Python', but not 'PYTHON'; *'(?i)g(?-i:v)r'* matches *'GvR'* and *'gvr'*, but not *'GVR'* . | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Match object groups can be accessed by *__getitem__*, which is equivalent to *group()*. So *mo['name']* is | |
| now equivalent to *mo.group('name')*. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Match objects now support index-like objects as group indices. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `socket <https://docs.python.org/3.6/whatsnew/3.6.html#socket>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The *ioctl()* function now supports the *SIO_LOOPBACK_FAST_PATH* control code. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The *getsockopt()* constants *SO_DOMAIN* , *SO_PROTOCOL*, *SO_PEERSEC* , and *SO_PASSSEC* are now supported. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The *setsockopt()* function now supports the *setsockopt(level, optname, None, optlen: int)* form.            |                |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The socket module now supports the address family *AF_ALG* to interface with Linux Kernel crypto API. | |
| *ALG_*, *SOL_ALG* and *sendmsg_afalg()* were added. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| New Linux constants *TCP_USER_TIMEOUT* and *TCP_CONGESTION* were added. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `ssl <https://docs.python.org/3.6/whatsnew/3.6.html#ssl>`_ |
+--------------------------------------------------------------------------------------------------------------+----------------+
| ssl supports OpenSSL 1.1.0. The minimum recommended version is 1.0.2.                                          |                |
+--------------------------------------------------------------------------------------------------------------+----------------+
| 3DES has been removed from the default cipher suites and ChaCha20 Poly1305 cipher suites have been added. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| *SSLContext* has better default configuration for options and ciphers. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| SSL session can be copied from one client-side connection to another with the new *SSLSession* class. TLS | |
| session resumption can speed up the initial handshake, reduce latency and improve performance. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new *get_ciphers()* method can be used to get a list of enabled ciphers in order of cipher priority. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| All constants and flags have been converted to *IntEnum* and *IntFlags*. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Server and client-side specific TLS protocols for *SSLContext* were added. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Added *SSLContext.post_handshake_auth* to enable and *ssl.SSLSocket.verify_client_post_handshake()* to | |
| initiate TLS 1.3 post-handshake authentication. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `struct <https://docs.python.org/3.6/whatsnew/3.6.html#struct>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| Added support for IEEE 754 half-precision floats via the 'e' format specifier.                                 |                |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `sys <https://docs.python.org/3.6/whatsnew/3.6.html#sys>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The new *getfilesystemencodeerrors()* function returns the name of the error mode used to convert between | |
| Unicode filenames and bytes filenames. | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| `zlib <https://docs.python.org/3.6/whatsnew/3.6.html#zlib>`_ | |
+--------------------------------------------------------------------------------------------------------------+----------------+
| The *compress()* and *decompress()* functions now accept keyword arguments | |
+--------------------------------------------------------------------------------------------------------------+----------------+

View File

@@ -0,0 +1,95 @@
.. _python_37:
Python 3.7
==========
New Features:
+--------------------------------------------------------+--------------------------------------------------+----------------+
| **Features:** | **Status** |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 538 <https://www.python.org/dev/peps/pep-0538/>`_ | Coercing the legacy C locale to a UTF-8 based | |
| | locale | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 539 <https://www.python.org/dev/peps/pep-0539/>`_ | A New C-API for Thread-Local Storage in CPython | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 540 <https://www.python.org/dev/peps/pep-0540/>`_ | UTF-8 mode | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 552 <https://www.python.org/dev/peps/pep-0552/>`_ | Deterministic pyc | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 553 <https://www.python.org/dev/peps/pep-0553/>`_ | Built-in breakpoint() | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 557 <https://www.python.org/dev/peps/pep-0557/>`_ | Data Classes | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 560 <https://www.python.org/dev/peps/pep-0560/>`_ | Core support for typing module and generic types | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_ | Module __getattr__ and __dir__ | Partially done |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 563 <https://www.python.org/dev/peps/pep-0563/>`_ | Postponed Evaluation of Annotations | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 564 <https://www.python.org/dev/peps/pep-0564/>`_ | Time functions with nanosecond resolution | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 565 <https://www.python.org/dev/peps/pep-0565/>`_ | Show DeprecationWarning in __main__ | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
| `PEP 567 <https://www.python.org/dev/peps/pep-0567/>`_ | Context Variables | |
+--------------------------------------------------------+--------------------------------------------------+----------------+
Other Language Changes:
+----------------------------------------------------------------------------------------------------------+----------------+
| async and await are now reserved keywords | Completed |
+----------------------------------------------------------------------------------------------------------+----------------+
| dict objects must preserve insertion-order | |
+----------------------------------------------------------------------------------------------------------+----------------+
| More than 255 arguments can now be passed to a function; a function can now have more than 255 parameters| |
+----------------------------------------------------------------------------------------------------------+----------------+
| bytes.fromhex() and bytearray.fromhex() now ignore all ASCII whitespace, not only spaces | |
+----------------------------------------------------------------------------------------------------------+----------------+
| str, bytes, and bytearray gained support for the new isascii() method, which can be used to test if a | |
| string or bytes contain only the ASCII characters | |
+----------------------------------------------------------------------------------------------------------+----------------+
| ImportError now displays the module name and module __file__ path when from ... import ... fails          |                |
+----------------------------------------------------------------------------------------------------------+----------------+
| Circular imports involving absolute imports with binding a submodule to a name are now supported | |
+----------------------------------------------------------------------------------------------------------+----------------+
| object.__format__(x, '') is now equivalent to str(x) rather than format(str(self), '') | |
+----------------------------------------------------------------------------------------------------------+----------------+
| In order to better support dynamic creation of stack traces, types.TracebackType can now be instantiated | |
| from Python code, and the tb_next attribute on tracebacks is now writable | |
+----------------------------------------------------------------------------------------------------------+----------------+
| When using the -m switch, sys.path[0] is now eagerly expanded to the full starting directory path, rather| |
| than being left as the empty string (which allows imports from the current working directory at the       |                |
| time when an import occurs) | |
+----------------------------------------------------------------------------------------------------------+----------------+
| The new -X importtime option or the PYTHONPROFILEIMPORTTIME environment variable can be used to show the | |
| timing of each module import | |
+----------------------------------------------------------------------------------------------------------+----------------+
Changes to built-in modules:
+------------------------------------------------------------------------------------------------------------+----------------+
| `asyncio <https://docs.python.org/3/whatsnew/3.7.html#asyncio>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| asyncio (many, may need a separate ticket) | |
+------------------------------------------------------------------------------------------------------------+----------------+
| `gc <https://docs.python.org/3/whatsnew/3.7.html#gc>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| New features include *gc.freeze()*, *gc.unfreeze()* and *gc.get_freeze_count()*                             |                |
+------------------------------------------------------------------------------------------------------------+----------------+
| `math <https://docs.python.org/3/whatsnew/3.7.html#math>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| math.remainder() added to implement IEEE 754-style remainder | |
+------------------------------------------------------------------------------------------------------------+----------------+
| `re <https://docs.python.org/3/whatsnew/3.7.html#re>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| A number of tidy up features including better support for splitting on empty strings and copy support for | |
| compiled expressions and match objects | |
+------------------------------------------------------------------------------------------------------------+----------------+
| `sys <https://docs.python.org/3/whatsnew/3.7.html#sys>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| sys.breakpointhook() added. sys.get(/set)_coroutine_origin_tracking_depth() added | |
+------------------------------------------------------------------------------------------------------------+----------------+
| `time <https://docs.python.org/3/whatsnew/3.7.html#time>`_ | |
+------------------------------------------------------------------------------------------------------------+----------------+
| Mostly updates to support nanosecond resolution in PEP 564, see above                                       |                |
+------------------------------------------------------------------------------------------------------------+----------------+

View File

@@ -0,0 +1,118 @@
.. _python_38:
Python 3.8
==========
Python 3.8.0 (final) was released on 14 October 2019. The Features for 3.8
are defined in `PEP 569 <https://www.python.org/dev/peps/pep-0569/#id9>`_ and
a detailed description of the changes can be found in What's New in `Python
3.8. <https://docs.python.org/3/whatsnew/3.8.html>`_
+--------------------------------------------------------+---------------------------------------------------+---------------+
| **Features:** | Status |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 570 <https://www.python.org/dev/peps/pep-0570/>`_ | Positional-only arguments | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 572 <https://www.python.org/dev/peps/pep-0572/>`_ | Assignment Expressions | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 574 <https://www.python.org/dev/peps/pep-0574/>`_ | Pickle protocol 5 with out-of-band data | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 578 <https://www.python.org/dev/peps/pep-0578/>`_ | Runtime audit hooks | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 587 <https://www.python.org/dev/peps/pep-0587/>`_ | Python Initialization Configuration | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| `PEP 590 <https://www.python.org/dev/peps/pep-0590/>`_ | Vectorcall: a fast calling protocol for CPython | |
+--------------------------------------------------------+---------------------------------------------------+---------------+
| **Miscellaneous** |
+------------------------------------------------------------------------------------------------------------+---------------+
| f-strings support = for self-documenting expressions and debugging | Completed |
+------------------------------------------------------------------------------------------------------------+---------------+
Other Language Changes:
+------------------------------------------------------------------------------------------------------------+-------------+
| A *continue* statement was illegal in the *finally* clause due to a problem with the implementation. In | Completed |
| Python 3.8 this restriction was lifted | |
+------------------------------------------------------------------------------------------------------------+-------------+
| The *bool*, *int* , and *fractions.Fraction* types now have an *as_integer_ratio()* method like that found | |
| in *float* and *decimal.Decimal* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Constructors of *int*, *float* and *complex* will now use the *__index__()* special method, if available | |
| and the corresponding method *__int__()*, *__float__()* or *__complex__()* is not available | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added support for *\N{name}* escapes in regular expressions                                                 |             |
+------------------------------------------------------------------------------------------------------------+-------------+
| Dict and dictviews are now iterable in reversed insertion order using *reversed()* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| The syntax allowed for keyword names in function calls was further restricted. In particular, | |
| f((keyword)=arg) is no longer allowed | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Generalized iterable unpacking in yield and return statements no longer requires enclosing parentheses | |
+------------------------------------------------------------------------------------------------------------+-------------+
| When a comma is missed in code such as [(10, 20) (30, 40)], the compiler displays a SyntaxWarning with a | |
| helpful suggestion | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Arithmetic operations between subclasses of *datetime.date* or *datetime.datetime* and *datetime.timedelta*| |
| objects now return an instance of the subclass, rather than the base class | |
+------------------------------------------------------------------------------------------------------------+-------------+
| When the Python interpreter is interrupted by *Ctrl-C (SIGINT)* and the resulting *KeyboardInterrupt* | |
| exception is not caught, the Python process now exits via a SIGINT signal or with the correct exit code | |
| such that the calling process can detect that it died due to a *Ctrl-C* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Some advanced styles of programming require updating the *types.CodeType* object for an existing function | |
+------------------------------------------------------------------------------------------------------------+-------------+
| For integers, the three-argument form of the pow() function now permits the exponent to be negative in the | |
| case where the base is relatively prime to the modulus | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Dict comprehensions have been synced-up with dict literals so that the key is computed first and the value | |
| second | |
+------------------------------------------------------------------------------------------------------------+-------------+
| The *object.__reduce__()* method can now return a tuple from two to six elements long | |
+------------------------------------------------------------------------------------------------------------+-------------+
Changes to built-in modules:
+------------------------------------------------------------------------------------------------------------+-------------+
| `asyncio` |
+------------------------------------------------------------------------------------------------------------+-------------+
| *asyncio.run()* has graduated from the provisional to stable API | Completed |
+------------------------------------------------------------------------------------------------------------+-------------+
| Running *python -m asyncio* launches a natively async REPL | |
+------------------------------------------------------------------------------------------------------------+-------------+
| The exception *asyncio.CancelledError* now inherits from *BaseException* rather than *Exception* and no | Completed |
| longer inherits from *concurrent.futures.CancelledError* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added *asyncio.Task.get_coro()* for getting the wrapped coroutine within an *asyncio.Task* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Asyncio tasks can now be named, either by passing the name keyword argument to *asyncio.create_task()* or | |
| the *create_task()* event loop method, or by calling the *set_name()* method on the task object | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added support for Happy Eyeballs to *asyncio.loop.create_connection()*. To specify the behavior, two new | |
| parameters have been added: *happy_eyeballs_delay* and *interleave*.                                        |             |
+------------------------------------------------------------------------------------------------------------+-------------+
| `gc` |
+------------------------------------------------------------------------------------------------------------+-------------+
| *get_objects()* can now receive an optional generation parameter indicating a generation to get objects | |
| from. (Note, though, that while *gc* is a built-in, *get_objects()* is not implemented for MicroPython) | |
+------------------------------------------------------------------------------------------------------------+-------------+
| `math` |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added new function *math.dist()* for computing Euclidean distance between two points | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Expanded the *math.hypot()* function to handle multiple dimensions | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added new function, *math.prod()*, as analogous function to *sum()* that returns the product of a "start" | |
| value (default: 1) times an iterable of numbers | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added two new combinatoric functions *math.perm()* and *math.comb()* | |
+------------------------------------------------------------------------------------------------------------+-------------+
| Added a new function *math.isqrt()* for computing accurate integer square roots without conversion to | |
| floating point | |
+------------------------------------------------------------------------------------------------------------+-------------+
| The function *math.factorial()* no longer accepts arguments that are not int-like | Completed |
+------------------------------------------------------------------------------------------------------------+-------------+
| `sys` |
+------------------------------------------------------------------------------------------------------------+-------------+
| Add new *sys.unraisablehook()* function which can be overridden to control how "unraisable exceptions" | |
| are handled | |
+------------------------------------------------------------------------------------------------------------+-------------+

View File

@@ -0,0 +1,121 @@
.. _python_39:
Python 3.9
==========
Python 3.9.0 (final) was released on 5 October 2020. The Features for 3.9 are
defined in `PEP 596 <https://www.python.org/dev/peps/pep-0596/#features-for-3-9>`_
and a detailed description of the changes can be found in
`What's New in Python 3.9 <https://docs.python.org/3/whatsnew/3.9.html>`_
+--------------------------------------------------------+----------------------------------------------------+--------------+
| **Features:** | | **Status** |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 573 <https://www.python.org/dev/peps/pep-0573/>`_ | fast access to module state from methods of C | |
| | extension types | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 584 <https://www.python.org/dev/peps/pep-0584/>`_ | union operators added to dict | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 585 <https://www.python.org/dev/peps/pep-0585/>`_ | type hinting generics in standard collections      |              |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 593 <https://www.python.org/dev/peps/pep-0593/>`_ | flexible function and variable annotations | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 602 <https://www.python.org/dev/peps/pep-0602/>`_ | CPython adopts an annual release cycle. Instead of | |
|                                                        | annual, aiming for a two-month release cycle       |              |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 614 <https://www.python.org/dev/peps/pep-0614/>`_ | relaxed grammar restrictions on decorators | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 615 <https://www.python.org/dev/peps/pep-0615/>`_ | the IANA Time Zone Database is now present in the | |
| | standard library in the zoneinfo module | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 616 <https://www.python.org/dev/peps/pep-0616/>`_ | string methods to remove prefixes and suffixes | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
| `PEP 617 <https://www.python.org/dev/peps/pep-0617/>`_ | CPython now uses a new parser based on PEG | |
+--------------------------------------------------------+----------------------------------------------------+--------------+
Other Language Changes:
+-------------------------------------------------------------------------------------------------------------+---------------+
| *__import__()* now raises *ImportError* instead of *ValueError* | Completed |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Python now gets the absolute path of the script filename specified on the command line (ex: *python3* | |
| *script.py*): the *__file__* attribute of the *__main__* module became an absolute path, rather than a | |
| relative path | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| By default, for best performance, the errors argument is only checked at the first encoding/decoding error | |
| and the encoding argument is sometimes ignored for empty strings | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| *"".replace("", s, n)* now returns *s* instead of an empty string for all non-zero n. It is now consistent | |
| with *"".replace("", s)* | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Any valid expression can now be used as a decorator. Previously, the grammar was much more restrictive | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Parallel running of *aclose()* / *asend()* / *athrow()* is now prohibited, and *ag_running* now reflects | |
| the actual running status of the async generator | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Unexpected errors in calling the *__iter__* method are no longer masked by TypeError in the in operator and | |
| functions contains(), indexOf() and countOf() of the operator module | |
+-------------------------------------------------------------------------------------------------------------+---------------+
| Unparenthesized lambda expressions can no longer be the expression part in an if clause in comprehensions | |
| and generator expressions | |
+-------------------------------------------------------------------------------------------------------------+---------------+
Changes to built-in modules:
+---------------------------------------------------------------------------------------------------------------+---------------+
| `asyncio` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Due to significant security concerns, the reuse_address parameter of *asyncio.loop.create_datagram_endpoint()*| |
| is no longer supported | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added a new coroutine *shutdown_default_executor()* that schedules a shutdown for the default executor that | |
| waits on the *ThreadPoolExecutor* to finish closing. Also, *asyncio.run()* has been updated to use the new | |
| coroutine. | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added *asyncio.PidfdChildWatcher*, a Linux-specific child watcher implementation that polls process file | |
| descriptors | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added a new coroutine *asyncio.to_thread()*                                                                    |               |
+---------------------------------------------------------------------------------------------------------------+---------------+
| When cancelling the task due to a timeout, *asyncio.wait_for()* will now wait until the cancellation is | |
| complete also in the case when timeout is <= 0, like it does with positive timeouts | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| *asyncio* now raises *TypeError* when calling incompatible methods with an *ssl.SSLSocket* socket             |               |
+---------------------------------------------------------------------------------------------------------------+---------------+
| `gc` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Garbage collection does not block on resurrected objects | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added a new function *gc.is_finalized()* to check if an object has been finalized by the garbage collector | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| `math` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Expanded the *math.gcd()* function to handle multiple arguments. Formerly, it only supported two arguments | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added *math.lcm()*: return the least common multiple of specified arguments | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added *math.nextafter()*: return the next floating-point value after x towards y | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added *math.ulp()*: return the value of the least significant bit of a float | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| `os` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Exposed the Linux-specific *os.pidfd_open()* and *os.P_PIDFD* | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| The *os.unsetenv()* function is now also available on Windows | Completed |
+---------------------------------------------------------------------------------------------------------------+---------------+
| The *os.putenv()* and *os.unsetenv()* functions are now always available | Completed |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added *os.waitstatus_to_exitcode()* function: convert a wait status to an exit code | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| `random` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added a new *random.Random.randbytes* method: generate random bytes | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| `sys` |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Added a new *sys.platlibdir* attribute: name of the platform-specific library directory | |
+---------------------------------------------------------------------------------------------------------------+---------------+
| Previously, *sys.stderr* was block-buffered when non-interactive. Now stderr defaults to always being | |
| line-buffered | |
+---------------------------------------------------------------------------------------------------------------+---------------+

View File

@@ -0,0 +1,64 @@
.. _esp32_general:
General information about the ESP32 port
========================================
The ESP32 is a popular WiFi and Bluetooth enabled System-on-Chip (SoC) by
Espressif Systems.
Multitude of boards
-------------------
There is a multitude of modules and boards from different sources which carry
the ESP32 chip. MicroPython tries to provide a generic port which would run on
as many boards/modules as possible, but there may be limitations. Espressif
development boards are taken as reference for the port (for example, testing is
performed on them). For any board you are using please make sure you have a
datasheet, schematics and other reference materials so you can look up any
board-specific functions.
To make a generic ESP32 port and support as many boards as possible the
following design and implementation decisions were made:
* GPIO pin numbering is based on ESP32 chip numbering. Please have the manual/pin
diagram of your board at hand to find correspondence between your board pins and
actual ESP32 pins.
* All pins are supported by MicroPython but not all are usable on any given board.
For example, pins that are connected to external SPI flash should not be used,
and a board may only expose a certain selection of pins.
Technical specifications and SoC datasheets
-------------------------------------------
The datasheets and other reference material for ESP32 chip are available
from the vendor site: https://www.espressif.com/en/support/download/documents?keys=esp32 .
They are the primary reference for the chip technical specifications, capabilities,
operating modes, internal functioning, etc.
For your convenience, some of the technical specifications are provided below:
* Architecture: Xtensa Dual-Core 32-bit LX6
* CPU frequency: up to 240MHz
* Total RAM available: 528KB (part of it reserved for system)
* BootROM: 448KB
* Internal FlashROM: none
* External FlashROM: code and data, via SPI Flash; usual size 4MB
* GPIO: 34 (GPIOs are multiplexed with other functions, including
external FlashROM, UART, etc.)
* UART: 3 RX/TX UART (no hardware handshaking), one TX-only UART
* SPI: 4 SPI interfaces (one used for FlashROM)
* I2C: 2 I2C (bitbang implementation available on any pins)
* I2S: 2
* ADC: 12-bit SAR ADC up to 18 channels
* DAC: 2 8-bit DACs
* RMT: 8 channels allowing accurate pulse transmit/receive
* Programming: using BootROM bootloader from UART - due to external FlashROM
and always-available BootROM bootloader, the ESP32 is not brickable
For more information see the ESP32 datasheet: https://www.espressif.com/sites/default/files/documentation/esp32_datasheet_en.pdf
MicroPython is implemented on top of the ESP-IDF, Espressif's development
framework for the ESP32. This is a FreeRTOS based system. See the
`ESP-IDF Programming Guide <https://docs.espressif.com/projects/esp-idf/en/latest/index.html>`_
for details.

Binary file not shown.


View File

@@ -0,0 +1,756 @@
.. _esp32_quickref:
Quick reference for the ESP32
=============================
.. image:: img/esp32.jpg
:alt: ESP32 board
:width: 640px
The Espressif ESP32 Development Board (image attribution: Adafruit).
Below is a quick reference for ESP32-based boards. If it is your first time
working with this board it may be useful to get an overview of the microcontroller:
.. toctree::
:maxdepth: 1
general.rst
tutorial/index.rst
Installing MicroPython
----------------------
See the corresponding section of tutorial: :ref:`esp32_intro`. It also includes
a troubleshooting subsection.
General board control
---------------------
The MicroPython REPL is on UART0 (GPIO1=TX, GPIO3=RX) at baudrate 115200.
Tab-completion is useful to find out what methods an object has.
Paste mode (ctrl-E) is useful to paste a large slab of Python code into
the REPL.
The :mod:`machine` module::
import machine
machine.freq() # get the current frequency of the CPU
machine.freq(240000000) # set the CPU frequency to 240 MHz
The :mod:`esp` module::
import esp
esp.osdebug(None) # turn off vendor O/S debugging messages
esp.osdebug(0) # redirect vendor O/S debugging messages to UART(0)
# low level methods to interact with flash storage
esp.flash_size()
esp.flash_user_start()
esp.flash_erase(sector_no)
esp.flash_write(byte_offset, buffer)
esp.flash_read(byte_offset, buffer)
The :mod:`esp32` module::
import esp32
esp32.hall_sensor() # read the internal hall sensor
esp32.raw_temperature() # read the internal temperature of the MCU, in Fahrenheit
esp32.ULP() # access to the Ultra-Low-Power Co-processor
Note that the temperature sensor in the ESP32 will typically read higher than
ambient due to the IC getting warm while it runs. This effect can be minimised
by reading the temperature sensor immediately after waking up from sleep.
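For example, a minimal sketch of this approach (the raw value is in Fahrenheit
and uncalibrated, so the offset from ambient is board-dependent)::

    import esp32, machine

    machine.lightsleep(1000)        # light-sleep briefly so the die cools down
    print(esp32.raw_temperature())  # read immediately after wake-up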
Networking
----------
The :mod:`network` module::
import network
wlan = network.WLAN(network.STA_IF) # create station interface
wlan.active(True) # activate the interface
wlan.scan() # scan for access points
wlan.isconnected() # check if the station is connected to an AP
wlan.connect('essid', 'password') # connect to an AP
wlan.config('mac') # get the interface's MAC address
wlan.ifconfig() # get the interface's IP/netmask/gw/DNS addresses
ap = network.WLAN(network.AP_IF) # create access-point interface
ap.config(essid='ESP-AP') # set the ESSID of the access point
ap.config(max_clients=10) # set how many clients can connect to the network
ap.active(True) # activate the interface
A useful function for connecting to your local WiFi network is::
    def do_connect():
        import network
        wlan = network.WLAN(network.STA_IF)
        wlan.active(True)
        if not wlan.isconnected():
            print('connecting to network...')
            wlan.connect('essid', 'password')
            while not wlan.isconnected():
                pass
        print('network config:', wlan.ifconfig())
Once the network is established the :mod:`socket <socket>` module can be used
to create and use TCP/UDP sockets as usual, and the ``urequests`` module for
convenient HTTP requests.
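For example, a short HTTP GET using ``urequests`` (assuming the package is
available in your firmware; the URL is just a test page)::

    import urequests

    r = urequests.get('http://micropython.org/ks/test.html')
    print(r.status_code)   # HTTP status code
    print(r.text)          # response body as a string
    r.close()              # always close to free the underlying socket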
After a call to ``wlan.connect()``, the device will by default retry to connect
**forever**, even when the authentication failed or no AP is in range.
``wlan.status()`` will return ``network.STAT_CONNECTING`` in this state until a
connection succeeds or the interface gets disabled. This can be changed by
calling ``wlan.config(reconnects=n)``, where n is the number of desired reconnect
attempts (0 means it won't retry, -1 will restore the default behaviour of trying
to reconnect forever).
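For example, a sketch that limits the retries and polls the result, building on
the ``wlan`` station object created above::

    wlan.config(reconnects=3)                      # give up after 3 failed attempts
    wlan.connect('essid', 'password')
    while wlan.status() == network.STAT_CONNECTING:
        pass                                       # still trying (or retrying)
    print('connected' if wlan.isconnected() else 'failed to connect')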
Delay and timing
----------------
Use the :mod:`time <time>` module::
import time
time.sleep(1) # sleep for 1 second
time.sleep_ms(500) # sleep for 500 milliseconds
time.sleep_us(10) # sleep for 10 microseconds
start = time.ticks_ms() # get millisecond counter
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
Timers
------
The ESP32 port has four hardware timers. Use the :ref:`machine.Timer <machine.Timer>` class
with a timer ID from 0 to 3 (inclusive)::
from machine import Timer
tim0 = Timer(0)
tim0.init(period=5000, mode=Timer.ONE_SHOT, callback=lambda t:print(0))
tim1 = Timer(1)
tim1.init(period=2000, mode=Timer.PERIODIC, callback=lambda t:print(1))
The period is in milliseconds.
Virtual timers are not currently supported on this port.
.. _Pins_and_GPIO:
Pins and GPIO
-------------
Use the :ref:`machine.Pin <machine.Pin>` class::
from machine import Pin
p0 = Pin(0, Pin.OUT) # create output pin on GPIO0
p0.on() # set pin to "on" (high) level
p0.off() # set pin to "off" (low) level
p0.value(1) # set pin to on/high
p2 = Pin(2, Pin.IN) # create input pin on GPIO2
print(p2.value()) # get value, 0 or 1
p4 = Pin(4, Pin.IN, Pin.PULL_UP) # enable internal pull-up resistor
p5 = Pin(5, Pin.OUT, value=1) # set pin high on creation
p6 = Pin(6, Pin.OUT, drive=Pin.DRIVE_3) # set maximum drive strength
Available Pins are from the following ranges (inclusive): 0-19, 21-23, 25-27, 32-39.
These correspond to the actual GPIO pin numbers of ESP32 chip. Note that many
end-user boards use their own adhoc pin numbering (marked e.g. D0, D1, ...).
For mapping between board logical pins and physical chip pins consult your board
documentation.
Four drive strengths are supported, using the ``drive`` keyword argument to the
``Pin()`` constructor or ``Pin.init()`` method, with different corresponding
safe maximum source/sink currents and approximate internal driver resistances:
- ``Pin.DRIVE_0``: 5mA / 130 ohm
- ``Pin.DRIVE_1``: 10mA / 60 ohm
- ``Pin.DRIVE_2``: 20mA / 30 ohm (default strength if not configured)
- ``Pin.DRIVE_3``: 40mA / 15 ohm
The ``hold=`` keyword argument to ``Pin()`` and ``Pin.init()`` will enable the
ESP32 "pad hold" feature. When set to ``True``, the pin configuration
(direction, pull resistors and output value) will be held and any further
changes (including changing the output level) will not be applied. Setting
``hold=False`` will immediately apply any outstanding pin configuration changes
and release the pin. Using ``hold=True`` while a pin is already held will apply
any configuration changes and then immediately reapply the hold.
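A minimal sketch of the pad-hold feature described above::

    from machine import Pin

    p4 = Pin(4, Pin.OUT, value=1)   # drive GPIO4 high
    p4.init(hold=True)              # freeze the configuration and output level
    p4.value(0)                     # ignored while the hold is active
    p4.init(hold=False)             # release the hold; the pin is configurable again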
Notes:
* Pins 1 and 3 are REPL UART TX and RX respectively
* Pins 6, 7, 8, 11, 16, and 17 are used for connecting the embedded flash,
and are not recommended for other uses
* Pins 34-39 are input only, and also do not have internal pull-up resistors
* See :ref:`Deep_sleep_Mode` for a discussion of pin behaviour during sleep
There's a higher-level abstraction :ref:`machine.Signal <machine.Signal>`
which can be used to invert a pin. Useful for illuminating active-low LEDs
using ``on()`` or ``value(1)``.
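For example, assuming an active-low LED wired to GPIO2 (adapt the pin to your
board)::

    from machine import Pin, Signal

    led = Signal(Pin(2, Pin.OUT), invert=True)
    led.on()     # drives the pin low, lighting the LED
    led.off()    # drives the pin high, turning the LED off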
UART (serial bus)
-----------------
See :ref:`machine.UART <machine.UART>`. ::
from machine import UART
uart1 = UART(1, baudrate=9600, tx=33, rx=32)
uart1.write('hello') # write 5 bytes
uart1.read(5) # read up to 5 bytes
The ESP32 has three hardware UARTs: UART0, UART1 and UART2.
They each have default GPIO assigned to them, however depending on your
ESP32 variant and board, these pins may conflict with embedded flash,
onboard PSRAM or peripherals.
Any GPIO can be used for hardware UARTs using the GPIO matrix, so to avoid
conflicts, simply provide ``tx`` and ``rx`` pins when constructing. The default
pins are listed below.
===== ===== ===== =====
\ UART0 UART1 UART2
===== ===== ===== =====
tx 1 10 17
rx 3 9 16
===== ===== ===== =====
PWM (pulse width modulation)
----------------------------
PWM can be enabled on all output-enabled pins. The base frequency can
range from 1Hz to 40MHz but there is a tradeoff; as the base frequency
*increases* the duty resolution *decreases*. See
`LED Control <https://docs.espressif.com/projects/esp-idf/en/latest/api-reference/peripherals/ledc.html>`_
for more details.
Use the :ref:`machine.PWM <machine.PWM>` class::
from machine import Pin, PWM
pwm0 = PWM(Pin(0)) # create PWM object from a pin
freq = pwm0.freq() # get current frequency (default 5kHz)
pwm0.freq(1000) # set PWM frequency from 1Hz to 40MHz
duty = pwm0.duty() # get current duty cycle, range 0-1023 (default 512, 50%)
pwm0.duty(256) # set duty cycle from 0 to 1023 as a ratio duty/1023, (now 25%)
duty_u16 = pwm0.duty_u16() # get current duty cycle, range 0-65535
pwm0.duty_u16(2**16*3//4) # set duty cycle from 0 to 65535 as a ratio duty_u16/65535, (now 75%)
duty_ns = pwm0.duty_ns() # get current pulse width in ns
pwm0.duty_ns(250_000) # set pulse width in nanoseconds from 0 to 1_000_000_000/freq, (now 25%)
pwm0.deinit() # turn off PWM on the pin
pwm2 = PWM(Pin(2), freq=20000, duty=512) # create and configure in one go
print(pwm2) # view PWM settings
ESP chips have different hardware peripherals:
===================================================== ======== ======== ========
Hardware specification ESP32 ESP32-S2 ESP32-C3
----------------------------------------------------- -------- -------- --------
Number of groups (speed modes) 2 1 1
Number of timers per group 4 4 4
Number of channels per group 8 8 6
----------------------------------------------------- -------- -------- --------
Different PWM frequencies (groups * timers) 8 4 4
Total PWM channels (Pins, duties) (groups * channels) 16 8 6
===================================================== ======== ======== ========
A maximum of 16 PWM channels (pins) are available on the ESP32, but only 8
different PWM frequencies can be used at once; the remaining 8 channels must
reuse one of those frequencies. On the other hand, 16 independent PWM duty
cycles are possible at the same frequency.
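For example, a sketch of two channels sharing one frequency but with
independent duty cycles (the pin numbers are arbitrary)::

    from machine import Pin, PWM

    pwm_a = PWM(Pin(12), freq=5000, duty=256)   # 25% duty
    pwm_b = PWM(Pin(14), freq=5000, duty=768)   # 75% duty at the same 5kHz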
See more examples in the :ref:`esp32_pwm` tutorial.
ADC (analog to digital conversion)
----------------------------------
On the ESP32, ADC functionality is available on pins 32-39 (ADC block 1) and
pins 0, 2, 4, 12-15 and 25-27 (ADC block 2).
Use the :ref:`machine.ADC <machine.ADC>` class::
from machine import ADC
adc = ADC(pin) # create an ADC object acting on a pin
val = adc.read_u16() # read a raw analog value in the range 0-65535
val = adc.read_uv() # read an analog value in microvolts
ADC block 2 is also used by WiFi and so attempting to read analog values from
block 2 pins when WiFi is active will raise an exception.
The internal ADC reference voltage is typically 1.1V, but varies slightly from
package to package. The ADC is less linear close to the reference voltage
(particularly at higher attenuations) and has a minimum measurement voltage
of around 100mV; voltages at or below this will read as 0. To read voltages
accurately, it is recommended to use the ``read_uv()`` method (see below).
ESP32-specific ADC class method reference:
.. class:: ADC(pin, *, atten)
Return the ADC object for the specified pin. ESP32 does not support
different timings for ADC sampling and so the ``sample_ns`` keyword argument
is not supported.
To read voltages above the reference voltage, apply input attenuation with
the ``atten`` keyword argument. Valid values (and approximate linear
measurement ranges) are:
- ``ADC.ATTN_0DB``: No attenuation (100mV - 950mV)
- ``ADC.ATTN_2_5DB``: 2.5dB attenuation (100mV - 1250mV)
- ``ADC.ATTN_6DB``: 6dB attenuation (150mV - 1750mV)
- ``ADC.ATTN_11DB``: 11dB attenuation (150mV - 2450mV)
.. Warning::
Note that the absolute maximum voltage rating for input pins is 3.6V. Going
near to this boundary risks damage to the IC!
.. method:: ADC.read_uv()
This method uses the known characteristics of the ADC and per-package eFuse
values - set during manufacture - to return a calibrated input voltage
(before attenuation) in microvolts. The returned value has only millivolt
resolution (i.e., will always be a multiple of 1000 microvolts).
The calibration is only valid across the linear range of the ADC. In
particular, an input tied to ground will read as a value above 0 microvolts.
Within the linear range, however, more accurate and consistent results will
be obtained than using `read_u16()` and scaling the result with a constant.
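For example, a short sketch reading a calibrated voltage on GPIO34 with 11dB
attenuation (the pin choice is only an example)::

    from machine import ADC, Pin

    adc = ADC(Pin(34), atten=ADC.ATTN_11DB)   # GPIO34 is on ADC block 1
    raw = adc.read_u16()                      # raw reading, 0-65535
    uv = adc.read_uv()                        # calibrated reading in microvolts
    print('input voltage: %.3f V' % (uv / 1000000))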
The ESP32 port also supports the :ref:`machine.ADC <machine.ADCBlock>` API:
.. class:: ADCBlock(id, *, bits)
Return the ADC block object with the given ``id`` (1 or 2) and initialize
it to the specified resolution (9 to 12 bits, depending on the ESP32 series)
or the highest supported resolution if not specified.
.. method:: ADCBlock.connect(pin)
ADCBlock.connect(channel)
ADCBlock.connect(channel, pin)
Return the ``ADC`` object for the specified ADC pin or channel number.
Arbitrary connection of ADC channels to GPIO is not supported and so
specifying a pin that is not connected to this block, or specifying a
mismatched channel and pin, will raise an exception.
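A minimal sketch of this API (GPIO34 is used as an example of a pin connected
to block 1)::
from machine import ADCBlock, Pin
block = ADCBlock(1, bits=12)    # ADC block 1 at 12-bit resolution
adc = block.connect(Pin(34))    # return the ADC object for GPIO34
val = adc.read_u16()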
Legacy methods:
.. method:: ADC.read()
This method returns the raw ADC value ranged according to the resolution of
the block, e.g., 0-4095 for 12-bit resolution.
.. method:: ADC.atten(atten)
Equivalent to ``ADC.init(atten=atten)``.
.. method:: ADC.width(bits)
Equivalent to ``ADC.block().init(bits=bits)``.
For compatibility, the ``ADC`` object also provides constants matching the
supported ADC resolutions:
- ``ADC.WIDTH_9BIT`` = 9
- ``ADC.WIDTH_10BIT`` = 10
- ``ADC.WIDTH_11BIT`` = 11
- ``ADC.WIDTH_12BIT`` = 12
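A brief sketch of the legacy API (again using GPIO34 as an example ADC block 1
pin)::
from machine import ADC, Pin
adc = ADC(Pin(34))
adc.atten(ADC.ATTN_11DB)      # same as adc.init(atten=ADC.ATTN_11DB)
adc.width(ADC.WIDTH_10BIT)    # same as adc.block().init(bits=10)
val = adc.read()              # raw value, 0-1023 at 10-bit resolution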
Software SPI bus
----------------
Software SPI (using bit-banging) works on all pins, and is accessed via the
:ref:`machine.SoftSPI <machine.SoftSPI>` class::
from machine import Pin, SoftSPI
# construct a SoftSPI bus on the given pins
# polarity is the idle state of SCK
# phase=0 means sample on the first edge of SCK, phase=1 means the second
spi = SoftSPI(baudrate=100000, polarity=1, phase=0, sck=Pin(0), mosi=Pin(2), miso=Pin(4))
spi.init(baudrate=200000) # set the baudrate
spi.read(10) # read 10 bytes on MISO
spi.read(10, 0xff) # read 10 bytes while outputting 0xff on MOSI
buf = bytearray(50) # create a buffer
spi.readinto(buf) # read into the given buffer (reads 50 bytes in this case)
spi.readinto(buf, 0xff) # read into the given buffer and output 0xff on MOSI
spi.write(b'12345') # write 5 bytes on MOSI
buf = bytearray(4) # create a buffer
spi.write_readinto(b'1234', buf) # write to MOSI and read from MISO into the buffer
spi.write_readinto(buf, buf) # write buf to MOSI and read MISO back into buf
.. Warning::
Currently *all* of ``sck``, ``mosi`` and ``miso`` *must* be specified when
initialising Software SPI.
Hardware SPI bus
----------------
There are two hardware SPI channels that allow faster transmission
rates (up to 80MHz). These may be used on any IO pins that support the
required direction and are otherwise unused (see :ref:`Pins_and_GPIO`)
but if they are not configured to their default pins then they need to
pass through an extra layer of GPIO multiplexing, which can impact
their reliability at high speeds. Hardware SPI channels are limited
to 40MHz when used on pins other than the default ones listed below.
===== =========== ============
\ HSPI (id=1) VSPI (id=2)
===== =========== ============
sck 14 18
mosi 13 23
miso 12 19
===== =========== ============
Hardware SPI is accessed via the :ref:`machine.SPI <machine.SPI>` class and
has the same methods as software SPI above::
from machine import Pin, SPI
hspi = SPI(1, 10000000)
hspi = SPI(1, 10000000, sck=Pin(14), mosi=Pin(13), miso=Pin(12))
vspi = SPI(2, baudrate=80000000, polarity=0, phase=0, bits=8, firstbit=0, sck=Pin(18), mosi=Pin(23), miso=Pin(19))
Software I2C bus
----------------
Software I2C (using bit-banging) works on all output-capable pins, and is
accessed via the :ref:`machine.SoftI2C <machine.SoftI2C>` class::
from machine import Pin, SoftI2C
i2c = SoftI2C(scl=Pin(5), sda=Pin(4), freq=100000)
i2c.scan() # scan for devices
i2c.readfrom(0x3a, 4) # read 4 bytes from device with address 0x3a
i2c.writeto(0x3a, '12') # write '12' to device with address 0x3a
buf = bytearray(10) # create a buffer with 10 bytes
i2c.writeto(0x3a, buf) # write the given buffer to the peripheral
Hardware I2C bus
----------------
There are two hardware I2C peripherals with identifiers 0 and 1. Any available
output-capable pins can be used for SCL and SDA but the defaults are given
below.
===== =========== ============
\ I2C(0) I2C(1)
===== =========== ============
scl 18 25
sda 19 26
===== =========== ============
The driver is accessed via the :ref:`machine.I2C <machine.I2C>` class and
has the same methods as software I2C above::
from machine import Pin, I2C
i2c = I2C(0)
i2c = I2C(1, scl=Pin(5), sda=Pin(4), freq=400000)
I2S bus
-------
See :ref:`machine.I2S <machine.I2S>`. ::
from machine import I2S, Pin
i2s = I2S(0, sck=Pin(13), ws=Pin(14), sd=Pin(34), mode=I2S.TX, bits=16, format=I2S.STEREO, rate=44100, ibuf=40000) # create I2S object
i2s.write(buf) # write buffer of audio samples to I2S device
i2s = I2S(1, sck=Pin(33), ws=Pin(25), sd=Pin(32), mode=I2S.RX, bits=16, format=I2S.MONO, rate=22050, ibuf=40000) # create I2S object
i2s.readinto(buf) # fill buffer with audio samples from I2S device
The I2S class is currently available as a Technical Preview. During the preview period, feedback from
users is encouraged. Based on this feedback, the I2S class API and implementation may be changed.
The ESP32 has two I2S buses, with ``id=0`` and ``id=1``.
Real time clock (RTC)
---------------------
See :ref:`machine.RTC <machine.RTC>` ::
from machine import RTC
rtc = RTC()
rtc.datetime((2017, 8, 23, 1, 12, 48, 0, 0)) # set a specific date and time
rtc.datetime() # get date and time
WDT (Watchdog timer)
--------------------
See :ref:`machine.WDT <machine.WDT>`. ::
from machine import WDT
# enable the WDT with a timeout of 5s (1s is the minimum)
wdt = WDT(timeout=5000)
wdt.feed()
.. _Deep_sleep_mode:
Deep-sleep mode
---------------
The following code can be used to sleep, wake and check the reset cause::
import machine
# check if the device woke from a deep sleep
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print('woke from a deep sleep')
# put the device to sleep for 10 seconds
machine.deepsleep(10000)
Notes:
* Calling ``deepsleep()`` without an argument will put the device to sleep
indefinitely
* A software reset does not change the reset cause
Some ESP32 pins (0, 2, 4, 12-15, 25-27, 32-39) are connected to the RTC during
deep-sleep and can be used to wake the device with the ``wake_on_`` functions in
the :mod:`esp32` module. The output-capable RTC pins (all except 34-39) will
also retain their pull-up or pull-down resistor configuration when entering
deep-sleep.
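For example, a sketch only (the wake pin, pull and wake level here are
assumptions that depend on your wiring)::
import machine, esp32
from machine import Pin
wake_pin = Pin(33, Pin.IN, Pin.PULL_DOWN)    # an RTC-capable pin (see the list above)
esp32.wake_on_ext0(pin=wake_pin, level=esp32.WAKEUP_ANY_HIGH)
machine.deepsleep()                          # sleep until the pin is driven high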
If the pull resistors are not actively required during deep-sleep and are likely
to cause current leakage (for example a pull-up resistor is connected to ground
through a switch), then they should be disabled to save power before entering
deep-sleep mode::
from machine import Pin, deepsleep
# configure input RTC pin with pull-up on boot
pin = Pin(2, Pin.IN, Pin.PULL_UP)
# disable pull-up and put the device to sleep for 10 seconds
pin.init(pull=None)
deepsleep(10000)
Output-configured RTC pins will also retain their output direction and level in
deep-sleep if pad hold is enabled with the ``hold=True`` argument to
``Pin.init()``.
Non-RTC GPIO pins will be disconnected by default on entering deep-sleep.
Configuration of non-RTC pins - including output level - can be retained by
enabling pad hold on the pin and enabling GPIO pad hold during deep-sleep::
from machine import Pin, deepsleep
import esp32
opin = Pin(19, Pin.OUT, value=1, hold=True) # hold output level
ipin = Pin(21, Pin.IN, Pin.PULL_UP, hold=True) # hold pull-up
# enable pad hold in deep-sleep for non-RTC GPIO
esp32.gpio_deep_sleep_hold(True)
# put the device to sleep for 10 seconds
deepsleep(10000)
The pin configuration - including the pad hold - will be retained on wake from
sleep. See :ref:`Pins_and_GPIO` above for a further discussion of pad holding.
SD card
-------
See :ref:`machine.SDCard <machine.SDCard>`. ::
import machine, os
# Slot 2 uses pins sck=18, cs=5, miso=19, mosi=23
sd = machine.SDCard(slot=2)
os.mount(sd, "/sd") # mount
os.listdir('/sd') # list directory contents
os.umount('/sd') # eject
RMT
---
The RMT is ESP32-specific and allows generation of accurate digital pulses with
12.5ns resolution. See :ref:`esp32.RMT <esp32.RMT>` for details. Usage is::
import esp32
from machine import Pin
r = esp32.RMT(0, pin=Pin(18), clock_div=8)
r # RMT(channel=0, pin=18, source_freq=80000000, clock_div=8)
# The channel resolution is 100ns (1/(source_freq/clock_div)).
r.write_pulses((1, 20, 2, 40), 0) # Send 0 for 100ns, 1 for 2000ns, 0 for 200ns, 1 for 4000ns
OneWire driver
--------------
The OneWire driver is implemented in software and works on all pins::
from machine import Pin
import onewire
ow = onewire.OneWire(Pin(12)) # create a OneWire bus on GPIO12
ow.scan() # return a list of devices on the bus
ow.reset() # reset the bus
ow.readbyte() # read a byte
ow.writebyte(0x12) # write a byte on the bus
ow.write('123') # write bytes on the bus
ow.select_rom(b'12345678') # select a specific device by its ROM code
There is a specific driver for DS18S20 and DS18B20 devices::
import time, ds18x20
ds = ds18x20.DS18X20(ow)
roms = ds.scan()
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
print(ds.read_temp(rom))
Be sure to put a 4.7k pull-up resistor on the data line. Note that
the ``convert_temp()`` method must be called each time you want to
sample the temperature.
NeoPixel and APA106 driver
--------------------------
Use the ``neopixel`` and ``apa106`` modules::
from machine import Pin
from neopixel import NeoPixel
pin = Pin(0, Pin.OUT) # set GPIO0 to output to drive NeoPixels
np = NeoPixel(pin, 8) # create NeoPixel driver on GPIO0 for 8 pixels
np[0] = (255, 255, 255) # set the first pixel to white
np.write() # write data to all pixels
r, g, b = np[0] # get first pixel colour
The APA106 driver extends NeoPixel, but internally uses a different colour order::
from apa106 import APA106
ap = APA106(pin, 8)
r, g, b = ap[0]
.. Warning::
By default ``NeoPixel`` is configured to control the more popular *800kHz*
units. It is possible to use alternative timing to control other (typically
400kHz) devices by passing ``timing=0`` when constructing the
``NeoPixel`` object.
For low-level driving of a NeoPixel see `machine.bitstream`.
This low-level driver uses an RMT channel by default. To configure this see
`RMT.bitstream_channel`.
APA102 (DotStar) uses a different driver as it has an additional clock pin.
Capacitive touch
----------------
Use the ``TouchPad`` class in the ``machine`` module::
from machine import TouchPad, Pin
t = TouchPad(Pin(14))
t.read() # Returns a smaller number when touched
``TouchPad.read`` returns a value relative to the capacitive variation. Small numbers (typically in
the *tens*) are common when a pin is touched, larger numbers (above *one thousand*) when
no touch is present. However the values are *relative* and can vary depending on the board
and surrounding composition so some calibration may be required.
There are ten capacitive touch-enabled pins that can be used on the ESP32: 0, 2, 4, 12, 13,
14, 15, 27, 32, 33. Trying to use any other pin will result in a ``ValueError``.
Note that TouchPads can be used to wake an ESP32 from sleep::
import machine
from machine import TouchPad, Pin
import esp32
t = TouchPad(Pin(14))
t.config(500) # configure the threshold at which the pin is considered touched
esp32.wake_on_touch(True)
machine.lightsleep() # put the MCU to sleep until a touchpad is touched
For more details on touchpads refer to `Espressif Touch Sensor
<https://docs.espressif.com/projects/esp-idf/en/latest/api-reference/peripherals/touch_pad.html>`_.
DHT driver
----------
The DHT driver is implemented in software and works on all pins::
import dht
import machine
d = dht.DHT11(machine.Pin(4))
d.measure()
d.temperature() # eg. 23 (°C)
d.humidity() # eg. 41 (% RH)
d = dht.DHT22(machine.Pin(4))
d.measure()
d.temperature() # eg. 23.6 (°C)
d.humidity() # eg. 41.3 (% RH)
WebREPL (web browser interactive prompt)
----------------------------------------
WebREPL (REPL over WebSockets, accessible via a web browser) is an
experimental feature available in the ESP32 port. Download the web client
from https://github.com/micropython/webrepl (a hosted version is available
at http://micropython.org/webrepl), and configure it by executing::
import webrepl_setup
and following the on-screen instructions. After a reboot, it will be available
for connection. If you disabled automatic start-up on boot, you may
run the configured daemon on demand using::
import webrepl
webrepl.start()
# or, start with a specific password
webrepl.start(password='mypass')
The WebREPL daemon listens on all active interfaces, which can be STA or
AP. This allows you to connect to the ESP32 via a router (the STA
interface) or directly when connected to its access point.
In addition to terminal/command prompt access, WebREPL also has provision
for file transfer (both upload and download). The web client has buttons for
the corresponding functions, or you can use the command-line client
``webrepl_cli.py`` from the repository above.
See the MicroPython forum for other community-supported alternatives
to transfer files to an ESP32 board.
View File
@@ -0,0 +1,23 @@
.. _esp32_tutorial:
MicroPython tutorial for ESP32
==============================
This tutorial is intended to get you started using MicroPython on the ESP32
system-on-a-chip. If it is your first time it is recommended to follow the
tutorial through in the order below. Otherwise the sections are mostly self
contained, so feel free to skip to those that interest you.
The tutorial does not assume that you know Python, but it also does not attempt
to explain any of the details of the Python language. Instead it provides you
with commands that are ready to run, and hopes that you will gain a bit of
Python knowledge along the way. To learn more about Python itself please refer
to `<https://www.python.org>`__.
.. toctree::
:maxdepth: 1
:numbered:
intro.rst
pwm.rst
peripheral_access.rst
View File
@@ -0,0 +1,139 @@
.. _esp32_intro:
Getting started with MicroPython on the ESP32
=============================================
Using MicroPython is a great way to get the most of your ESP32 board. And
vice versa, the ESP32 chip is a great platform for using MicroPython. This
tutorial will guide you through setting up MicroPython, getting a prompt, using
WebREPL, connecting to the network and communicating with the Internet, using
the hardware peripherals, and controlling some external components.
Let's get started!
Requirements
------------
The first thing you need is a board with an ESP32 chip. The MicroPython
software supports the ESP32 chip itself and any board should work. The main
characteristic of a board is how the GPIO pins are connected to the outside
world, and whether it includes a built-in USB-serial convertor to make the
UART available to your PC.
Names of pins will be given in this tutorial using the chip names (eg GPIO2)
and it should be straightforward to find which pin this corresponds to on your
particular board.
Powering the board
------------------
If your board has a USB connector on it then most likely it is powered through
this when connected to your PC. Otherwise you will need to power it directly.
Please refer to the documentation for your board for further details.
Getting the firmware
--------------------
The first thing you need to do is download the most recent MicroPython firmware
.bin file to load onto your ESP32 device. You can download it from the
`MicroPython downloads page <https://micropython.org/download#esp32>`_.
From here, you have 3 main choices:
* Stable firmware builds
* Daily firmware builds
* Daily firmware builds with SPIRAM support
If you are just starting with MicroPython, the best bet is to go for the Stable
firmware builds. If you are an advanced, experienced MicroPython ESP32 user
who would like to follow development closely and help with testing new
features, there are daily builds. If your board has SPIRAM support you can
use either the standard firmware or the firmware with SPIRAM support, and in
the latter case you will have access to more RAM for Python objects.
Deploying the firmware
----------------------
Once you have the MicroPython firmware you need to load it onto your ESP32 device.
There are two main steps to do this: first you need to put your device in
bootloader mode, and second you need to copy across the firmware. The exact
procedure for these steps is highly dependent on the particular board and you will
need to refer to its documentation for details.
Fortunately, most boards have a USB connector and a USB-serial convertor, with the DTR
and RTS pins wired in a special way, so deploying the firmware is easy as
all steps can be done automatically. Boards with such features
include the Adafruit Feather HUZZAH32, M5Stack, Wemos LOLIN32 and TinyPICO
boards, along with the Espressif DevKitC, PICO-KIT and WROVER-KIT dev-kits.
For best results it is recommended to first erase the entire flash of your
device before putting on new MicroPython firmware.
Currently we only support esptool.py to copy across the firmware. You can find
this tool here: `<https://github.com/espressif/esptool/>`__, or install it
using pip::
pip install esptool
esptool.py versions starting with 1.3 support both Python 2.7 and Python 3.4 (or newer).
Older versions (at least 1.2.1 is required) also work, but only with Python 2.7.
Using esptool.py you can erase the flash with the command::
esptool.py --port /dev/ttyUSB0 erase_flash
And then deploy the new firmware using::
esptool.py --chip esp32 --port /dev/ttyUSB0 write_flash -z 0x1000 esp32-20180511-v1.9.4.bin
Notes:
* You might need to change the "port" setting to something else relevant for your
PC
* You may need to reduce the baudrate if you get errors when flashing
(eg down to 115200 by adding ``--baud 115200`` into the command)
* For some boards with a particular FlashROM configuration you may need to
change the flash mode (eg by adding ``-fm dio`` into the command)
* The filename of the firmware should match the file that you have
If the above commands run without error then MicroPython should be installed on
your board!
Serial prompt
-------------
Once you have the firmware on the device you can access the REPL (Python prompt)
over UART0 (GPIO1=TX, GPIO3=RX), which might be connected to a USB-serial
convertor, depending on your board. The baudrate is 115200.
From here you can now follow the ESP8266 tutorial, because these two Espressif chips
are very similar when it comes to using MicroPython on them. The ESP8266 tutorial
is found at :ref:`esp8266_tutorial` (but skip the Introduction section).
Troubleshooting installation problems
-------------------------------------
If you experience problems during flashing or with running firmware immediately
after it, here are troubleshooting recommendations:
* Be aware of and try to exclude hardware problems. There are 2 common
problems: bad power source quality, and worn-out/defective FlashROM.
Speaking of power source, not just raw amperage is important, but also low
ripple and noise/EMI in general. The most reliable and convenient power
source is a USB port.
* The flashing instructions above use a flashing speed of 460800 baud, which is
a good compromise between speed and stability. However, depending on your
module/board, USB-UART convertor, cables, host OS, etc., this baud
rate may be too high and lead to errors. Try the more common 115200 baud
rate instead in such cases.
* To catch incorrect flash content (e.g. from a defective sector on a chip),
add the ``--verify`` switch to the commands above.
* If you still experience problems with flashing the firmware, please
refer to the esptool.py project page, https://github.com/espressif/esptool,
for additional documentation and a bug tracker where you can report problems.
* If you are able to flash the firmware but the ``--verify`` option returns
errors even after multiple retries, then you may have a defective FlashROM chip.
View File
@@ -0,0 +1,44 @@
Accessing peripherals directly via registers
============================================
The ESP32's peripherals can be controlled via direct register reads and writes.
This requires reading the datasheet to know what registers to use and what
values to write to them. The following example shows how to turn on and change
the prescaler of the MCPWM0 peripheral.
.. code-block:: python3
from micropython import const
from machine import mem32
# Define the register addresses that will be used.
DR_REG_DPORT_BASE = const(0x3FF00000)
DPORT_PERIP_CLK_EN_REG = const(DR_REG_DPORT_BASE + 0x0C0)
DPORT_PERIP_RST_EN_REG = const(DR_REG_DPORT_BASE + 0x0C4)
DPORT_PWM0_CLK_EN = const(1 << 17)
MCPWM0 = const(0x3FF5E000)
MCPWM1 = const(0x3FF6C000)
# Enable CLK and disable RST.
print(hex(mem32[DPORT_PERIP_CLK_EN_REG] & 0xffffffff))
print(hex(mem32[DPORT_PERIP_RST_EN_REG] & 0xffffffff))
mem32[DPORT_PERIP_CLK_EN_REG] |= DPORT_PWM0_CLK_EN
mem32[DPORT_PERIP_RST_EN_REG] &= ~DPORT_PWM0_CLK_EN
print(hex(mem32[DPORT_PERIP_CLK_EN_REG] & 0xffffffff))
print(hex(mem32[DPORT_PERIP_RST_EN_REG] & 0xffffffff))
# Change the MCPWM0 prescaler.
print(hex(mem32[MCPWM0])) # read PWM_CLK_CFG_REG (reset value = 0)
mem32[MCPWM0] = 0x55 # change PWM_CLK_PRESCALE
print(hex(mem32[MCPWM0])) # read PWM_CLK_CFG_REG
Note that before a peripheral can be used its clock must be enabled and it must
be taken out of reset. In the above example the following registers are used
for this:
- ``DPORT_PERI_CLK_EN_REG``: used to enable a peripheral clock
- ``DPORT_PERI_RST_EN_REG``: used to reset (or take out of reset) a peripheral
The MCPWM0 peripheral is in bit position 17 of the above two registers, hence
the value of ``DPORT_PWM0_CLK_EN``.
View File
@@ -0,0 +1,115 @@
.. _esp32_pwm:
Pulse Width Modulation
======================
Pulse width modulation (PWM) is a way to get an artificial analog output on a
digital pin. It achieves this by rapidly toggling the pin from low to high.
There are two parameters associated with this: the frequency of the toggling,
and the duty cycle. The duty cycle is defined to be how long the pin is high
compared with the length of a single period (low plus high time). Maximum
duty cycle is when the pin is high all of the time, and minimum is when it is
low all of the time.
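A minimal sketch of driving a single pin is shown first (GPIO2 is an arbitrary
choice; the methods used are the standard ``machine.PWM`` ones used throughout
this tutorial)::
from machine import Pin, PWM
pwm2 = PWM(Pin(2))      # create a PWM object on GPIO2
pwm2.freq(1000)         # set the frequency to 1kHz
pwm2.duty(512)          # set the duty cycle to about 50% (range is 0-1023)
print(pwm2)
pwm2.deinit()           # turn off PWM on the pin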
* More comprehensive example with all 16 PWM channels and 8 timers::
from machine import Pin, PWM
try:
f = 100 # Hz
d = 1024 // 16 # 6.25%
pins = (15, 2, 4, 16, 18, 19, 22, 23, 25, 26, 27, 14, 12, 13, 32, 33)
pwms = []
for i, pin in enumerate(pins):
pwms.append(PWM(Pin(pin), freq=f * (i // 2 + 1), duty= 1023 if i==15 else d * (i + 1)))
print(pwms[i])
finally:
for pwm in pwms:
try:
pwm.deinit()
except:
pass
Output is::
PWM(Pin(15), freq=100, duty=64, resolution=10, mode=0, channel=0, timer=0)
PWM(Pin(2), freq=100, duty=128, resolution=10, mode=0, channel=1, timer=0)
PWM(Pin(4), freq=200, duty=192, resolution=10, mode=0, channel=2, timer=1)
PWM(Pin(16), freq=200, duty=256, resolution=10, mode=0, channel=3, timer=1)
PWM(Pin(18), freq=300, duty=320, resolution=10, mode=0, channel=4, timer=2)
PWM(Pin(19), freq=300, duty=384, resolution=10, mode=0, channel=5, timer=2)
PWM(Pin(22), freq=400, duty=448, resolution=10, mode=0, channel=6, timer=3)
PWM(Pin(23), freq=400, duty=512, resolution=10, mode=0, channel=7, timer=3)
PWM(Pin(25), freq=500, duty=576, resolution=10, mode=1, channel=0, timer=0)
PWM(Pin(26), freq=500, duty=640, resolution=10, mode=1, channel=1, timer=0)
PWM(Pin(27), freq=600, duty=704, resolution=10, mode=1, channel=2, timer=1)
PWM(Pin(14), freq=600, duty=768, resolution=10, mode=1, channel=3, timer=1)
PWM(Pin(12), freq=700, duty=832, resolution=10, mode=1, channel=4, timer=2)
PWM(Pin(13), freq=700, duty=896, resolution=10, mode=1, channel=5, timer=2)
PWM(Pin(32), freq=800, duty=960, resolution=10, mode=1, channel=6, timer=3)
PWM(Pin(33), freq=800, duty=1023, resolution=10, mode=1, channel=7, timer=3)
* Example of a smooth frequency change::
from utime import sleep
from machine import Pin, PWM
F_MIN = 500
F_MAX = 1000
f = F_MIN
delta_f = 1
p = PWM(Pin(5), f)
print(p)
while True:
p.freq(f)
sleep(10 / F_MIN)
f += delta_f
if f >= F_MAX or f <= F_MIN:
delta_f = -delta_f
See PWM wave at Pin(5) with an oscilloscope.
* Example of a smooth duty change::
from utime import sleep
from machine import Pin, PWM
DUTY_MAX = 2**16 - 1
duty_u16 = 0
delta_d = 16
p = PWM(Pin(5), 1000, duty_u16=duty_u16)
print(p)
while True:
p.duty_u16(duty_u16)
sleep(1 / 1000)
duty_u16 += delta_d
if duty_u16 >= DUTY_MAX:
duty_u16 = DUTY_MAX
delta_d = -delta_d
elif duty_u16 <= 0:
duty_u16 = 0
delta_d = -delta_d
See PWM wave at Pin(5) with an oscilloscope.
Note: the Pin.OUT mode does not need to be specified. The channel is initialized
to PWM mode internally once for each Pin that is passed to the PWM constructor.
The following code is wrong::
pwm = PWM(Pin(5, Pin.OUT), freq=1000, duty=512) # Pin(5) in PWM mode here
pwm = PWM(Pin(5, Pin.OUT), freq=500, duty=256) # Pin(5) in OUT mode here, PWM is off
Use this code instead::
pwm = PWM(Pin(5), freq=1000, duty=512)
pwm.init(freq=500, duty=256)
View File
@@ -0,0 +1,211 @@
.. _esp8266_general:
General information about the ESP8266 port
==========================================
ESP8266 is a popular WiFi-enabled System-on-Chip (SoC) by Espressif Systems.
Multitude of boards
-------------------
There is a multitude of modules and boards from different sources which carry
the ESP8266 chip. MicroPython tries to provide a generic port which will run on
as many boards/modules as possible, but there may be limitations. The Adafruit
Feather HUZZAH board is taken as the reference board for the port (for example,
testing is performed on it). If you have another board, please make sure you
have a datasheet, schematics and other reference materials for your board
handy so you can look up various aspects of your board's functioning.
To make a generic ESP8266 port and support as many boards as possible,
the following design and implementation decisions were made:
* GPIO pin numbering is based on ESP8266 chip numbering, not some "logical"
numbering of a particular board. Please have the manual/pin diagram of your board
at hand to find the correspondence between your board's pins and the actual ESP8266 pins.
We also encourage users of various boards to share this mapping via the MicroPython
forum, with the aim of eventually collecting community-maintained reference
materials.
* All pins which make sense to support are supported by MicroPython
(for example, pins which are used to connect the SPI flash
are not exposed, as they're unlikely to be useful for anything else, and
operating on them will lead to board lock-up). However, any particular
board may expose only a subset of pins. Consult your board's reference manual.
* Some boards may lack external pins/internal connectivity to support
ESP8266 deepsleep mode.
Technical specifications and SoC datasheets
-------------------------------------------
The datasheets and other reference material for ESP8266 chip are available
from the vendor site: http://bbs.espressif.com/viewtopic.php?f=67&t=225 .
They are the primary reference for the chip technical specifications, capabilities,
operating modes, internal functioning, etc.
For your convenience, some of the technical specifications are provided below:
* Architecture: Xtensa lx106
* CPU frequency: 80MHz overclockable to 160MHz
* Total RAM available: 96KB (part of it reserved for system)
* BootROM: 64KB
* Internal FlashROM: None
* External FlashROM: code and data, via SPI Flash. Normal sizes 512KB-4MB.
* GPIO: 16 + 1 (GPIOs are multiplexed with other functions, including
external FlashROM, UART, deep sleep wake-up, etc.)
* UART: One RX/TX UART (no hardware handshaking), one TX-only UART.
* SPI: 2 SPI interfaces (one used for FlashROM).
* I2C: No native external I2C (bitbang implementation available on any pins).
* I2S: 1.
* Programming: using BootROM bootloader from UART. Due to external FlashROM
and always-available BootROM bootloader, ESP8266 is not brickable.
Scarcity of runtime resources
-----------------------------
The ESP8266 has very modest resources (first of all, RAM). So, please
avoid allocating overly large container objects (lists, dictionaries) and
buffers. There is also no full-fledged OS to keep track of resources
and automatically clean them up, so that is the task of the user
application: please be sure to close open files, sockets, etc. as soon
as possible after use.
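As a small illustrative sketch (``boot.py`` is used only as a file that is
known to exist; ``gc.mem_free()`` reports the free heap)::
import gc
f = open('boot.py')    # open a file that exists on the filesystem
print(len(f.read()))   # use it...
f.close()              # ...and close it as soon as it is no longer needed
gc.collect()           # optionally run a collection to reduce fragmentation
print(gc.mem_free())   # check how much heap RAM remains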
Boot process
------------
On boot, the MicroPython ESP8266 port executes the ``_boot.py`` script from the internal
frozen modules. It mounts the filesystem in FlashROM or, if the filesystem is not
available, performs first-time setup of the module and creates the filesystem. This
part of the boot process is considered fixed, and is not available for customization
by end users (even if you build from source, please refrain from changes to
it; customization of the early boot process is available only to advanced users
and developers, who can diagnose for themselves any issues arising from
modifying the standard process).
Once the filesystem is mounted, ``boot.py`` is executed from it. The standard
version of this file is created during first-time module setup and has
commands to start a WebREPL daemon (disabled by default, configurable
with the ``webrepl_setup`` module), etc. This
file is customizable by end users (for example, you may want to set some
parameters or add other services which should be run on
module start-up). But keep in mind that incorrect modifications to boot.py
may still lead to boot loops or lock-ups, requiring the module to be reflashed
from scratch. (In particular, it's recommended that you use either the
``webrepl_setup`` module or manual editing to configure WebREPL, but not
both.)
As a final step of the boot procedure, ``main.py`` is executed from the filesystem,
if it exists. This file is a hook to start up a user application on each
boot (instead of going to the REPL). For small test applications, you may
name them directly as ``main.py`` and upload them to the module, but it's
recommended to keep your application(s) in separate files and have just
the following in ``main.py``::
import my_app
my_app.main()
This allows you to keep the structure of your application clear, as well as
to install multiple applications on a board and switch among them.
Known Issues
------------
Real-time clock
~~~~~~~~~~~~~~~
The RTC in the ESP8266 has very poor accuracy; drift may be seconds per minute. As
a workaround, to measure short enough intervals you can use the
``time.time()`` etc. functions, and for wall clock time, synchronize from
the net using the included ``ntptime.py`` module.
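For example, a minimal sketch (this assumes an active network connection)::
import ntptime, time
ntptime.settime()         # set the RTC from an NTP server
print(time.localtime())   # wall-clock time is now reasonably accurate (UTC)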
Due to limitations of the ESP8266 chip the internal real-time clock (RTC)
will overflow every 7:45h. If a long-term working RTC time is required then
``time()`` or ``localtime()`` must be called at least once within 7 hours.
MicroPython will then handle the overflow.
Simultaneous operation of STA_IF and AP_IF
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Simultaneous operation of STA_IF and AP_IF interfaces is supported.
However, due to restrictions of the hardware, there may be performance
issues in the AP_IF, if the STA_IF is not connected and searching.
An application should manage these interfaces and for example
deactivate the STA_IF in environments where only the AP_IF is used.
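For example, a minimal sketch of keeping only the access point active::
import network
sta = network.WLAN(network.STA_IF)
ap = network.WLAN(network.AP_IF)
ap.active(True)      # keep the access-point interface up
sta.active(False)    # avoid AP_IF performance issues from a searching STA_IF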
Sockets and WiFi buffers overflow
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Socket instances remain active until they are explicitly closed. This has two
consequences. Firstly they occupy RAM, so an application which opens sockets
without closing them may eventually run out of memory. Secondly, a socket that
is not properly closed can cause the low-level part of the vendor WiFi stack to emit
``Lmac`` errors. This occurs if data comes in for a socket and is not
processed in a timely manner. This can overflow the WiFi stack input queue
and lead to a deadlock. The only recovery is by a hard reset.
The above may also happen after an application terminates and quits to the REPL
for any reason including an exception. Subsequent arrival of data provokes the
failure with the above error message repeatedly issued. So, sockets should be
closed in any case, regardless of whether an application terminates successfully
or by an exception, for example by using try/finally::
sock = socket(...)
try:
# Use sock
finally:
sock.close()
SSL/TLS limitations
~~~~~~~~~~~~~~~~~~~
ESP8266 uses `axTLS <http://axtls.sourceforge.net/>`_ library, which is one
of the smallest TLS libraries with compatible licensing. However, it
also has some known issues/limitations:
1. No support for Diffie-Hellman (DH) key exchange and Elliptic-curve
cryptography (ECC). This means it can't work with sites which require
the use of these features (it works ok with the typical sites that use
RSA certificates).
2. Half-duplex communication nature. axTLS uses a single buffer for both
sending and receiving, which leads to considerable memory saving and
works well with protocols like HTTP. But there may be problems with
protocols which don't follow classic request-response model.
Besides axTLS's own limitations, the configuration used for MicroPython is
highly optimized for code size, which leads to additional limitations
(these may be lifted in the future):
3. Optimized RSA algorithms are not enabled, which may lead to slow
SSL handshakes.
4. Session Reuse is not enabled, which means every connection must undergo
the full, expensive SSL handshake.
Besides axTLS specific limitations described above, there's another generic
limitation with usage of TLS on the low-memory devices:
5. The TLS standard specifies the maximum length of the TLS record (unit
of TLS communication, the entire record must be buffered before it can
be processed) as 16KB. That's almost half of the available ESP8266 memory,
and inside a more or less advanced application would be hard to allocate
due to memory fragmentation issues. As a compromise, a smaller buffer is
used, with the idea that the most interesting usage for SSL would be
accessing various REST APIs, which usually require much smaller messages.
The buffer size is on the order of 5KB, and is adjusted from time to
time, taking as a reference being able to access https://google.com .
The smaller buffer however means that some sites can't be accessed using
it, and it's not possible to stream large amounts of data. axTLS does
have support for TLS's Max Fragment Size extension, but no HTTPS website
does, so use of the extension is really only effective for local
communication with other devices.
There are also some not implemented features specifically in MicroPython's
``ssl`` module based on axTLS:
6. Certificates are not validated (this makes connections susceptible
to man-in-the-middle attacks).
7. There is no support for client certificates (scheduled to be fixed in
the 1.9.4 release).
Binary file not shown.
View File
@@ -0,0 +1,466 @@
.. _esp8266_quickref:
Quick reference for the ESP8266
===============================
.. image:: img/adafruit_products_pinoutstop.jpg
:alt: Adafruit Feather HUZZAH board
:width: 640px
The Adafruit Feather HUZZAH board (image attribution: Adafruit).
Below is a quick reference for ESP8266-based boards. If it is your first time
working with this board please consider reading the following sections first:
.. toctree::
:maxdepth: 1
general.rst
tutorial/index.rst
Installing MicroPython
----------------------
See the corresponding section of tutorial: :ref:`intro`. It also includes
a troubleshooting subsection.
General board control
---------------------
The MicroPython REPL is on UART0 (GPIO1=TX, GPIO3=RX) at baudrate 115200.
Tab-completion is useful to find out what methods an object has.
Paste mode (ctrl-E) is useful to paste a large slab of Python code into
the REPL.
The :mod:`machine` module::
import machine
machine.freq() # get the current frequency of the CPU
machine.freq(160000000) # set the CPU frequency to 160 MHz
The :mod:`esp` module::
import esp
esp.osdebug(None) # turn off vendor O/S debugging messages
esp.osdebug(0) # redirect vendor O/S debugging messages to UART(0)
Networking
----------
The :mod:`network` module::
import network
wlan = network.WLAN(network.STA_IF) # create station interface
wlan.active(True) # activate the interface
wlan.scan() # scan for access points
wlan.isconnected() # check if the station is connected to an AP
wlan.connect('essid', 'password') # connect to an AP
wlan.config('mac') # get the interface's MAC address
wlan.ifconfig() # get the interface's IP/netmask/gw/DNS addresses
ap = network.WLAN(network.AP_IF) # create access-point interface
ap.active(True) # activate the interface
ap.config(essid='ESP-AP') # set the ESSID of the access point
A useful function for connecting to your local WiFi network is::
def do_connect():
import network
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
if not wlan.isconnected():
print('connecting to network...')
wlan.connect('essid', 'password')
while not wlan.isconnected():
pass
print('network config:', wlan.ifconfig())
Once the network is established the :mod:`socket <socket>` module can be used
to create and use TCP/UDP sockets as usual.
Delay and timing
----------------
Use the :mod:`time <time>` module::
import time
time.sleep(1) # sleep for 1 second
time.sleep_ms(500) # sleep for 500 milliseconds
time.sleep_us(10) # sleep for 10 microseconds
start = time.ticks_ms() # get millisecond counter
delta = time.ticks_diff(time.ticks_ms(), start) # compute time difference
Timers
------
Virtual (RTOS-based) timers are supported. Use the :ref:`machine.Timer <machine.Timer>` class
with timer ID of -1::
from machine import Timer
tim = Timer(-1)
tim.init(period=5000, mode=Timer.ONE_SHOT, callback=lambda t:print(1))
tim.init(period=2000, mode=Timer.PERIODIC, callback=lambda t:print(2))
The period is in milliseconds.
Pins and GPIO
-------------
Use the :ref:`machine.Pin <machine.Pin>` class::
from machine import Pin
p0 = Pin(0, Pin.OUT) # create output pin on GPIO0
p0.on() # set pin to "on" (high) level
p0.off() # set pin to "off" (low) level
p0.value(1) # set pin to on/high
p2 = Pin(2, Pin.IN) # create input pin on GPIO2
print(p2.value()) # get value, 0 or 1
p4 = Pin(4, Pin.IN, Pin.PULL_UP) # enable internal pull-up resistor
p5 = Pin(5, Pin.OUT, value=1) # set pin high on creation
Available pins are: 0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 16, which correspond
to the actual GPIO pin numbers of the ESP8266 chip. Note that many end-user
boards use their own ad hoc pin numbering (marked e.g. D0, D1, ...). As
MicroPython supports different boards and modules, physical pin numbering
was chosen as the lowest common denominator. For mapping between board
logical pins and physical chip pins, consult your board documentation.
Note that Pin(1) and Pin(3) are the REPL UART TX and RX respectively.
Also note that Pin(16) is a special pin (used for wakeup from deepsleep
mode) and may not be available for use with higher-level classes like
``NeoPixel``.
There's a higher-level abstraction :ref:`machine.Signal <machine.Signal>`
which can be used to invert a pin. Useful for illuminating active-low LEDs
using ``on()`` or ``value(1)``.
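For example, a brief sketch (this assumes an active-low LED on GPIO2, as found
on many ESP8266 boards)::
from machine import Pin, Signal
# invert=True means on() drives the pin low, lighting an active-low LED
led = Signal(Pin(2, Pin.OUT), invert=True)
led.on()     # LED lit (pin driven low)
led.off()    # LED off (pin driven high)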
UART (serial bus)
-----------------
See :ref:`machine.UART <machine.UART>`. ::
from machine import UART
uart = UART(0, baudrate=9600)
uart.write('hello')
uart.read(5) # read up to 5 bytes
Two UARTs are available. UART0 is on Pins 1 (TX) and 3 (RX). UART0 is
bidirectional, and by default is used for the REPL. UART1 is on Pins 2
(TX) and 8 (RX) however Pin 8 is used to connect the flash chip, so
UART1 is TX only.
When UART0 is attached to the REPL, all incoming chars on UART(0) go
straight to stdin, so uart.read() will always return None. Use
sys.stdin.read() if you need to read characters from UART(0)
while it's also used for the REPL (or detach, read, then reattach).
When detached, UART(0) can be used for other purposes.
If there are no objects in any of the dupterm slots when the REPL is
started (on hard or soft reset) then UART(0) is automatically attached.
Without this, the only way to recover a board without a REPL would be to
completely erase and reflash (which would install the default boot.py which
attaches the REPL).
To detach the REPL from UART0, use::
import os
os.dupterm(None, 1)
The REPL is attached by default. If you have detached it, to reattach
it use::
import os, machine
uart = machine.UART(0, 115200)
os.dupterm(uart, 1)
PWM (pulse width modulation)
----------------------------
PWM can be enabled on all pins except Pin(16). There is a single frequency
for all channels, with range between 1 and 1000 (measured in Hz). The duty
cycle is between 0 and 1023 inclusive.
Use the ``machine.PWM`` class::
from machine import Pin, PWM
pwm0 = PWM(Pin(0)) # create PWM object from a pin
pwm0.freq() # get current frequency
pwm0.freq(1000) # set frequency
pwm0.duty() # get current duty cycle
pwm0.duty(200) # set duty cycle
pwm0.deinit() # turn off PWM on the pin
pwm2 = PWM(Pin(2), freq=500, duty=512) # create and configure in one go
ADC (analog to digital conversion)
----------------------------------
ADC is available on a dedicated pin.
Note that input voltages on the ADC pin must be between 0V and 1.0V.
Use the :ref:`machine.ADC <machine.ADC>` class::
from machine import ADC
adc = ADC(0) # create ADC object on ADC pin
adc.read() # read value, 0-1024
Software SPI bus
----------------
There are two SPI drivers. One is implemented in software (bit-banging)
and works on all pins, and is accessed via the :ref:`machine.SoftSPI <machine.SoftSPI>`
class::
from machine import Pin, SoftSPI
# construct an SPI bus on the given pins
# polarity is the idle state of SCK
# phase=0 means sample on the first edge of SCK, phase=1 means the second
spi = SoftSPI(baudrate=100000, polarity=1, phase=0, sck=Pin(0), mosi=Pin(2), miso=Pin(4))
spi.init(baudrate=200000) # set the baudrate
spi.read(10) # read 10 bytes on MISO
spi.read(10, 0xff) # read 10 bytes while outputting 0xff on MOSI
buf = bytearray(50) # create a buffer
spi.readinto(buf) # read into the given buffer (reads 50 bytes in this case)
spi.readinto(buf, 0xff) # read into the given buffer and output 0xff on MOSI
spi.write(b'12345') # write 5 bytes on MOSI
buf = bytearray(4) # create a buffer
spi.write_readinto(b'1234', buf) # write to MOSI and read from MISO into the buffer
spi.write_readinto(buf, buf) # write buf to MOSI and read MISO back into buf
Hardware SPI bus
----------------
The hardware SPI is faster (up to 80MHz), but only works on the following pins:
``MISO`` is GPIO12, ``MOSI`` is GPIO13, and ``SCK`` is GPIO14. It has the same
methods as the bit-banging SPI class above, except for the pin parameters of the
constructor and ``init`` (as those are fixed)::
from machine import Pin, SPI
hspi = SPI(1, baudrate=80000000, polarity=0, phase=0)
(``SPI(0)`` is used for FlashROM and not available to users.)
I2C bus
-------
The I2C driver is implemented in software and works on all pins,
and is accessed via the :ref:`machine.I2C <machine.I2C>` class (which is an
alias of :ref:`machine.SoftI2C <machine.SoftI2C>`)::
from machine import Pin, I2C
# construct an I2C bus
i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
i2c.readfrom(0x3a, 4) # read 4 bytes from peripheral device with address 0x3a
i2c.writeto(0x3a, '12') # write '12' to peripheral device with address 0x3a
buf = bytearray(10) # create a buffer with 10 bytes
i2c.writeto(0x3a, buf) # write the given buffer to the peripheral
Real time clock (RTC)
---------------------
See :ref:`machine.RTC <machine.RTC>` ::
from machine import RTC
rtc = RTC()
rtc.datetime((2017, 8, 23, 1, 12, 48, 0, 0)) # set a specific date and time
rtc.datetime() # get date and time
# synchronize with ntp
# need to be connected to wifi
import ntptime
ntptime.settime() # set the rtc datetime from the remote server
rtc.datetime() # get the date and time in UTC
.. note:: Not all methods are implemented: `RTC.now()`, `RTC.irq(handler=*) <RTC.irq>`
(using a custom handler), `RTC.init()` and `RTC.deinit()` are
currently not supported.
WDT (Watchdog timer)
--------------------
See :ref:`machine.WDT <machine.WDT>`. ::
from machine import WDT
# enable the WDT
wdt = WDT()
wdt.feed()
Deep-sleep mode
---------------
Connect GPIO16 to the reset pin (RST on HUZZAH). Then the following code
can be used to sleep, wake and check the reset cause::
import machine
# configure RTC.ALARM0 to be able to wake the device
rtc = machine.RTC()
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
# check if the device woke from a deep sleep
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print('woke from a deep sleep')
# set RTC.ALARM0 to fire after 10 seconds (waking the device)
rtc.alarm(rtc.ALARM0, 10000)
# put the device to sleep
machine.deepsleep()
OneWire driver
--------------
The OneWire driver is implemented in software and works on all pins::
from machine import Pin
import onewire
ow = onewire.OneWire(Pin(12)) # create a OneWire bus on GPIO12
ow.scan() # return a list of devices on the bus
ow.reset() # reset the bus
ow.readbyte() # read a byte
ow.writebyte(0x12) # write a byte on the bus
ow.write('123') # write bytes on the bus
ow.select_rom(b'12345678') # select a specific device by its ROM code
There is a specific driver for DS18S20 and DS18B20 devices::
import time, ds18x20
ds = ds18x20.DS18X20(ow)
roms = ds.scan()
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
print(ds.read_temp(rom))
Be sure to put a 4.7k pull-up resistor on the data line. Note that
the ``convert_temp()`` method must be called each time you want to
sample the temperature.
NeoPixel driver
---------------
Use the ``neopixel`` module::
from machine import Pin
from neopixel import NeoPixel
pin = Pin(0, Pin.OUT) # set GPIO0 to output to drive NeoPixels
np = NeoPixel(pin, 8) # create NeoPixel driver on GPIO0 for 8 pixels
np[0] = (255, 255, 255) # set the first pixel to white
np.write() # write data to all pixels
r, g, b = np[0] # get first pixel colour
.. Warning::
By default ``NeoPixel`` is configured to control the more popular *800kHz*
units. It is possible to use alternative timing to control other (typically
400kHz) devices by passing ``timing=0`` when constructing the
``NeoPixel`` object.
For low-level driving of a NeoPixel see `machine.bitstream`.
APA102 driver
-------------
Use the ``apa102`` module::
from machine import Pin
from apa102 import APA102
clock = Pin(14, Pin.OUT) # set GPIO14 to output to drive the clock
data = Pin(13, Pin.OUT) # set GPIO13 to output to drive the data
apa = APA102(clock, data, 8) # create APA102 driver on the clock and the data pin for 8 pixels
apa[0] = (255, 255, 255, 31) # set the first pixel to white with a maximum brightness of 31
apa.write() # write data to all pixels
r, g, b, brightness = apa[0] # get first pixel colour
For low-level driving of an APA102::
import esp
esp.apa102_write(clock_pin, data_pin, rgbi_buf)
DHT driver
----------
The DHT driver is implemented in software and works on all pins::
import dht
import machine
d = dht.DHT11(machine.Pin(4))
d.measure()
d.temperature() # eg. 23 (°C)
d.humidity() # eg. 41 (% RH)
d = dht.DHT22(machine.Pin(4))
d.measure()
d.temperature() # eg. 23.6 (°C)
d.humidity() # eg. 41.3 (% RH)
SSD1306 driver
--------------
Driver for SSD1306 monochrome OLED displays. See tutorial :ref:`ssd1306`. ::
from machine import Pin, I2C
import ssd1306
i2c = I2C(scl=Pin(5), sda=Pin(4), freq=100000)
display = ssd1306.SSD1306_I2C(128, 64, i2c)
display.text('Hello World', 0, 0, 1)
display.show()
WebREPL (web browser interactive prompt)
----------------------------------------
WebREPL (REPL over WebSockets, accessible via a web browser) is an
experimental feature available in the ESP8266 port. Download the web client
from https://github.com/micropython/webrepl (hosted version available
at http://micropython.org/webrepl), and configure it by executing::
import webrepl_setup
and following the on-screen instructions. After a reboot, it will be available
for connection. If you disabled automatic start-up on boot, you may
run the configured daemon on demand using::
import webrepl
webrepl.start()
The supported way to use WebREPL is by connecting to the ESP8266 access point,
but the daemon is also started on STA interface if it is active, so if your
router is set up and works correctly, you may also use WebREPL while connected
to your normal Internet access point (use the ESP8266 AP connection method
if you face any issues).
Besides terminal/command prompt access, WebREPL also has provision for file
transfer (both upload and download). The web client has buttons for the
corresponding functions, or you can use the command-line client ``webrepl_cli.py``
from the repository above.
See the MicroPython forum for other community-supported alternatives
to transfer files to ESP8266.
View File
@@ -0,0 +1,19 @@
Analog to Digital Conversion
============================
The ESP8266 has a single pin (separate from the GPIO pins) which can be used to
read analog voltages and convert them to a digital value. You can construct
such an ADC pin object using::
>>> import machine
>>> adc = machine.ADC(0)
Then read its value with::
>>> adc.read()
58
The values returned from the ``read()`` function are between 0 (for 0.0 volts)
and 1024 (for 1.0 volts). Please note that this input can only tolerate a
maximum of 1.0 volts and you must use a voltage divider circuit to measure
larger voltages.
View File
@@ -0,0 +1,91 @@
Controlling APA102 LEDs
=======================
APA102 LEDs, also known as DotStar LEDs, are individually addressable
full-colour RGB LEDs, generally in a string formation. They differ from
NeoPixels in that they require two pins to control - both a Clock and Data pin.
They can operate at a much higher data and PWM frequencies than NeoPixels and
are more suitable for persistence-of-vision effects.
To create an APA102 object do the following::
>>> import machine, apa102
>>> strip = apa102.APA102(machine.Pin(5), machine.Pin(4), 60)
This configures a 60 pixel APA102 strip with the clock on GPIO5 and data on GPIO4.
You can adjust the pin numbers and the number of pixels to suit your needs.
The RGB colour data, as well as a brightness level, is sent to the APA102 in a
certain order. Usually this is ``(Red, Green, Blue, Brightness)``.
If you are using one of the newer APA102C LEDs the green and blue are swapped,
so the order is ``(Red, Blue, Green, Brightness)``.
The APA102 has more of a square lens while the APA102C has more of a round one.
If you are using an APA102C strip and would prefer to provide colours in RGB
order instead of RBG, you can customise the tuple colour order like so::
>>> strip.ORDER = (0, 2, 1, 3)
To set the colour of pixels use::
>>> strip[0] = (255, 255, 255, 31) # set to white, full brightness
>>> strip[1] = (255, 0, 0, 31) # set to red, full brightness
>>> strip[2] = (0, 255, 0, 15) # set to green, half brightness
>>> strip[3] = (0, 0, 255, 7) # set to blue, quarter brightness
Use the ``write()`` method to output the colours to the LEDs::
>>> strip.write()
Demonstration::
import time
import machine, apa102
# 1M strip with 60 LEDs
strip = apa102.APA102(machine.Pin(5), machine.Pin(4), 60)
brightness = 1 # 0 is off, 1 is dim, 31 is max
# Helper for converting 0-255 offset to a colour tuple
def wheel(offset, brightness):
# The colours are a transition r - g - b - back to r
offset = 255 - offset
if offset < 85:
return (255 - offset * 3, 0, offset * 3, brightness)
if offset < 170:
offset -= 85
return (0, offset * 3, 255 - offset * 3, brightness)
offset -= 170
return (offset * 3, 255 - offset * 3, 0, brightness)
# Demo 1: RGB RGB RGB
red = 0xff0000
green = red >> 8
blue = red >> 16
for i in range(strip.n):
colour = red >> (i % 3) * 8
strip[i] = ((colour & red) >> 16, (colour & green) >> 8, (colour & blue), brightness)
strip.write()
# Demo 2: Show all colours of the rainbow
for i in range(strip.n):
strip[i] = wheel((i * 256 // strip.n) % 255, brightness)
strip.write()
# Demo 3: Fade all pixels together through rainbow colours, offset each pixel
for r in range(5):
for n in range(256):
for i in range(strip.n):
strip[i] = wheel(((i * 256 // strip.n) + n) & 255, brightness)
strip.write()
time.sleep_ms(25)
# Demo 4: Same colour, different brightness levels
for b in range(31,-1,-1):
strip[0] = (255, 153, 0, b)
strip.write()
time.sleep_ms(250)
# End: Turn off all the LEDs
strip.fill((0, 0, 0, 0))
strip.write()
View File
@@ -0,0 +1,65 @@
Temperature and Humidity
========================
DHT (Digital Humidity & Temperature) sensors are low cost digital sensors with
capacitive humidity sensors and thermistors to measure the surrounding air.
They feature a chip that handles analog to digital conversion and provide a
1-wire interface. Newer sensors additionally provide an I2C interface.
The DHT11 (blue) and DHT22 (white) sensors provide the same 1-wire interface,
however, the DHT22 requires a separate object as it has a more complex
calculation. The DHT22 has 1 decimal place of resolution for both humidity and
temperature readings; the DHT11 returns whole numbers for both.
A custom 1-wire protocol, which is different to Dallas 1-wire, is used to get
the measurements from the sensor. The payload consists of a humidity value,
a temperature value and a checksum.
To use the 1-wire interface, construct the objects referring to their data pin::
>>> import dht
>>> import machine
>>> d = dht.DHT11(machine.Pin(4))
>>> import dht
>>> import machine
>>> d = dht.DHT22(machine.Pin(4))
Then measure and read their values with::
>>> d.measure()
>>> d.temperature()
>>> d.humidity()
Values returned from ``temperature()`` are in degrees Celsius and values
returned from ``humidity()`` are a percentage of relative humidity.
The DHT11 can be called no more than once per second and the DHT22 once every
two seconds for most accurate results. Sensor accuracy will degrade over time.
Each sensor supports a different operating range. Refer to the product
datasheets for specifics.
In 1-wire mode, only three of the four pins are used and in I2C mode, all four
pins are used. Older sensors may still have 4 pins even though they do not
support I2C. The 3rd pin is simply not connected.
Pin configurations:
Sensor without I2C in 1-wire mode (eg. DHT11, DHT22, AM2301, AM2302):
1=VDD, 2=Data, 3=NC, 4=GND
Sensor with I2C in 1-wire mode (eg. DHT12, AM2320, AM2321, AM2322):
1=VDD, 2=Data, 3=GND, 4=GND
Sensor with I2C in I2C mode (eg. DHT12, AM2320, AM2321, AM2322):
1=VDD, 2=SDA, 3=GND, 4=SCL
You should use pull-up resistors for the Data, SDA and SCL pins.
To make newer I2C sensors work in backwards compatible 1-wire mode, you must
connect both pins 3 and 4 to GND. This disables the I2C interface.
DHT22 sensors are now sold under the name AM2302 and are otherwise identical.
View File
@@ -0,0 +1,69 @@
The internal filesystem
=======================
If your device has 1Mbyte or more of storage then it will be set up (upon first
boot) to contain a filesystem. This filesystem uses the FAT format and is
stored in the flash after the MicroPython firmware.
Creating and reading files
--------------------------
MicroPython on the ESP8266 supports the standard way of accessing files in
Python, using the built-in ``open()`` function.
To create a file try::
>>> f = open('data.txt', 'w')
>>> f.write('some data')
9
>>> f.close()
The "9" is the number of bytes that were written with the ``write()`` method.
Then you can read back the contents of this new file using::
>>> f = open('data.txt')
>>> f.read()
'some data'
>>> f.close()
Note that the default mode when opening a file is to open it in read-only mode,
and as a text file. Specify ``'wb'`` as the second argument to ``open()`` to
open for writing in binary mode, and ``'rb'`` to open for reading in binary
mode.
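For example, a short sketch of writing and then reading back a small binary
file (the filename is arbitrary)::
>>> f = open('data.bin', 'wb')
>>> f.write(b'\x01\x02\x03')
3
>>> f.close()
>>> f = open('data.bin', 'rb')
>>> f.read()
b'\x01\x02\x03'
>>> f.close()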
Listing file and more
---------------------
The os module can be used for further control over the filesystem. First
import the module::
>>> import os
Then try listing the contents of the filesystem::
>>> os.listdir()
['boot.py', 'port_config.py', 'data.txt']
You can make directories::
>>> os.mkdir('dir')
And remove entries::
>>> os.remove('data.txt')
Start up scripts
----------------
There are two files that are treated specially by the ESP8266 when it starts up:
boot.py and main.py. The boot.py script is executed first (if it exists) and
then once it completes the main.py script is executed. You can create these
files yourself and populate them with the code that you want to run when the
device starts up.
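As a quick sketch (the file contents here are arbitrary), a minimal ``main.py``
can be created directly from the REPL::
>>> f = open('main.py', 'w')
>>> f.write("print('started main.py')")
24
>>> f.close()
On the next boot, ``main.py`` runs after ``boot.py`` and prints the message.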
Accessing the filesystem via WebREPL
------------------------------------
You can access the filesystem over WebREPL using the web client in a browser
or via the command-line tool. Please refer to Quick Reference and Tutorial
sections for more information about WebREPL.
View File
@@ -0,0 +1,35 @@
.. _esp8266_tutorial:
MicroPython tutorial for ESP8266
================================
This tutorial is intended to get you started using MicroPython on the ESP8266
system-on-a-chip. If it is your first time it is recommended to follow the
tutorial through in the order below. Otherwise the sections are mostly self
contained, so feel free to skip to those that interest you.
The tutorial does not assume that you know Python, but it also does not attempt
to explain any of the details of the Python language. Instead it provides you
with commands that are ready to run, and hopes that you will gain a bit of
Python knowledge along the way. To learn more about Python itself please refer
to `<https://www.python.org>`__.
.. toctree::
:maxdepth: 1
:numbered:
intro.rst
repl.rst
filesystem.rst
network_basics.rst
network_tcp.rst
pins.rst
pwm.rst
adc.rst
powerctrl.rst
onewire.rst
neopixel.rst
apa102.rst
dht.rst
ssd1306.rst
nextsteps.rst

View File

@@ -0,0 +1,212 @@
.. _intro:
Getting started with MicroPython on the ESP8266
===============================================
Using MicroPython is a great way to get the most of your ESP8266 board. And
vice versa, the ESP8266 chip is a great platform for using MicroPython. This
tutorial will guide you through setting up MicroPython, getting a prompt, using
WebREPL, connecting to the network and communicating with the Internet, using
the hardware peripherals, and controlling some external components.
Let's get started!
Requirements
------------
The first thing you need is a board with an ESP8266 chip. The MicroPython
software supports the ESP8266 chip itself and any board should work. The main
characteristic of a board is how much flash it has, how the GPIO pins are
connected to the outside world, and whether it includes a built-in USB-serial
convertor to make the UART available to your PC.
The minimum requirement for flash size is 1Mbyte. There is also a special
build for boards with 512KB, but it is highly limited compared to the
normal build: there is no support for a filesystem, and thus features which
depend on it won't work (WebREPL, upip, etc.). As such, the 512KB build will
mainly be of interest to users who build from source and fine-tune parameters
for their particular application.
Names of pins will be given in this tutorial using the chip names (eg GPIO0)
and it should be straightforward to find which pin this corresponds to on your
particular board.
Powering the board
------------------
If your board has a USB connector on it then most likely it is powered through
this when connected to your PC. Otherwise you will need to power it directly.
Please refer to the documentation for your board for further details.
Getting the firmware
--------------------
The first thing you need to do is download the most recent MicroPython firmware
.bin file to load onto your ESP8266 device. You can download it from the
`MicroPython downloads page <http://micropython.org/download#esp8266>`_.
From here, you have 3 main choices:
* Stable firmware builds for 1024kb modules and above.
* Daily firmware builds for 1024kb modules and above.
* Daily firmware builds for 512kb modules.
If you are just starting with MicroPython, the best bet is to go for the Stable
firmware builds. If you are an advanced, experienced MicroPython ESP8266 user
who would like to follow development closely and help with testing new
features, there are daily builds (note: you actually may need some
development experience, e.g. being ready to follow git history to know
what new changes and features were introduced).
Support for 512kb modules is provided on a feature preview basis. For end
users, it's recommended to use modules with flash of 1024kb or more. As
such, only daily builds for 512kb modules are provided.
Deploying the firmware
----------------------
Once you have the MicroPython firmware (compiled code), you need to load it onto
your ESP8266 device. There are two main steps to do this: first you
need to put your device in boot-loader mode, and second you need to copy across
the firmware. The exact procedure for these steps is highly dependent on the
particular board and you will need to refer to its documentation for details.
If you have a board that has a USB connector, a USB-serial convertor, and has
the DTR and RTS pins wired in a special way then deploying the firmware should
be easy as all steps can be done automatically. Boards that have such features
include the Adafruit Feather HUZZAH and NodeMCU boards.
If you do not have such a board, you need to keep GPIO0 pulled to ground and reset
the device by pulling the reset pin to ground and releasing it again to enter
programming mode.
For best results it is recommended to first erase the entire flash of your
device before putting on new MicroPython firmware.
Currently we only support esptool.py to copy across the firmware. You can find
this tool here: `<https://github.com/espressif/esptool/>`__, or install it
using pip::
pip install esptool
Versions starting with 1.3 support both Python 2.7 and Python 3.4 (or newer).
An older version (at least 1.2.1 is needed) works fine but will require Python
2.7.
Any other flashing program should work, so feel free to try them out or refer
to the documentation for your board to see its recommendations.
Using esptool.py you can erase the flash with the command::
esptool.py --port /dev/ttyUSB0 erase_flash
And then deploy the new firmware using::
esptool.py --port /dev/ttyUSB0 --baud 460800 write_flash --flash_size=detect 0 esp8266-20170108-v1.8.7.bin
You might need to change the "port" setting to something else relevant for your
PC. You may also need to reduce the baudrate if you get errors when flashing
(eg down to 115200). The filename of the firmware should also match the file
that you have.
For some boards with a particular FlashROM configuration (e.g. some variants of
a NodeMCU board) you may need to manually set a compatible
`SPI Flash Mode <https://github.com/espressif/esptool/wiki/SPI-Flash-Modes>`_.
You'd usually pick the fastest option that is compatible with your device, but
the ``-fm dout`` option (the slowest option) should have the best compatibility::
esptool.py --port /dev/ttyUSB0 --baud 460800 write_flash --flash_size=detect -fm dout 0 esp8266-20170108-v1.8.7.bin
If the above commands run without error then MicroPython should be installed on
your board!
If you pulled GPIO0 manually to ground to enter programming mode, release it
now and reset the device by again pulling the reset pin to ground for a short
duration.
Serial prompt
-------------
Once you have the firmware on the device you can access the REPL (Python prompt)
over UART0 (GPIO1=TX, GPIO3=RX), which might be connected to a USB-serial
convertor, depending on your board. The baudrate is 115200. The next part of
the tutorial will discuss the prompt in more detail.
WiFi
----
After a fresh install and boot the device configures itself as a WiFi access
point (AP) that you can connect to. The ESSID is of the form MicroPython-xxxxxx
where the x's are replaced with part of the MAC address of your device (so it
will be the same every time, and most likely different for all ESP8266 chips). The
password for the WiFi is micropythoN (note the upper-case N). Its IP address
will be 192.168.4.1 once you connect to its network. WiFi configuration will
be discussed in more detail later in the tutorial.
Troubleshooting installation problems
-------------------------------------
If you experience problems during flashing or with running firmware immediately
after it, here are troubleshooting recommendations:
* Be aware of and try to exclude hardware problems. There are 2 common problems:
bad power source quality and worn-out/defective FlashROM. Speaking of power
sources, not just raw amperage is important, but also low ripple and noise/EMI
in general. If you experience issues with a self-made or wall-wart style power
supply, try USB power from a computer. Unearthed power supplies are also known
to cause problems, as they are a source of increased EMI (electromagnetic
interference) at the very least, and may lead to the breakdown of electrical
devices. So, you are advised to avoid using unearthed power connections when
working with the ESP8266 and other boards. In regard to FlashROM hardware
problems, there are independent (not related to MicroPython in any way) reports
`(e.g.) <http://internetofhomethings.com/homethings/?p=538>`_
that on some ESP8266 modules, FlashROM can be programmed as little as 20 times
before programming errors occur. This is *much* less than the 100,000
programming cycles cited by reputable vendors for the type of FlashROM chips
used with the ESP8266, which points to either production rejects or
second-hand, worn-out flash chips being used on some (apparently cheap)
modules/boards. You may want to use your best judgement about the source,
price, documentation, warranty, and post-sales support for the modules/boards
you purchase.
* The flashing instructions above use a flashing speed of 460800 baud, which is
a good compromise between speed and stability. However, depending on your
module/board, USB-UART convertor, cables, host OS, etc., the above baud
rate may be too high and lead to errors. Try a more common 115200 baud
rate instead in such cases.
* If a lower baud rate didn't help, you may want to try an older version of
esptool.py, which had a different programming algorithm::
pip install esptool==1.0.1
This version doesn't support the ``--flash_size=detect`` option, so you will
need to specify FlashROM size explicitly (in megabits). It also requires
Python 2.7, so you may need to use ``pip2`` instead of ``pip`` in the
command above.
* The ``--flash_size`` option in the commands above is mandatory. Omitting
it will lead to a corrupted firmware.
* To catch incorrect flash content (e.g. from a defective sector on a chip),
add the ``--verify`` switch to the commands above.
* Additionally, you can check the firmware integrity from a MicroPython REPL
prompt (assuming you were able to flash it and the ``--verify`` option doesn't
report errors)::
import esp
esp.check_fw()
If the last output value is True, the firmware is OK. Otherwise, it's
corrupted and needs to be reflashed correctly.
* If you experience any issues with another flashing application (not
esptool.py), try esptool.py; it is a generally accepted flashing
application in the ESP8266 community.
* If you still experience problems with even flashing the firmware, please
refer to the esptool.py project page, https://github.com/espressif/esptool,
for additional documentation and the bug tracker where you can report problems.
* If you are able to flash the firmware, but the ``--verify`` option or
``esp.check_fw()`` returns errors even after multiple retries, you
may have a defective FlashROM chip, as explained above.

View File

@@ -0,0 +1,84 @@
Controlling NeoPixels
=====================
NeoPixels, also known as WS2812 LEDs, are full-colour LEDs that are connected in
series, are individually addressable, and can have their red, green and blue
components set between 0 and 255. They require precise timing to control them
and there is a special neopixel module to do just this.
To create a NeoPixel object do the following::
>>> import machine, neopixel
>>> np = neopixel.NeoPixel(machine.Pin(4), 8)
This configures a NeoPixel strip on GPIO4 with 8 pixels. You can adjust the
"4" (pin number) and the "8" (number of pixel) to suit your set up.
To set the colour of pixels use::
>>> np[0] = (255, 0, 0) # set to red, full brightness
>>> np[1] = (0, 128, 0) # set to green, half brightness
>>> np[2] = (0, 0, 64) # set to blue, quarter brightness
For LEDs with more than 3 colours, such as RGBW pixels or RGBY pixels, the
NeoPixel class takes a ``bpp`` parameter. To set up a NeoPixel object for an
RGBW pixel, do the following::
>>> import machine, neopixel
>>> np = neopixel.NeoPixel(machine.Pin(4), 8, bpp=4)
In 4-bpp mode, remember to use 4-tuples instead of 3-tuples to set the colour.
For example, to set the first three pixels use::
>>> np[0] = (255, 0, 0, 128) # Orange in an RGBY Setup
>>> np[1] = (0, 255, 0, 128) # Yellow-green in an RGBY Setup
>>> np[2] = (0, 0, 255, 128) # Green-blue in an RGBY Setup
Then use the ``write()`` method to output the colours to the LEDs::
>>> np.write()
The following demo function makes a fancy show on the LEDs::
import time
def demo(np):
n = np.n
# cycle
for i in range(4 * n):
for j in range(n):
np[j] = (0, 0, 0)
np[i % n] = (255, 255, 255)
np.write()
time.sleep_ms(25)
# bounce
for i in range(4 * n):
for j in range(n):
np[j] = (0, 0, 128)
if (i // n) % 2 == 0:
np[i % n] = (0, 0, 0)
else:
np[n - 1 - (i % n)] = (0, 0, 0)
np.write()
time.sleep_ms(60)
# fade in/out
for i in range(0, 4 * 256, 8):
for j in range(n):
if (i // 256) % 2 == 0:
val = i & 0xff
else:
val = 255 - (i & 0xff)
np[j] = (val, 0, 0)
np.write()
# clear
for i in range(n):
np[i] = (0, 0, 0)
np.write()
Execute it using::
>>> demo(np)

View File

@@ -0,0 +1,81 @@
Network basics
==============
The network module is used to configure the WiFi connection. There are two WiFi
interfaces, one for the station (when the ESP8266 connects to a router) and one
for the access point (for other devices to connect to the ESP8266). Create
instances of these objects using::
>>> import network
>>> sta_if = network.WLAN(network.STA_IF)
>>> ap_if = network.WLAN(network.AP_IF)
You can check if the interfaces are active by::
>>> sta_if.active()
False
>>> ap_if.active()
True
You can also check the network settings of the interface by::
>>> ap_if.ifconfig()
('192.168.4.1', '255.255.255.0', '192.168.4.1', '8.8.8.8')
The returned values are: IP address, netmask, gateway, DNS.
Configuration of the WiFi
-------------------------
Upon a fresh install the ESP8266 is configured in access point mode, so the
AP_IF interface is active and the STA_IF interface is inactive. You can
configure the module to connect to your own network using the STA_IF interface.
First activate the station interface::
>>> sta_if.active(True)
Then connect to your WiFi network::
>>> sta_if.connect('<your ESSID>', '<your password>')
To check if the connection is established use::
>>> sta_if.isconnected()
Once established you can check the IP address::
>>> sta_if.ifconfig()
('192.168.0.2', '255.255.255.0', '192.168.0.1', '8.8.8.8')
You can then disable the access-point interface if you no longer need it::
>>> ap_if.active(False)
Here is a function you can run (or put in your boot.py file) to automatically
connect to your WiFi network::
def do_connect():
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
sta_if.connect('<essid>', '<password>')
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
Sockets
-------
Once the WiFi is set up, the way to access the network is by using sockets.
A socket represents an endpoint on a network device, and when two sockets are
connected together communication can proceed.
Internet protocols are built on top of sockets, such as email (SMTP), the web
(HTTP), telnet, ssh, among many others. Each of these protocols is assigned
a specific port, which is just an integer. Given an IP address and a port
number you can connect to a remote device and start talking with it.
The next part of the tutorial discusses how to use sockets to do some common
and useful network tasks.

View File

@@ -0,0 +1,123 @@
Network - TCP sockets
=====================
The building block of most of the internet is the TCP socket. These sockets
provide a reliable stream of bytes between the connected network devices.
This part of the tutorial will show how to use TCP sockets in a few different
cases.
Star Wars Asciimation
---------------------
The simplest thing to do is to download data from the internet. In this case
we will use the Star Wars Asciimation service provided by the blinkenlights.nl
website. It uses the telnet protocol on port 23 to stream data to anyone that
connects. It's very simple to use because it doesn't require you to
authenticate (give a username or password), you can just start downloading data
straight away.
The first thing to do is make sure we have the socket module available::
>>> import socket
Then get the IP address of the server::
>>> addr_info = socket.getaddrinfo("towel.blinkenlights.nl", 23)
The ``getaddrinfo`` function actually returns a list of addresses, and each
address has more information than we need. We want to get just the first valid
address, and then just the IP address and port of the server. To do this use::
>>> addr = addr_info[0][-1]
If you type ``addr_info`` and ``addr`` at the prompt you will see exactly what
information they hold.
Using the IP address we can make a socket and connect to the server::
>>> s = socket.socket()
>>> s.connect(addr)
Now that we are connected we can download and display the data::
>>> while True:
... data = s.recv(500)
... print(str(data, 'utf8'), end='')
...
When this loop executes it should start showing the animation (use ctrl-C to
interrupt it).
You should also be able to run this same code on your PC using normal Python if
you want to try it out there.
HTTP GET request
----------------
The next example shows how to download a webpage. HTTP uses port 80 and you
first need to send a "GET" request before you can download anything. As part
of the request you need to specify the page to retrieve.
Let's define a function that can download and print a URL::
def http_get(url):
import socket
_, _, host, path = url.split('/', 3)
addr = socket.getaddrinfo(host, 80)[0][-1]
s = socket.socket()
s.connect(addr)
s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
while True:
data = s.recv(100)
if data:
print(str(data, 'utf8'), end='')
else:
break
s.close()
Then you can try::
>>> http_get('http://micropython.org/ks/test.html')
This should retrieve the webpage and print the HTML to the console.
Simple HTTP server
------------------
The following code creates a simple HTTP server which serves a single webpage
that contains a table with the state of all the GPIO pins::
import machine
pins = [machine.Pin(i, machine.Pin.IN) for i in (0, 2, 4, 5, 12, 13, 14, 15)]
html = """<!DOCTYPE html>
<html>
<head> <title>ESP8266 Pins</title> </head>
<body> <h1>ESP8266 Pins</h1>
<table border="1"> <tr><th>Pin</th><th>Value</th></tr> %s </table>
</body>
</html>
"""
import socket
addr = socket.getaddrinfo('0.0.0.0', 80)[0][-1]
s = socket.socket()
s.bind(addr)
s.listen(1)
print('listening on', addr)
while True:
cl, addr = s.accept()
print('client connected from', addr)
cl_file = cl.makefile('rwb', 0)
while True:
line = cl_file.readline()
if not line or line == b'\r\n':
break
rows = ['<tr><td>%s</td><td>%d</td></tr>' % (str(p), p.value()) for p in pins]
response = html % '\n'.join(rows)
cl.send('HTTP/1.0 200 OK\r\nContent-type: text/html\r\n\r\n')
cl.send(response)
cl.close()

View File

@@ -0,0 +1,12 @@
Next steps
==========
That brings us to the end of the tutorial! Hopefully by now you have a good
feel for the capabilities of MicroPython on the ESP8266 and understand how to
control both the WiFi and IO aspects of the chip.
There are many features that were not covered in this tutorial. The best way
to learn about them is to read the full documentation of the modules, and to
experiment!
Good luck creating your Internet of Things devices!

View File

@@ -0,0 +1,37 @@
Controlling 1-wire devices
==========================
The 1-wire bus is a serial bus that uses just a single wire for communication
(in addition to wires for ground and power). The DS18B20 temperature sensor
is a very popular 1-wire device, and here we show how to use the onewire module
to read from such a device.
For the following code to work you need to have at least one DS18S20 or DS18B20 temperature
sensor with its data line connected to GPIO12. You must also power the sensors
and connect a 4.7k Ohm resistor between the data pin and the power pin. ::
import time
import machine
import onewire, ds18x20
# the device is on GPIO12
dat = machine.Pin(12)
# create the onewire object
ds = ds18x20.DS18X20(onewire.OneWire(dat))
# scan for devices on the bus
roms = ds.scan()
print('found devices:', roms)
# loop 10 times and print all temperatures
for i in range(10):
print('temperatures:', end=' ')
ds.convert_temp()
time.sleep_ms(750)
for rom in roms:
print(ds.read_temp(rom), end=' ')
print()
Note that you must execute the ``convert_temp()`` function to initiate a
temperature reading, then wait at least 750ms before reading the value.

View File

@@ -0,0 +1,76 @@
GPIO Pins
=========
The way to connect your board to the external world, and control other
components, is through the GPIO pins. Not all pins are available to use,
in most cases only pins 0, 2, 4, 5, 12, 13, 14, 15, and 16 can be used.
The pins are available in the machine module, so make sure you import that
first. Then you can create a pin using::
>>> pin = machine.Pin(0)
Here, the "0" is the pin that you want to access. Usually you want to
configure the pin to be input or output, and you do this when constructing
it. To make an input pin use::
>>> pin = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
You can either use PULL_UP or None for the input pull-mode. If it's
not specified then it defaults to None, which is no pull resistor. GPIO16
has no pull-up mode.
You can read the value on the pin using::
>>> pin.value()
0
The pin on your board may return 0 or 1 here, depending on what it's connected
to. To make an output pin use::
>>> pin = machine.Pin(0, machine.Pin.OUT)
Then set its value using::
>>> pin.value(0)
>>> pin.value(1)
Or::
>>> pin.off()
>>> pin.on()
External interrupts
-------------------
All pins except number 16 can be configured to trigger a hard interrupt if their
input changes. You can set code (a callback function) to be executed on the
trigger.
Let's first define a callback function, which must take a single argument,
being the pin that triggered the function. We will make the function just print
the pin::
>>> def callback(p):
... print('pin change', p)
Next we will create two pins and configure them as inputs::
>>> from machine import Pin
>>> p0 = Pin(0, Pin.IN)
>>> p2 = Pin(2, Pin.IN)
And finally we need to tell the pins when to trigger, and the function to call
when they detect an event::
>>> p0.irq(trigger=Pin.IRQ_FALLING, handler=callback)
>>> p2.irq(trigger=Pin.IRQ_RISING | Pin.IRQ_FALLING, handler=callback)
We set pin 0 to trigger only on a falling edge of the input (when it goes from
high to low), and set pin 2 to trigger on both a rising and falling edge. After
entering this code you can apply high and low voltages to pins 0 and 2 to see
the interrupt being executed.
A hard interrupt will trigger as soon as the event occurs and will interrupt any
running code, including Python code. As such your callback functions are
limited in what they can do (they cannot allocate memory, for example) and
should be as short and simple as possible.
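If a callback needs to do more substantial work (printing, allocating memory,
and so on), a common pattern is to keep the hard interrupt handler minimal and
defer the rest with ``micropython.schedule()``. A sketch of this, assuming the
same pin 0 input as above::

    from machine import Pin
    import micropython

    # reserve memory so exceptions raised inside IRQ handlers can be reported
    micropython.alloc_emergency_exception_buf(100)

    def process(pin):
        # runs soon after, outside the hard interrupt context,
        # so it is free to allocate memory and print
        print('handling change on', pin)

    def callback(p):
        # hard interrupt context: keep it short and allocation-free
        micropython.schedule(process, p)

    p0 = Pin(0, Pin.IN)
    p0.irq(trigger=Pin.IRQ_FALLING, handler=callback)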

View File

@@ -0,0 +1,61 @@
Power control
=============
The ESP8266 provides the ability to change the CPU frequency on the fly, and
enter a deep-sleep state. Both can be used to manage power consumption.
Changing the CPU frequency
--------------------------
The machine module has a function to get and set the CPU frequency. To get the
current frequency use::
>>> import machine
>>> machine.freq()
80000000
By default the CPU runs at 80MHz. It can be changed to 160MHz if you need more
processing power, at the expense of current consumption::
>>> machine.freq(160000000)
>>> machine.freq()
160000000
You can change to the higher frequency just while your code does the heavy
processing and then change back when it's finished.
Deep-sleep mode
---------------
The deep-sleep mode will shut down the ESP8266 and all its peripherals,
including the WiFi (but not including the real-time-clock, which is used to wake
the chip). This drastically reduces current consumption and is a good way to
make devices that can run for a while on a battery.
To be able to use the deep-sleep feature you must connect GPIO16 to the reset
pin (RST on the Adafruit Feather HUZZAH board). Then the following code can be
used to sleep and wake the device::
import machine
# configure RTC.ALARM0 to be able to wake the device
rtc = machine.RTC()
rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)
# set RTC.ALARM0 to fire after 10 seconds (waking the device)
rtc.alarm(rtc.ALARM0, 10000)
# put the device to sleep
machine.deepsleep()
Note that when the chip wakes from a deep-sleep it is completely reset,
including all of the memory. The boot scripts will run as usual and you can
put code in them to check the reset cause to perhaps do something different if
the device just woke from a deep-sleep. For example, to print the reset cause
you can use::
if machine.reset_cause() == machine.DEEPSLEEP_RESET:
print('woke from a deep sleep')
else:
print('power on or hard reset')

View File

@@ -0,0 +1,87 @@
Pulse Width Modulation
======================
Pulse width modulation (PWM) is a way to get an artificial analog output on a
digital pin. It achieves this by rapidly toggling the pin from low to high.
There are two parameters associated with this: the frequency of the toggling,
and the duty cycle. The duty cycle is defined to be how long the pin is high
compared with the length of a single period (low plus high time). Maximum
duty cycle is when the pin is high all of the time, and minimum is when it is
low all of the time.
On the ESP8266 the pins 0, 2, 4, 5, 12, 13, 14 and 15 all support PWM. The
limitation is that they must all be at the same frequency, and the frequency
must be between 1Hz and 1kHz.
To use PWM on a pin you must first create the pin object, for example::
>>> import machine
>>> p12 = machine.Pin(12)
Then create the PWM object using::
>>> pwm12 = machine.PWM(p12)
You can set the frequency and duty cycle using::
>>> pwm12.freq(500)
>>> pwm12.duty(512)
Note that the duty cycle is between 0 (all off) and 1023 (all on), with 512
being a 50% duty. Values beyond this min/max will be clipped. If you
print the PWM object then it will tell you its current configuration::
>>> pwm12
PWM(12, freq=500, duty=512)
You can also call the ``freq()`` and ``duty()`` methods with no arguments to
get their current values.
The pin will continue to be in PWM mode until you deinitialise it using::
>>> pwm12.deinit()
Fading an LED
-------------
Let's use the PWM feature to fade an LED. Assuming your board has an LED
connected to pin 2 (ESP-12 modules do) we can create an LED-PWM object using::
>>> led = machine.PWM(machine.Pin(2), freq=1000)
Notice that we can set the frequency in the PWM constructor.
For the next part we will use timing and some math, so import these modules::
>>> import time, math
Then create a function to pulse the LED::
>>> def pulse(l, t):
... for i in range(20):
... l.duty(int(math.sin(i / 10 * math.pi) * 500 + 500))
... time.sleep_ms(t)
You can try this function out using::
>>> pulse(led, 50)
For a nice effect you can pulse many times in a row::
>>> for i in range(10):
... pulse(led, 20)
Remember you can use ctrl-C to interrupt the code.
Control a hobby servo
---------------------
Hobby servo motors can be controlled using PWM. They require a frequency of
50Hz and then a duty between about 40 and 115, with 77 being the centre value.
If you connect a servo to the power and ground pins, and then the signal line
to pin 12 (other pins will work just as well), you can control the motor using::
>>> servo = machine.PWM(machine.Pin(12), freq=50)
>>> servo.duty(40)
>>> servo.duty(115)
>>> servo.duty(77)

View File

@@ -0,0 +1,212 @@
Getting a MicroPython REPL prompt
=================================
REPL stands for Read Evaluate Print Loop, and is the name given to the
interactive MicroPython prompt that you can access on the ESP8266. Using the
REPL is by far the easiest way to test out your code and run commands.
There are two ways to access the REPL: either via a wired connection through the
UART serial port, or via WiFi.
REPL over the serial port
-------------------------
The REPL is always available on the UART0 serial peripheral, which is connected
to the pins GPIO1 for TX and GPIO3 for RX. The baudrate of the REPL is 115200.
If your board has a USB-serial convertor on it then you should be able to access
the REPL directly from your PC. Otherwise you will need to have a way of
communicating with the UART.
To access the prompt over USB-serial you need to use a terminal emulator program.
On Windows TeraTerm is a good choice, on Mac you can use the built-in ``screen``
program, and Linux has ``picocom`` and ``minicom``. Of course, there are many
other terminal programs that will work, so pick your favourite!
For example, on Linux you can try running::
picocom /dev/ttyUSB0 -b115200
Once you have made the connection over the serial port you can test if it is
working by hitting enter a few times. You should see the Python REPL prompt,
indicated by ``>>>``.
WebREPL - a prompt over WiFi
----------------------------
WebREPL allows you to use the Python prompt over WiFi, connecting through a
browser. The latest versions of Firefox and Chrome are supported.
For your convenience, WebREPL client is hosted at
`<http://micropython.org/webrepl>`__. Alternatively, you can install it
locally from the GitHub repository
`<https://github.com/micropython/webrepl>`__.
Before connecting to WebREPL, you should set a password and enable it via
a normal serial connection. Initial versions of MicroPython for ESP8266
came with WebREPL automatically enabled on boot and with the
ability to set a password via WiFi on the first connection, but as WebREPL
became more widely known and popular, the initial setup was switched
to a wired connection for improved security::
import webrepl_setup
Follow the on-screen instructions and prompts. To make any changes active,
you will need to reboot your device.
To use WebREPL connect your computer to the ESP8266's access point
(MicroPython-xxxxxx, see the previous section about this). If you have
already reconfigured your ESP8266 to connect to a router then you can
skip this part.
Once you are on the same network as the ESP8266, click the "Connect" button
(if you are connecting via a router then you may need to change the IP address;
by default the IP address is correct when connected to the ESP8266's access
point). If the connection succeeds then you should see a password prompt.
Once you type the password configured at the setup step above, press Enter once
more and you should get a prompt looking like ``>>>``. You can now start
typing Python commands!
Using the REPL
--------------
Once you have a prompt you can start experimenting! Anything you type at the
prompt will be executed after you press the Enter key. MicroPython will run
the code that you enter and print the result (if there is one). If there is an
error with the text that you enter then an error message is printed.
Try typing the following at the prompt::
>>> print('hello esp8266!')
hello esp8266!
Note that you shouldn't type the ``>>>`` arrows; they are there to indicate that
you should type the text after them at the prompt. The line following is
what the device should respond with. In the end, once you have entered the text
``print('hello esp8266!')`` and pressed the Enter key, the output on your screen
should look exactly like it does above.
If you already know some Python you can now try some basic commands here. For
example::
>>> 1 + 2
3
>>> 1 / 2
0.5
>>> 12**34
4922235242952026704037113243122008064
If your board has an LED attached to GPIO2 (the ESP-12 modules do) then you can
turn it on and off using the following code::
>>> import machine
>>> pin = machine.Pin(2, machine.Pin.OUT)
>>> pin.on()
>>> pin.off()
Note that the ``on`` method of a Pin might turn the LED off and ``off`` might
turn it on (or vice versa), depending on how the LED is wired on your board.
To resolve this, the machine.Signal class is provided.
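For example, assuming an active-low LED on GPIO2, a Signal can be used so that
``on()`` always means "LED lit"::

    >>> from machine import Pin, Signal
    >>> led = Signal(Pin(2, Pin.OUT), invert=True)
    >>> led.on()    # LED lit
    >>> led.off()   # LED dark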
Line editing
~~~~~~~~~~~~
You can edit the current line that you are entering using the left and right
arrow keys to move the cursor, as well as the delete and backspace keys. Also,
pressing Home or ctrl-A moves the cursor to the start of the line, and pressing
End or ctrl-E moves to the end of the line.
Input history
~~~~~~~~~~~~~
The REPL remembers a certain number of previous lines of text that you entered
(up to 8 on the ESP8266). To recall previous lines use the up and down arrow
keys.
Tab completion
~~~~~~~~~~~~~~
Pressing the Tab key will do an auto-completion of the current word that you are
entering. This can be very useful to find out functions and methods that a
module or object has. Try it out by typing "ma" and then pressing Tab. It
should complete to "machine" (assuming you imported machine in the above
example). Then type "." and press Tab again to see a list of all the functions
that the machine module has.
Line continuation and auto-indent
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Certain things that you type will need "continuing", that is, will need more
lines of text to make a proper Python statement. In this case the prompt will
change to ``...`` and the cursor will auto-indent the correct amount so you can
start typing the next line straight away. Try this by defining the following
function::
>>> def toggle(p):
... p.value(not p.value())
...
...
...
>>>
In the above, you needed to press the Enter key three times in a row to finish
the compound statement (that's the three lines with just dots on them). The
other way to finish a compound statement is to press backspace to get to the
start of the line, then press the Enter key. (If you did something wrong and
want to escape the continuation mode then press ctrl-C; all lines will be
ignored.)
The function you just defined allows you to toggle a pin. The pin object you
created earlier should still exist (recreate it if it doesn't) and you can
toggle the LED using::
>>> toggle(pin)
Let's now toggle the LED in a loop (if you don't have an LED then you can just
print some text instead of calling toggle, to see the effect)::
>>> import time
>>> while True:
... toggle(pin)
... time.sleep_ms(500)
...
...
...
>>>
This will toggle the LED at 1Hz (half a second on, half a second off). To stop
the toggling press ctrl-C, which will raise a KeyboardInterrupt exception and
break out of the loop.
The time module provides some useful functions for making delays and doing
timing. Use tab completion to find out what they are and play around with them!
Paste mode
~~~~~~~~~~
Pressing ctrl-E will enter a special paste mode. This allows you to copy and
paste a chunk of text into the REPL. If you press ctrl-E you will see the
paste-mode prompt::
paste mode; Ctrl-C to cancel, Ctrl-D to finish
===
You can then paste (or type) your text in. Note that none of the special keys
or commands work in paste mode (eg Tab or backspace), they are just accepted
as-is. Press ctrl-D to finish entering the text and execute it.
Other control commands
~~~~~~~~~~~~~~~~~~~~~~
There are four other control commands:
* Ctrl-A on a blank line will enter raw REPL mode. This is like a permanent
paste mode, except that characters are not echoed back.
* Ctrl-B on a blank line goes to normal REPL mode.
* Ctrl-C cancels any input, or interrupts the currently running code.
* Ctrl-D on a blank line will do a soft reset.
Note that ctrl-A and ctrl-D do not work with WebREPL.

View File

@@ -0,0 +1,93 @@
.. _ssd1306:
Using a SSD1306 OLED display
============================
The SSD1306 OLED display uses either a SPI or I2C interface and comes in a variety of
sizes (128x64, 128x32, 72x40, 64x48) and colours (white, yellow, blue, yellow + blue).
Hardware SPI interface::
from machine import Pin, SPI
import ssd1306
hspi = SPI(1) # sck=14 (scl), mosi=13 (sda), miso=12 (unused)
dc = Pin(4) # data/command
rst = Pin(5) # reset
cs = Pin(15) # chip select, some modules do not have a pin for this
display = ssd1306.SSD1306_SPI(128, 64, hspi, dc, rst, cs)
Software SPI interface::
from machine import Pin, SoftSPI
import ssd1306
spi = SoftSPI(baudrate=500000, polarity=1, phase=0, sck=Pin(14), mosi=Pin(13), miso=Pin(12))
dc = Pin(4) # data/command
rst = Pin(5) # reset
cs = Pin(15) # chip select, some modules do not have a pin for this
display = ssd1306.SSD1306_SPI(128, 64, spi, dc, rst, cs)
I2C interface::
from machine import Pin, I2C
import ssd1306
# using default address 0x3C
i2c = I2C(sda=Pin(4), scl=Pin(5))
display = ssd1306.SSD1306_I2C(128, 64, i2c)
Print Hello World on the first line::
display.text('Hello, World!', 0, 0, 1)
display.show()
Basic functions::
display.poweroff() # power off the display, pixels persist in memory
display.poweron() # power on the display, pixels redrawn
display.contrast(0) # dim
display.contrast(255) # bright
display.invert(1) # display inverted
display.invert(0) # display normal
display.rotate(True) # rotate 180 degrees
display.rotate(False) # rotate 0 degrees
display.show() # write the contents of the FrameBuffer to display memory
Subclassing FrameBuffer provides support for graphics primitives::
display.fill(0) # fill entire screen with colour=0
display.pixel(0, 10) # get pixel at x=0, y=10
display.pixel(0, 10, 1) # set pixel at x=0, y=10 to colour=1
display.hline(0, 8, 4, 1) # draw horizontal line x=0, y=8, width=4, colour=1
display.vline(0, 8, 4, 1) # draw vertical line x=0, y=8, height=4, colour=1
display.line(0, 0, 127, 63, 1) # draw a line from 0,0 to 127,63
display.rect(10, 10, 107, 43, 1) # draw a rectangle outline 10,10 to 117,53, colour=1
display.fill_rect(10, 10, 107, 43, 1) # draw a solid rectangle 10,10 to 117,53, colour=1
display.text('Hello World', 0, 0, 1) # draw some text at x=0, y=0, colour=1
display.scroll(20, 0) # scroll 20 pixels to the right
# draw another FrameBuffer on top of the current one at the given coordinates
import framebuf
fbuf = framebuf.FrameBuffer(bytearray(8 * 8 * 1), 8, 8, framebuf.MONO_VLSB)
fbuf.line(0, 0, 7, 7, 1)
display.blit(fbuf, 10, 10, 0) # draw on top at x=10, y=10, key=0
display.show()
Draw the MicroPython logo and print some text::
display.fill(0)
display.fill_rect(0, 0, 32, 32, 1)
display.fill_rect(2, 2, 28, 28, 0)
display.vline(9, 8, 22, 1)
display.vline(16, 2, 22, 1)
display.vline(23, 8, 22, 1)
display.fill_rect(26, 24, 2, 4, 1)
display.text('MicroPython', 40, 0, 1)
display.text('SSD1306', 40, 12, 1)
display.text('OLED 128x64', 40, 24, 1)
display.show()

View File

@@ -0,0 +1,19 @@
MicroPython documentation and references
========================================
.. toctree::
library/index.rst
reference/index.rst
genrst/index.rst
develop/index.rst
license.rst
pyboard/quickref.rst
esp8266/quickref.rst
esp32/quickref.rst
rp2/quickref.rst
mimxrt/quickref.rst
wipy/quickref.rst
unix/quickref.rst
zephyr/quickref.rst
renesas-ra/quickref.rst

View File

@@ -0,0 +1,12 @@
:mod:`_thread` -- multithreading support
========================================
.. module:: _thread
:synopsis: multithreading support
|see_cpython_module| :mod:`python:_thread`.
This module implements multithreading support.
This module is highly experimental and its API is not yet fully settled
and not yet described in this documentation.
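As a rough illustration only (the API may change and availability depends on
the port and build), starting a function on a second thread looks like this::

    import _thread
    import time

    def task(name):
        for _ in range(3):
            print('hello from', name)
            time.sleep(1)

    # run task('worker') on a new thread; the main thread continues immediately
    _thread.start_new_thread(task, ('worker',))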

View File

@@ -0,0 +1,29 @@
:mod:`array` -- arrays of numeric data
======================================
.. module:: array
:synopsis: efficient arrays of numeric data
|see_cpython_module| :mod:`python:array`.
Supported format codes: ``b``, ``B``, ``h``, ``H``, ``i``, ``I``, ``l``,
``L``, ``q``, ``Q``, ``f``, ``d`` (the latter two depending on
floating-point support).
Classes
-------
.. class:: array(typecode, [iterable])
Create array with elements of given type. Initial contents of the
array are given by *iterable*. If it is not provided, an empty
array is created.
.. method:: append(val)
Append new element *val* to the end of array, growing it.
.. method:: extend(iterable)
Append new elements as contained in *iterable* to the end of
array, growing it.
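A short illustrative example, creating and growing an array of unsigned
16-bit integers::

    import array

    a = array.array('H', [1000, 2000, 3000])   # unsigned 16-bit integers
    a.append(4000)
    a.extend([5000, 6000])
    print(len(a), a[0])   # 6 1000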

View File

@@ -0,0 +1,38 @@
:mod:`binascii` -- binary/ASCII conversions
===========================================
.. module:: binascii
:synopsis: binary/ASCII conversions
|see_cpython_module| :mod:`python:binascii`.
This module implements conversions between binary data and various
encodings of it in ASCII form (in both directions).
Functions
---------
.. function:: hexlify(data, [sep])
Convert the bytes in the *data* object to a hexadecimal representation.
Returns a bytes object.
If the additional argument *sep* is supplied it is used as a separator
between hexadecimal values.
.. function:: unhexlify(data)
Convert hexadecimal data to binary representation. Returns a bytes object
(i.e. the inverse of hexlify).
.. function:: a2b_base64(data)
Decode base64-encoded data, ignoring invalid characters in the input.
Conforms to `RFC 2045 s.6.8 <https://tools.ietf.org/html/rfc2045#section-6.8>`_.
Returns a bytes object.
.. function:: b2a_base64(data, *, newline=True)
Encode binary data in base64 format, as in `RFC 3548
<https://tools.ietf.org/html/rfc3548.html>`_. Returns the encoded data
followed by a newline character if newline is true, as a bytes object.
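A short illustrative example::

    import binascii

    data = b'\x01\xf2\x03'
    print(binascii.hexlify(data))            # b'01f203'
    print(binascii.hexlify(data, ':'))       # b'01:f2:03'
    print(binascii.unhexlify(b'01f203'))     # b'\x01\xf2\x03'
    print(binascii.b2a_base64(data))         # b'AfID\n'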

View File

@@ -0,0 +1,761 @@
:mod:`bluetooth` --- low-level Bluetooth
========================================
.. module:: bluetooth
:synopsis: Low-level Bluetooth radio functionality
This module provides an interface to a Bluetooth controller on a board.
Currently this supports Bluetooth Low Energy (BLE) in Central, Peripheral,
Broadcaster, and Observer roles, as well as GATT Server and Client and L2CAP
connection-oriented-channels. A device may operate in multiple roles
concurrently. Pairing (and bonding) is supported on some ports.
This API is intended to match the low-level Bluetooth protocol and provide
building-blocks for higher-level abstractions such as specific device types.
.. note:: This module is still under development and its classes, functions,
methods and constants are subject to change.
class BLE
---------
Constructor
-----------
.. class:: BLE()
Returns the singleton BLE object.
Configuration
-------------
.. method:: BLE.active([active], /)
Optionally changes the active state of the BLE radio, and returns the
current state.
The radio must be made active before using any other methods on this class.
.. method:: BLE.config('param', /)
BLE.config(*, param=value, ...)
Get or set configuration values of the BLE interface. To get a value the
parameter name should be quoted as a string, and just one parameter is
queried at a time. To set values use the keyword syntax, and one or more
parameters can be set at a time.
Currently supported values are:
- ``'mac'``: The current address in use, depending on the current address mode.
This returns a tuple of ``(addr_type, addr)``.
See :meth:`gap_scan <BLE.gap_scan>` for details about address types.
This may only be queried while the interface is currently active.
- ``'addr_mode'``: Sets the address mode. Values can be:
* 0x00 - PUBLIC - Use the controller's public address.
* 0x01 - RANDOM - Use a generated static address.
* 0x02 - RPA - Use resolvable private addresses.
* 0x03 - NRPA - Use non-resolvable private addresses.
By default the interface mode will use a PUBLIC address if available, otherwise
it will use a RANDOM address.
- ``'gap_name'``: Get/set the GAP device name used by service 0x1800,
characteristic 0x2a00. This can be set at any time and changed multiple
times.
- ``'rxbuf'``: Get/set the size in bytes of the internal buffer used to store
incoming events. This buffer is global to the entire BLE driver and so
handles incoming data for all events, including all characteristics.
Increasing this allows better handling of bursty incoming data (for
example scan results) and the ability to receive larger characteristic values.
- ``'mtu'``: Get/set the MTU that will be used during an ATT MTU exchange. The
resulting MTU will be the minimum of this and the remote device's MTU.
ATT MTU exchange will not happen automatically (unless the remote device initiates
it), and must be manually initiated with
:meth:`gattc_exchange_mtu<BLE.gattc_exchange_mtu>`.
Use the ``_IRQ_MTU_EXCHANGED`` event to discover the MTU for a given connection.
- ``'bond'``: Sets whether bonding will be enabled during pairing. When
enabled, pairing requests will set the "bond" flag and the keys will be stored
by both devices.
- ``'mitm'``: Sets whether MITM-protection is required for pairing.
- ``'io'``: Sets the I/O capabilities of this device.
Available options are::
_IO_CAPABILITY_DISPLAY_ONLY = const(0)
_IO_CAPABILITY_DISPLAY_YESNO = const(1)
_IO_CAPABILITY_KEYBOARD_ONLY = const(2)
_IO_CAPABILITY_NO_INPUT_OUTPUT = const(3)
_IO_CAPABILITY_KEYBOARD_DISPLAY = const(4)
- ``'le_secure'``: Sets whether "LE Secure" pairing is required. Default is
false (i.e. allow "Legacy Pairing").
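For illustration, a possible configuration sequence (the parameter values here
are arbitrary examples)::

    import bluetooth

    ble = bluetooth.BLE()
    ble.active(True)

    # set one or more values using keyword syntax
    ble.config(gap_name='mpy-device', rxbuf=512)

    # query a single value by (quoted) name
    addr_type, addr = ble.config('mac')
    print(addr_type, bytes(addr))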
Event Handling
--------------
.. method:: BLE.irq(handler, /)
Registers a callback for events from the BLE stack. The *handler* takes two
arguments, ``event`` (which will be one of the codes below) and ``data``
(which is an event-specific tuple of values).
**Note:** As an optimisation to prevent unnecessary allocations, the ``addr``,
``adv_data``, ``char_data``, ``notify_data``, and ``uuid`` entries in the
tuples are read-only memoryview instances pointing to :mod:`bluetooth`'s internal
ringbuffer, and are only valid during the invocation of the IRQ handler
function. If your program needs to save one of these values to access after
the IRQ handler has returned (e.g. by saving it in a class instance or global
variable), then it needs to take a copy of the data, either by using ``bytes()``
or ``bluetooth.UUID()``, like this::
connected_addr = bytes(addr) # equivalently: adv_data, char_data, or notify_data
matched_uuid = bluetooth.UUID(uuid)
For example, the IRQ handler for a scan result might inspect the ``adv_data``
to decide if it's the correct device, and only then copy the address data to be
used elsewhere in the program. And to print data from within the IRQ handler,
``print(bytes(addr))`` will be needed.
An event handler showing all possible events::
def bt_irq(event, data):
if event == _IRQ_CENTRAL_CONNECT:
# A central has connected to this peripheral.
conn_handle, addr_type, addr = data
elif event == _IRQ_CENTRAL_DISCONNECT:
# A central has disconnected from this peripheral.
conn_handle, addr_type, addr = data
elif event == _IRQ_GATTS_WRITE:
# A client has written to this characteristic or descriptor.
conn_handle, attr_handle = data
elif event == _IRQ_GATTS_READ_REQUEST:
# A client has issued a read. Note: this is only supported on STM32.
# Return a non-zero integer to deny the read (see below), or zero (or None)
# to accept the read.
conn_handle, attr_handle = data
elif event == _IRQ_SCAN_RESULT:
# A single scan result.
addr_type, addr, adv_type, rssi, adv_data = data
elif event == _IRQ_SCAN_DONE:
# Scan duration finished or manually stopped.
pass
elif event == _IRQ_PERIPHERAL_CONNECT:
# A successful gap_connect().
conn_handle, addr_type, addr = data
elif event == _IRQ_PERIPHERAL_DISCONNECT:
# Connected peripheral has disconnected.
conn_handle, addr_type, addr = data
elif event == _IRQ_GATTC_SERVICE_RESULT:
# Called for each service found by gattc_discover_services().
conn_handle, start_handle, end_handle, uuid = data
elif event == _IRQ_GATTC_SERVICE_DONE:
# Called once service discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
# Called for each characteristic found by gattc_discover_characteristics().
conn_handle, def_handle, value_handle, properties, uuid = data
elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
# Called once characteristic discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
elif event == _IRQ_GATTC_DESCRIPTOR_RESULT:
# Called for each descriptor found by gattc_discover_descriptors().
conn_handle, dsc_handle, uuid = data
elif event == _IRQ_GATTC_DESCRIPTOR_DONE:
# Called once descriptor discovery is complete.
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, status = data
elif event == _IRQ_GATTC_READ_RESULT:
# A gattc_read() has completed.
conn_handle, value_handle, char_data = data
elif event == _IRQ_GATTC_READ_DONE:
# A gattc_read() has completed.
# Note: The value_handle will be zero on btstack (but present on NimBLE).
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, value_handle, status = data
elif event == _IRQ_GATTC_WRITE_DONE:
# A gattc_write() has completed.
# Note: The value_handle will be zero on btstack (but present on NimBLE).
# Note: Status will be zero on success, implementation-specific value otherwise.
conn_handle, value_handle, status = data
elif event == _IRQ_GATTC_NOTIFY:
# A server has sent a notify request.
conn_handle, value_handle, notify_data = data
elif event == _IRQ_GATTC_INDICATE:
# A server has sent an indicate request.
conn_handle, value_handle, notify_data = data
elif event == _IRQ_GATTS_INDICATE_DONE:
# A client has acknowledged the indication.
# Note: Status will be zero on successful acknowledgment, implementation-specific value otherwise.
conn_handle, value_handle, status = data
elif event == _IRQ_MTU_EXCHANGED:
# ATT MTU exchange complete (either initiated by us or the remote device).
conn_handle, mtu = data
elif event == _IRQ_L2CAP_ACCEPT:
# A new channel has been accepted.
# Return a non-zero integer to reject the connection, or zero (or None) to accept.
conn_handle, cid, psm, our_mtu, peer_mtu = data
elif event == _IRQ_L2CAP_CONNECT:
# A new channel is now connected (either as a result of connecting or accepting).
conn_handle, cid, psm, our_mtu, peer_mtu = data
elif event == _IRQ_L2CAP_DISCONNECT:
# Existing channel has disconnected (status is zero), or a connection attempt failed (non-zero status).
conn_handle, cid, psm, status = data
elif event == _IRQ_L2CAP_RECV:
# New data is available on the channel. Use l2cap_recvinto to read.
conn_handle, cid = data
elif event == _IRQ_L2CAP_SEND_READY:
# A previous l2cap_send that returned False has now completed and the channel is ready to send again.
# If status is non-zero, then the transmit buffer overflowed and the application should re-send the data.
conn_handle, cid, status = data
elif event == _IRQ_CONNECTION_UPDATE:
# The remote device has updated connection parameters.
conn_handle, conn_interval, conn_latency, supervision_timeout, status = data
elif event == _IRQ_ENCRYPTION_UPDATE:
# The encryption state has changed (likely as a result of pairing or bonding).
conn_handle, encrypted, authenticated, bonded, key_size = data
elif event == _IRQ_GET_SECRET:
# Return a stored secret.
# If key is None, return the index'th value of this sec_type.
# Otherwise return the corresponding value for this sec_type and key.
sec_type, index, key = data
return value
elif event == _IRQ_SET_SECRET:
# Save a secret to the store for this sec_type and key.
sec_type, key, value = data
return True
elif event == _IRQ_PASSKEY_ACTION:
# Respond to a passkey request during pairing.
# See gap_passkey() for details.
# action will be an action that is compatible with the configured "io" config.
# passkey will be non-zero if action is "numeric comparison".
conn_handle, action, passkey = data
The event codes are::
from micropython import const
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_GATTS_WRITE = const(3)
_IRQ_GATTS_READ_REQUEST = const(4)
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)
_IRQ_GATTC_DESCRIPTOR_DONE = const(14)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
_IRQ_GATTC_WRITE_DONE = const(17)
_IRQ_GATTC_NOTIFY = const(18)
_IRQ_GATTC_INDICATE = const(19)
_IRQ_GATTS_INDICATE_DONE = const(20)
_IRQ_MTU_EXCHANGED = const(21)
_IRQ_L2CAP_ACCEPT = const(22)
_IRQ_L2CAP_CONNECT = const(23)
_IRQ_L2CAP_DISCONNECT = const(24)
_IRQ_L2CAP_RECV = const(25)
_IRQ_L2CAP_SEND_READY = const(26)
_IRQ_CONNECTION_UPDATE = const(27)
_IRQ_ENCRYPTION_UPDATE = const(28)
_IRQ_GET_SECRET = const(29)
_IRQ_SET_SECRET = const(30)
For the ``_IRQ_GATTS_READ_REQUEST`` event, the available return codes are::
_GATTS_NO_ERROR = const(0x00)
_GATTS_ERROR_READ_NOT_PERMITTED = const(0x02)
_GATTS_ERROR_WRITE_NOT_PERMITTED = const(0x03)
_GATTS_ERROR_INSUFFICIENT_AUTHENTICATION = const(0x05)
_GATTS_ERROR_INSUFFICIENT_AUTHORIZATION = const(0x08)
_GATTS_ERROR_INSUFFICIENT_ENCRYPTION = const(0x0f)
For the ``_IRQ_PASSKEY_ACTION`` event, the available actions are::
_PASSKEY_ACTION_NONE = const(0)
_PASSKEY_ACTION_INPUT = const(2)
_PASSKEY_ACTION_DISPLAY = const(3)
_PASSKEY_ACTION_NUMERIC_COMPARISON = const(4)
In order to save space in the firmware, these constants are not included on the
:mod:`bluetooth` module. Add the ones that you need from the list above to your
program.
Broadcaster Role (Advertiser)
-----------------------------
.. method:: BLE.gap_advertise(interval_us, adv_data=None, *, resp_data=None, connectable=True)
Starts advertising at the specified interval (in **micro**\ seconds). This
interval will be rounded down to the nearest 625us. To stop advertising, set
*interval_us* to ``None``.
*adv_data* and *resp_data* can be any type that implements the buffer
protocol (e.g. ``bytes``, ``bytearray``, ``str``). *adv_data* is included
in all broadcasts, and *resp_data* is sent in reply to an active scan.
**Note:** if *adv_data* (or *resp_data*) is ``None``, then the data passed
to the previous call to ``gap_advertise`` will be re-used. This allows a
broadcaster to resume advertising with just ``gap_advertise(interval_us)``.
To clear the advertising payload pass an empty ``bytes``, i.e. ``b''``.
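For example, a minimal sketch that starts connectable advertising with a
hand-built payload (a flags field plus the complete local name ``'mpy'``;
in practice a helper library is usually used to construct the payload)::

    import bluetooth

    ble = bluetooth.BLE()
    ble.active(True)

    # 0x01 = Flags (general discoverable, BR/EDR not supported)
    # 0x09 = Complete Local Name
    adv_data = b'\x02\x01\x06' + b'\x04\x09mpy'

    ble.gap_advertise(100000, adv_data=adv_data)   # advertise every 100ms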
Observer Role (Scanner)
-----------------------
.. method:: BLE.gap_scan(duration_ms, interval_us=1280000, window_us=11250, active=False, /)
Run a scan operation lasting for the specified duration (in **milli**\ seconds).
To scan indefinitely, set *duration_ms* to ``0``.
To stop scanning, set *duration_ms* to ``None``.
Use *interval_us* and *window_us* to optionally configure the duty cycle.
The scanner will run for *window_us* **micro**\ seconds every *interval_us*
**micro**\ seconds for a total of *duration_ms* **milli**\ seconds. The default
interval and window are 1.28 seconds and 11.25 milliseconds respectively
(background scanning).
For each scan result the ``_IRQ_SCAN_RESULT`` event will be raised, with event
data ``(addr_type, addr, adv_type, rssi, adv_data)``.
``addr_type`` values indicate public or random addresses:
* 0x00 - PUBLIC
* 0x01 - RANDOM (either static, RPA, or NRPA, the type is encoded in the address itself)
``adv_type`` values correspond to the Bluetooth Specification:
* 0x00 - ADV_IND - connectable and scannable undirected advertising
* 0x01 - ADV_DIRECT_IND - connectable directed advertising
* 0x02 - ADV_SCAN_IND - scannable undirected advertising
* 0x03 - ADV_NONCONN_IND - non-connectable undirected advertising
* 0x04 - SCAN_RSP - scan response
``active`` can be set ``True`` if you want to receive scan responses in the results.
When scanning is stopped (either due to the duration finishing or when
explicitly stopped), the ``_IRQ_SCAN_DONE`` event will be raised.
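A minimal sketch of an active scan, printing each result as it arrives::

    import bluetooth
    from micropython import const

    _IRQ_SCAN_RESULT = const(5)
    _IRQ_SCAN_DONE = const(6)

    def bt_irq(event, data):
        if event == _IRQ_SCAN_RESULT:
            addr_type, addr, adv_type, rssi, adv_data = data
            print('found', bytes(addr), rssi)
        elif event == _IRQ_SCAN_DONE:
            print('scan complete')

    ble = bluetooth.BLE()
    ble.active(True)
    ble.irq(bt_irq)

    # scan for 10s with a 30ms interval/window, requesting scan responses
    ble.gap_scan(10000, 30000, 30000, True)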
Central Role
------------
A central device can connect to peripherals that it has discovered using the observer role (see :meth:`gap_scan<BLE.gap_scan>`) or with a known address.
.. method:: BLE.gap_connect(addr_type, addr, scan_duration_ms=2000, min_conn_interval_us=None, max_conn_interval_us=None, /)
Connect to a peripheral.
See :meth:`gap_scan <BLE.gap_scan>` for details about address types.
To cancel an outstanding connection attempt early, call
``gap_connect(None)``.
On success, the ``_IRQ_PERIPHERAL_CONNECT`` event will be raised. If
cancelling a connection attempt, the ``_IRQ_PERIPHERAL_DISCONNECT`` event
will be raised.
The device will wait up to *scan_duration_ms* to receive an advertising
payload from the device.
The connection interval can be configured in **micro**\ seconds using either
or both of *min_conn_interval_us* and *max_conn_interval_us*. Otherwise a
default interval will be chosen, typically between 30000 and 50000
microseconds. A shorter interval will increase throughput, at the expense
of power usage.
Peripheral Role
---------------
A peripheral device is expected to send connectable advertisements (see
:meth:`gap_advertise<BLE.gap_advertise>`). It will usually be acting as a GATT
server, having first registered services and characteristics using
:meth:`gatts_register_services<BLE.gatts_register_services>`.
When a central connects, the ``_IRQ_CENTRAL_CONNECT`` event will be raised.
Central & Peripheral Roles
--------------------------
.. method:: BLE.gap_disconnect(conn_handle, /)
Disconnect the specified connection handle. This can either be a
central that has connected to this device (if acting as a peripheral)
or a peripheral that was previously connected to by this device (if acting
as a central).
On success, the ``_IRQ_PERIPHERAL_DISCONNECT`` or ``_IRQ_CENTRAL_DISCONNECT``
event will be raised.
Returns ``False`` if the connection handle wasn't connected, and ``True``
otherwise.
GATT Server
-----------
A GATT server has a set of registered services. Each service may contain
characteristics, which each have a value. Characteristics can also contain
descriptors, which themselves have values.
These values are stored locally, and are accessed by their "value handle" which
is generated during service registration. They can also be read from or written
to by a remote client device. Additionally, a server can "notify" a
characteristic to a connected client via a connection handle.
A device in either the central or peripheral role may function as a GATT server; however, in most cases it will be more common for a peripheral device to act as the server.
Characteristics and descriptors have a default maximum size of 20 bytes.
Anything written to them by a client will be truncated to this length. However,
any local write will increase the maximum size, so if you want to allow larger
writes from a client to a given characteristic, use
:meth:`gatts_write<BLE.gatts_write>` after registration. e.g.
``gatts_write(char_handle, bytes(100))``.
.. method:: BLE.gatts_register_services(services_definition, /)
Configures the server with the specified services, replacing any
existing services.
*services_definition* is a list of **services**, where each **service** is a
two-element tuple containing a UUID and a list of **characteristics**.
Each **characteristic** is a two-or-three-element tuple containing a UUID, a
**flags** value, and optionally a list of *descriptors*.
Each **descriptor** is a two-element tuple containing a UUID and a **flags**
value.
The **flags** are a bitwise-OR combination of the flags defined below. These
set both the behaviour of the characteristic (or descriptor) as well as the
security and privacy requirements.
The return value is a list (one element per service) of tuples (each element
is a value handle). Characteristic and descriptor handles are flattened
into the same tuple, in the order that they are defined.
The following example registers two services (Heart Rate, and Nordic UART)::
HR_UUID = bluetooth.UUID(0x180D)
HR_CHAR = (bluetooth.UUID(0x2A37), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
HR_SERVICE = (HR_UUID, (HR_CHAR,),)
UART_UUID = bluetooth.UUID('6E400001-B5A3-F393-E0A9-E50E24DCCA9E')
UART_TX = (bluetooth.UUID('6E400003-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,)
UART_RX = (bluetooth.UUID('6E400002-B5A3-F393-E0A9-E50E24DCCA9E'), bluetooth.FLAG_WRITE,)
UART_SERVICE = (UART_UUID, (UART_TX, UART_RX,),)
SERVICES = (HR_SERVICE, UART_SERVICE,)
( (hr,), (tx, rx,), ) = bt.gatts_register_services(SERVICES)
The three value handles (``hr``, ``tx``, ``rx``) can be used with
:meth:`gatts_read <BLE.gatts_read>`, :meth:`gatts_write <BLE.gatts_write>`, :meth:`gatts_notify <BLE.gatts_notify>`, and
:meth:`gatts_indicate <BLE.gatts_indicate>`.
**Note:** Advertising must be stopped before registering services.
Available flags for characteristics and descriptors are::
from micropython import const
_FLAG_BROADCAST = const(0x0001)
_FLAG_READ = const(0x0002)
_FLAG_WRITE_NO_RESPONSE = const(0x0004)
_FLAG_WRITE = const(0x0008)
_FLAG_NOTIFY = const(0x0010)
_FLAG_INDICATE = const(0x0020)
_FLAG_AUTHENTICATED_SIGNED_WRITE = const(0x0040)
_FLAG_AUX_WRITE = const(0x0100)
_FLAG_READ_ENCRYPTED = const(0x0200)
_FLAG_READ_AUTHENTICATED = const(0x0400)
_FLAG_READ_AUTHORIZED = const(0x0800)
_FLAG_WRITE_ENCRYPTED = const(0x1000)
_FLAG_WRITE_AUTHENTICATED = const(0x2000)
_FLAG_WRITE_AUTHORIZED = const(0x4000)
As for the IRQs above, any required constants should be added to your Python code.
.. method:: BLE.gatts_read(value_handle, /)
Reads the local value for this handle (which has either been written by
:meth:`gatts_write <BLE.gatts_write>` or by a remote client).
.. method:: BLE.gatts_write(value_handle, data, send_update=False, /)
Writes the local value for this handle, which can be read by a client.
If *send_update* is ``True``, then any subscribed clients will be notified
(or indicated, depending on what they're subscribed to and which operations
the characteristic supports) about this write.
.. method:: BLE.gatts_notify(conn_handle, value_handle, data=None, /)
Sends a notification request to a connected client.
If *data* is not ``None``, then that value is sent to the client as part of
the notification. The local value will not be modified.
Otherwise, if *data* is ``None``, then the current local value (as
set with :meth:`gatts_write <BLE.gatts_write>`) will be sent.
**Note:** The notification will be sent regardless of the subscription
status of the client to this characteristic.
.. method:: BLE.gatts_indicate(conn_handle, value_handle, /)
Sends an indication request containing the characteristic's current value to
a connected client.
On acknowledgment (or failure, e.g. timeout), the
``_IRQ_GATTS_INDICATE_DONE`` event will be raised.
**Note:** The indication will be sent regardless of the subscription
status of the client to this characteristic.
.. method:: BLE.gatts_set_buffer(value_handle, len, append=False, /)
Sets the internal buffer size for a value in bytes. This will limit the
largest possible write that can be received. The default is 20.
Setting *append* to ``True`` will make all remote writes append to, rather
than replace, the current value. At most *len* bytes can be buffered in
this way. When you use :meth:`gatts_read <BLE.gatts_read>`, the value will
be cleared after reading. This feature is useful when implementing something
like the Nordic UART Service.
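For example, a minimal sketch of an append-mode buffer for the ``rx`` handle from the registration example above (the 100-byte size is an arbitrary choice)::

    # Allow remote writes of up to 100 bytes, appended rather than replacing.
    ble.gatts_set_buffer(rx, 100, True)

    # Later, typically after an _IRQ_GATTS_WRITE event, read and clear the buffer.
    data = ble.gatts_read(rx)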
GATT Client
-----------
A GATT client can discover and read/write characteristics on a remote GATT server.
It is more common for a central role device to act as the GATT client; however, it's also possible for a peripheral to act as a client in order to discover
information about the central that has connected to it (e.g. to read the
device name from the device information service).
.. method:: BLE.gattc_discover_services(conn_handle, uuid=None, /)
Query a connected server for its services.
Optionally specify a service *uuid* to query for that service only.
For each service discovered, the ``_IRQ_GATTC_SERVICE_RESULT`` event will
be raised, followed by ``_IRQ_GATTC_SERVICE_DONE`` on completion.
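For example, a minimal sketch of collecting the results in the IRQ handler (the event constant values are assumptions and should be checked against the event list)::

    _IRQ_GATTC_SERVICE_RESULT = 9    # assumed to match the event list
    _IRQ_GATTC_SERVICE_DONE = 10     # assumed to match the event list

    services = []

    def bt_irq(event, data):
        if event == _IRQ_GATTC_SERVICE_RESULT:
            conn_handle, start_handle, end_handle, uuid = data
            # Copy the UUID, as the reference is only valid inside the handler.
            services.append((start_handle, end_handle, bluetooth.UUID(uuid)))
        elif event == _IRQ_GATTC_SERVICE_DONE:
            conn_handle, status = data
            print("discovery complete, status:", status)

    ble.irq(bt_irq)
    ble.gattc_discover_services(conn_handle)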
.. method:: BLE.gattc_discover_characteristics(conn_handle, start_handle, end_handle, uuid=None, /)
Query a connected server for characteristics in the specified range.
Optionally specify a characteristic *uuid* to query for that
characteristic only.
You can use ``start_handle=1``, ``end_handle=0xffff`` to search for a
characteristic in any service.
For each characteristic discovered, the ``_IRQ_GATTC_CHARACTERISTIC_RESULT``
event will be raised, followed by ``_IRQ_GATTC_CHARACTERISTIC_DONE`` on completion.
.. method:: BLE.gattc_discover_descriptors(conn_handle, start_handle, end_handle, /)
Query a connected server for descriptors in the specified range.
For each descriptor discovered, the ``_IRQ_GATTC_DESCRIPTOR_RESULT`` event
will be raised, followed by ``_IRQ_GATTC_DESCRIPTOR_DONE`` on completion.
.. method:: BLE.gattc_read(conn_handle, value_handle, /)
Issue a remote read to a connected server for the specified
characteristic or descriptor handle.
When a value is available, the ``_IRQ_GATTC_READ_RESULT`` event will be
raised. Additionally, the ``_IRQ_GATTC_READ_DONE`` will be raised.
.. method:: BLE.gattc_write(conn_handle, value_handle, data, mode=0, /)
Issue a remote write to a connected server for the specified
characteristic or descriptor handle.
The argument *mode* specifies the write behaviour, with the currently
supported values being:
* ``mode=0`` (default) is a write-without-response: the write will
be sent to the remote server but no confirmation will be
returned, and no event will be raised.
* ``mode=1`` is a write-with-response: the remote server is
requested to send a response/acknowledgement that it received the
data.
If a response is received from the remote server the
``_IRQ_GATTC_WRITE_DONE`` event will be raised.
.. method:: BLE.gattc_exchange_mtu(conn_handle, /)
Initiate MTU exchange with a connected server, using the preferred MTU
set using ``BLE.config(mtu=value)``.
The ``_IRQ_MTU_EXCHANGED`` event will be raised when MTU exchange
completes.
**Note:** MTU exchange is typically initiated by the central. When using
the BlueKitchen stack in the central role, it does not support a remote
peripheral initiating the MTU exchange. NimBLE works for both roles.
L2CAP connection-oriented-channels
----------------------------------
This feature allows for socket-like data exchange between two BLE devices.
Once the devices are connected via GAP, either device can listen for the
other to connect on a numeric PSM (Protocol/Service Multiplexer).
**Note:** This is currently only supported when using the NimBLE stack on
STM32 and Unix (not ESP32). Only one L2CAP channel may be active at a given
time (i.e. you cannot connect while listening).
Active L2CAP channels are identified by the connection handle that they were
established on and a CID (channel ID).
Connection-oriented channels have built-in credit-based flow control. Unlike
ATT, where devices negotiate a shared MTU, both the listening and connecting
devices each set an independent MTU which limits the maximum amount of
outstanding data that the remote device can send before it is fully consumed
in :meth:`l2cap_recvinto <BLE.l2cap_recvinto>`.
.. method:: BLE.l2cap_listen(psm, mtu, /)
Start listening for incoming L2CAP channel requests on the specified *psm*
with the local MTU set to *mtu*.
When a remote device initiates a connection, the ``_IRQ_L2CAP_ACCEPT``
event will be raised, which gives the listening server a chance to reject
the incoming connection (by returning a non-zero integer).
Once the connection is accepted, the ``_IRQ_L2CAP_CONNECT`` event will be
raised, allowing the server to obtain the channel id (CID) and the local and
remote MTU.
**Note:** It is not currently possible to stop listening.
.. method:: BLE.l2cap_connect(conn_handle, psm, mtu, /)
Connect to a listening peer on the specified *psm* with local MTU set to *mtu*.
On successful connection, the ``_IRQ_L2CAP_CONNECT`` event will be
raised, allowing the client to obtain the CID and the local and remote (peer) MTU.
An unsuccessful connection will raise the ``_IRQ_L2CAP_DISCONNECT`` event
with a non-zero status.
.. method:: BLE.l2cap_disconnect(conn_handle, cid, /)
Disconnect an active L2CAP channel with the specified *conn_handle* and
*cid*.
.. method:: BLE.l2cap_send(conn_handle, cid, buf, /)
Send the specified *buf* (which must support the buffer protocol) on the
L2CAP channel identified by *conn_handle* and *cid*.
The specified buffer cannot be larger than the remote (peer) MTU, and no
more than twice the size of the local MTU.
This will return ``False`` if the channel is now "stalled", which means that
:meth:`l2cap_send <BLE.l2cap_send>` must not be called again until the
``_IRQ_L2CAP_SEND_READY`` event is received (which will happen when the
remote device grants more credits, typically after it has received and
processed the data).
.. method:: BLE.l2cap_recvinto(conn_handle, cid, buf, /)
Receive data from the specified *conn_handle* and *cid* into the provided
*buf* (which must support the buffer protocol, e.g. bytearray or
memoryview).
Returns the number of bytes read from the channel.
If *buf* is None, then returns the number of bytes available.
**Note:** After receiving the ``_IRQ_L2CAP_RECV`` event, the application should
continue calling :meth:`l2cap_recvinto <BLE.l2cap_recvinto>` until no more
bytes are available in the receive buffer (typically up to the size of the
remote (peer) MTU).
Until the receive buffer is empty, the remote device will not be granted
more channel credits and will be unable to send any more data.
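For example, a minimal sketch of the listening side, draining the channel after an ``_IRQ_L2CAP_RECV`` event (the PSM, MTU and ``handle_data`` helper are placeholders)::

    _PSM = 0x80      # arbitrary example PSM agreed with the peer
    _MTU = 512       # local MTU for the channel

    ble.l2cap_listen(_PSM, _MTU)
    rx_buf = bytearray(_MTU)

    # Called after _IRQ_L2CAP_RECV delivered conn_handle and cid.
    def drain(conn_handle, cid):
        # Keep reading until the buffer is empty so the peer is granted credits again.
        while ble.l2cap_recvinto(conn_handle, cid, None):
            n = ble.l2cap_recvinto(conn_handle, cid, rx_buf)
            handle_data(rx_buf[:n])   # hypothetical application handler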
Pairing and bonding
-------------------
Pairing allows a connection to be encrypted and authenticated via exchange
of secrets (with optional MITM protection via passkey authentication).
Bonding is the process of storing those secrets into non-volatile storage.
When bonded, a device is able to resolve a resolvable private address (RPA)
from another device based on the stored identity resolving key (IRK).
To support bonding, an application must implement the ``_IRQ_GET_SECRET``
and ``_IRQ_SET_SECRET`` events.
**Note:** This is currently only supported when using the NimBLE stack on
STM32 and Unix (not ESP32).
.. method:: BLE.gap_pair(conn_handle, /)
Initiate pairing with the remote device.
Before calling this, ensure that the ``io``, ``mitm``, ``le_secure``, and
``bond`` configuration options are set (via :meth:`config<BLE.config>`).
On successful pairing, the ``_IRQ_ENCRYPTION_UPDATE`` event will be raised.
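For example, a minimal sketch of bonding with LE Secure Connections and no I/O capabilities (the ``io`` capability value is an assumption; use the constant appropriate for your device)::

    from micropython import const

    _IO_CAPABILITY_NO_INPUT_OUTPUT = const(3)   # assumed value

    ble.config(bond=True, le_secure=True, mitm=False,
               io=_IO_CAPABILITY_NO_INPUT_OUTPUT)
    ble.gap_pair(conn_handle)
    # Wait for _IRQ_ENCRYPTION_UPDATE to confirm the link is encrypted.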
.. method:: BLE.gap_passkey(conn_handle, action, passkey, /)
Respond to a ``_IRQ_PASSKEY_ACTION`` event for the specified *conn_handle*
and *action*.
The *passkey* is a numeric value and will depend on the
*action* (which will depend on what I/O capability has been set):
* When the *action* is ``_PASSKEY_ACTION_INPUT``, then the application should
prompt the user to enter the passkey that is shown on the remote device.
* When the *action* is ``_PASSKEY_ACTION_DISPLAY``, then the application should
generate a random 6-digit passkey and show it to the user.
* When the *action* is ``_PASSKEY_ACTION_NUMERIC_COMPARISON``, then the application
should show the passkey that was provided in the ``_IRQ_PASSKEY_ACTION`` event
and then respond with either ``0`` (cancel pairing), or ``1`` (accept pairing).
class UUID
----------
Constructor
-----------
.. class:: UUID(value, /)
Creates a UUID instance with the specified **value**.
The **value** can be either:
- A 16-bit integer. e.g. ``0x2908``.
- A 128-bit UUID string. e.g. ``'6E400001-B5A3-F393-E0A9-E50E24DCCA9E'``.

View File

@@ -0,0 +1,159 @@
:mod:`btree` -- simple BTree database
=====================================
.. module:: btree
:synopsis: simple BTree database
The ``btree`` module implements a simple key-value database using external
storage (disk files, or in general case, a random-access `stream`). Keys are
stored sorted in the database, and besides efficient retrieval by a key
value, a database also supports efficient ordered range scans (retrieval
of values with the keys in a given range). On the application interface
side, a BTree database works as closely as possible to the way the standard `dict` type works; one notable difference is that both keys and values must
be `bytes` objects (so, if you want to store objects of other types, you
need to serialize them to `bytes` first).
The module is based on the well-known BerkeleyDB library, version 1.xx.
Example::
import btree
# First, we need to open a stream which holds a database
# This is usually a file, but can be in-memory database
# using io.BytesIO, a raw flash partition, etc.
# Oftentimes, you want to create a database file if it doesn't
# exist and open if it exists. Idiom below takes care of this.
# DO NOT open database with "a+b" access mode.
try:
f = open("mydb", "r+b")
except OSError:
f = open("mydb", "w+b")
# Now open a database itself
db = btree.open(f)
# The keys you add will be sorted internally in the database
db[b"3"] = b"three"
db[b"1"] = b"one"
db[b"2"] = b"two"
# Assume that any changes are cached in memory unless
# explicitly flushed (or database closed). Flush database
# at the end of each "transaction".
db.flush()
# Prints b'two'
print(db[b"2"])
# Iterate over sorted keys in the database, starting from b"2"
# until the end of the database, returning only values.
# Mind that arguments passed to values() method are *key* values.
# Prints:
# b'two'
# b'three'
for word in db.values(b"2"):
print(word)
del db[b"2"]
# No longer true, prints False
print(b"2" in db)
# Prints:
# b"1"
# b"3"
for key in db:
print(key)
db.close()
# Don't forget to close the underlying stream!
f.close()
Functions
---------
.. function:: open(stream, *, flags=0, pagesize=0, cachesize=0, minkeypage=0)
Open a database from a random-access `stream` (like an open file). All
other parameters are optional and keyword-only, and allow tweaking advanced
parameters of the database operation (most users will not need them):
* *flags* - Currently unused.
* *pagesize* - Page size used for the nodes in BTree. Acceptable range
is 512-65536. If 0, a port-specific default will be used, optimized for
port's memory usage and/or performance.
* *cachesize* - Suggested memory cache size in bytes. For a
board with enough memory using larger values may improve performance.
The cache policy is as follows: the entire cache is not allocated at once; instead, accessing a new page in the database allocates a memory buffer for it, until the value specified by *cachesize* is reached. Then, these buffers are managed using an LRU (least recently used) policy. More buffers may still be allocated if needed (e.g., if a database contains big keys and/or values). Allocated cache buffers aren't reclaimed.
* *minkeypage* - Minimum number of keys to store per page. Default value
of 0 is equivalent to 2.
Returns a BTree object, which implements a dictionary protocol (set
of methods), and some additional methods described below.
Methods
-------
.. method:: btree.close()
Close the database. It's mandatory to close the database at the end of
processing, as some unwritten data may still be in the cache. Note that this does not close the underlying stream with which the database was opened; it should be closed separately (which is also mandatory to make sure that data is flushed from the buffer to the underlying storage).
.. method:: btree.flush()
Flush any data in cache to the underlying stream.
.. method:: btree.__getitem__(key)
btree.get(key, default=None, /)
btree.__setitem__(key, val)
btree.__delitem__(key)
btree.__contains__(key)
Standard dictionary methods.
.. method:: btree.__iter__()
A BTree object can be iterated over directly (similar to a dictionary)
to get access to all keys in order.
.. method:: btree.keys([start_key, [end_key, [flags]]])
btree.values([start_key, [end_key, [flags]]])
btree.items([start_key, [end_key, [flags]]])
These methods are similar to standard dictionary methods, but also can
take optional parameters to iterate over a key sub-range, instead of
the entire database. Note that for all 3 methods, *start_key* and
*end_key* arguments represent key values. For example, the `values()` method will iterate over values corresponding to the given key range. A ``None`` value for *start_key* means "from the first key", and no *end_key* (or a value of ``None``) means "until the end of the database". By default, the range is inclusive of *start_key* and exclusive of *end_key*; you can include *end_key* in the iteration by passing *flags* of `btree.INCL`. You can iterate in descending key order by passing *flags* of `btree.DESC`. The flag values can be ORed together.
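For example, building on the database created above, a range scan sketch::

    # All items with keys from b"1" up to and including b"3".
    for key, value in db.items(b"1", b"3", btree.INCL):
        print(key, value)

    # All items, highest key first.
    for key, value in db.items(None, None, btree.DESC):
        print(key, value)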
Constants
---------
.. data:: INCL
A flag for `keys()`, `values()`, `items()` methods to specify that
scanning should be inclusive of the end key.
.. data:: DESC
A flag for `keys()`, `values()`, `items()` methods to specify that
scanning should be in descending direction of keys.

View File

@@ -0,0 +1,195 @@
:mod:`builtins` -- builtin functions and exceptions
===================================================
All builtin functions and exceptions are described here. They are also
available via ``builtins`` module.
Functions and types
-------------------
.. function:: abs()
.. function:: all()
.. function:: any()
.. function:: bin()
.. class:: bool()
.. class:: bytearray()
.. class:: bytes()
|see_cpython| `python:bytes`.
.. function:: callable()
.. function:: chr()
.. function:: classmethod()
.. function:: compile()
.. class:: complex()
.. function:: delattr(obj, name)
The argument *name* should be a string, and this function deletes the named
attribute from the object given by *obj*.
.. class:: dict()
.. function:: dir()
.. function:: divmod()
.. function:: enumerate()
.. function:: eval()
.. function:: exec()
.. function:: filter()
.. class:: float()
.. class:: frozenset()
.. function:: getattr()
.. function:: globals()
.. function:: hasattr()
.. function:: hash()
.. function:: hex()
.. function:: id()
.. function:: input()
.. class:: int()
.. classmethod:: from_bytes(bytes, byteorder)
In MicroPython, the `byteorder` parameter must be positional (this is
compatible with CPython).
.. method:: to_bytes(size, byteorder)
In MicroPython, the `byteorder` parameter must be positional (this is
compatible with CPython).
.. function:: isinstance()
.. function:: issubclass()
.. function:: iter()
.. function:: len()
.. class:: list()
.. function:: locals()
.. function:: map()
.. function:: max()
.. class:: memoryview()
.. function:: min()
.. function:: next()
.. class:: object()
.. function:: oct()
.. function:: open()
.. function:: ord()
.. function:: pow()
.. function:: print()
.. function:: property()
.. function:: range()
.. function:: repr()
.. function:: reversed()
.. function:: round()
.. class:: set()
.. function:: setattr()
.. class:: slice()
The *slice* builtin is the type that slice objects have.
.. function:: sorted()
.. function:: staticmethod()
.. class:: str()
.. function:: sum()
.. function:: super()
.. class:: tuple()
.. function:: type()
.. function:: zip()
Exceptions
----------
.. exception:: AssertionError
.. exception:: AttributeError
.. exception:: Exception
.. exception:: ImportError
.. exception:: IndexError
.. exception:: KeyboardInterrupt
.. exception:: KeyError
.. exception:: MemoryError
.. exception:: NameError
.. exception:: NotImplementedError
.. exception:: OSError
.. exception:: RuntimeError
.. exception:: StopIteration
.. exception:: SyntaxError
.. exception:: SystemExit
|see_cpython| `python:SystemExit`.
.. exception:: TypeError
|see_cpython| `python:TypeError`.
.. exception:: ValueError
.. exception:: ZeroDivisionError

View File

@@ -0,0 +1,63 @@
:mod:`cmath` -- mathematical functions for complex numbers
==========================================================
.. module:: cmath
:synopsis: mathematical functions for complex numbers
|see_cpython_module| :mod:`python:cmath`.
The ``cmath`` module provides some basic mathematical functions for
working with complex numbers.
Availability: not available on WiPy and ESP8266. Floating point support
required for this module.
Functions
---------
.. function:: cos(z)
Return the cosine of ``z``.
.. function:: exp(z)
Return the exponential of ``z``.
.. function:: log(z)
Return the natural logarithm of ``z``. The branch cut is along the negative real axis.
.. function:: log10(z)
Return the base-10 logarithm of ``z``. The branch cut is along the negative real axis.
.. function:: phase(z)
Returns the phase of the number ``z``, in the range (-pi, +pi].
.. function:: polar(z)
Returns, as a tuple, the polar form of ``z``.
.. function:: rect(r, phi)
Returns the complex number with modulus ``r`` and phase ``phi``.
.. function:: sin(z)
Return the sine of ``z``.
.. function:: sqrt(z)
Return the square-root of ``z``.
Constants
---------
.. data:: e
base of the natural logarithm
.. data:: pi
the ratio of a circle's circumference to its diameter

View File

@@ -0,0 +1,82 @@
:mod:`collections` -- collection and container types
====================================================
.. module:: collections
:synopsis: collection and container types
|see_cpython_module| :mod:`python:collections`.
This module implements advanced collection and container types to
hold/accumulate various objects.
Classes
-------
.. class:: deque(iterable, maxlen[, flags])
Deques (double-ended queues) are a list-like container that support O(1)
appends and pops from either side of the deque. New deques are created
using the following arguments:
- *iterable* must be the empty tuple, and the new deque is created empty.
- *maxlen* must be specified and the deque will be bounded to this
maximum length. Once the deque is full, any new items added will
discard items from the opposite end.
- The optional *flags* can be 1 to check for overflow when adding items.
As well as supporting `bool` and `len`, deque objects have the following
methods:
.. method:: deque.append(x)
Add *x* to the right side of the deque.
Raises IndexError if overflow checking is enabled and there is no more room left.
.. method:: deque.popleft()
Remove and return an item from the left side of the deque.
Raises IndexError if no items are present.
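Example of use::

    from collections import deque

    d = deque((), 5)        # empty deque, bounded to 5 items
    d.append(1)
    d.append(2)
    print(len(d))           # 2
    print(d.popleft())      # 1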
.. function:: namedtuple(name, fields)
This is a factory function to create a new namedtuple type with a specific name and set of fields. A namedtuple is a subclass of tuple which allows access to its fields not just by numeric index, but also with an attribute access syntax using symbolic field names. *fields* is a sequence of strings specifying the field names. For compatibility with CPython it can also be a string with space-separated field names (but this is less efficient).
Example of use::
from collections import namedtuple
MyTuple = namedtuple("MyTuple", ("id", "name"))
t1 = MyTuple(1, "foo")
t2 = MyTuple(2, "bar")
print(t1.name)
assert t2.name == t2[1]
.. class:: OrderedDict(...)
``dict`` type subclass which remembers and preserves the order of keys
added. When an ordered dict is iterated over, keys/items are returned in
the order they were added::
from collections import OrderedDict
# To benefit from ordered keys, an OrderedDict should be initialized
# from a sequence of (key, value) pairs.
d = OrderedDict([("z", 1), ("a", 2)])
# More items can be added as usual
d["w"] = 5
d["b"] = 3
for k, v in d.items():
print(k, v)
Output::
z 1
a 2
w 5
b 3

View File

@@ -0,0 +1,40 @@
:mod:`cryptolib` -- cryptographic ciphers
=========================================
.. module:: cryptolib
:synopsis: cryptographic ciphers
Classes
-------
.. class:: aes
.. classmethod:: __init__(key, mode, [IV])
Initialize a cipher object, suitable for encryption/decryption. Note: after initialization, a cipher object can be used only for either encryption or decryption. Running a decrypt() operation after encrypt() or vice versa is not supported.
Parameters are:
* *key* is an encryption/decryption key (bytes-like).
* *mode* is:
* ``1`` (or ``cryptolib.MODE_ECB`` if it exists) for Electronic Code Book (ECB).
* ``2`` (or ``cryptolib.MODE_CBC`` if it exists) for Cipher Block Chaining (CBC).
* ``6`` (or ``cryptolib.MODE_CTR`` if it exists) for Counter mode (CTR).
* *IV* is an initialization vector for CBC mode.
* For Counter mode, *IV* is the initial value for the counter.
.. method:: encrypt(in_buf, [out_buf])
Encrypt *in_buf*. If no *out_buf* is given, the result is returned as a newly allocated `bytes` object. Otherwise, the result is written into the mutable buffer *out_buf*. *in_buf* and *out_buf* can also refer
to the same mutable buffer, in which case data is encrypted in-place.
.. method:: decrypt(in_buf, [out_buf])
Like `encrypt()`, but for decryption.
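For example, a minimal ECB sketch (the key and the 16-byte-aligned plaintext are placeholders)::

    import cryptolib

    key = b"0123456789abcdef"          # 16 bytes -> AES-128
    plaintext = b"hello world====="    # padded to the 16-byte block size

    # Separate cipher objects for encryption and decryption.
    enc = cryptolib.aes(key, 1).encrypt(plaintext)   # mode 1 = ECB
    dec = cryptolib.aes(key, 1).decrypt(enc)
    assert dec == plaintext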

View File

@@ -0,0 +1,34 @@
:mod:`errno` -- system error codes
==================================
.. module:: errno
:synopsis: system error codes
|see_cpython_module| :mod:`python:errno`.
This module provides access to symbolic error codes for the `OSError` exception.
A particular inventory of codes depends on :term:`MicroPython port`.
Constants
---------
.. data:: EEXIST, EAGAIN, etc.
Error codes, based on ANSI C/POSIX standard. All error codes start with
"E". As mentioned above, inventory of the codes depends on
:term:`MicroPython port`. Errors are usually accessible as ``exc.errno``
where ``exc`` is an instance of `OSError`. Usage example::
try:
os.mkdir("my_dir")
except OSError as exc:
if exc.errno == errno.EEXIST:
print("Directory already exists")
.. data:: errorcode
Dictionary mapping numeric error codes to strings with symbolic error
code (see above)::
>>> print(errno.errorcode[errno.EEXIST])
EEXIST

View File

@@ -0,0 +1,116 @@
:mod:`esp` --- functions related to the ESP8266 and ESP32
=========================================================
.. module:: esp
:synopsis: functions related to the ESP8266 and ESP32
The ``esp`` module contains specific functions related to both the ESP8266 and
ESP32 modules. Some functions are only available on one or the other of these
ports.
Functions
---------
.. function:: sleep_type([sleep_type])
**Note**: ESP8266 only
Get or set the sleep type.
If the *sleep_type* parameter is provided, sets the sleep type to its
value. If the function is called without parameters, returns the current
sleep type.
The possible sleep types are defined as constants:
* ``SLEEP_NONE`` -- all functions enabled,
* ``SLEEP_MODEM`` -- modem sleep, shuts down the WiFi Modem circuit.
* ``SLEEP_LIGHT`` -- light sleep, shuts down the WiFi Modem circuit
and suspends the processor periodically.
The system enters the set sleep mode automatically when possible.
.. function:: deepsleep(time_us=0, /)
**Note**: ESP8266 only - use `machine.deepsleep()` on ESP32
Enter deep sleep.
The whole module powers down, except for the RTC clock circuit, which can
be used to restart the module after the specified time, if pin 16 is connected to the reset pin. Otherwise the module will sleep until manually
reset.
.. function:: flash_id()
**Note**: ESP8266 only
Read the device ID of the flash memory.
.. function:: flash_size()
Read the total size of the flash memory.
.. function:: flash_user_start()
Read the memory offset at which the user flash space begins.
.. function:: flash_read(byte_offset, length_or_buffer)
.. function:: flash_write(byte_offset, bytes)
.. function:: flash_erase(sector_no)
.. function:: osdebug(level)
Turn esp os debugging messages on or off.
The *level* parameter sets the threshold for the log messages for all esp components.
The log levels are defined as constants:
* ``LOG_NONE`` -- No log output
* ``LOG_ERROR`` -- Critical errors, software module can not recover on its own
* ``LOG_WARN`` -- Error conditions from which recovery measures have been taken
* ``LOG_INFO`` -- Information messages which describe normal flow of events
* ``LOG_DEBUG`` -- Extra information which is not necessary for normal use (values, pointers, sizes, etc)
* ``LOG_VERBOSE`` -- Bigger chunks of debugging information, or frequent messages
which can potentially flood the output
.. function:: set_native_code_location(start, length)
**Note**: ESP8266 only
Set the location that native code will be placed for execution after it is
compiled. Native code is emitted when the ``@micropython.native``,
``@micropython.viper`` and ``@micropython.asm_xtensa`` decorators are applied
to a function. The ESP8266 must execute code from either iRAM or the lower
1MByte of flash (which is memory mapped), and this function controls the
location.
If *start* and *length* are both ``None`` then the native code location is
set to the unused portion of memory at the end of the iRAM1 region. The
size of this unused portion depends on the firmware and is typically quite
small (around 500 bytes), and is enough to store a few very small
functions. The advantage of using this iRAM1 region is that it does not
get worn out by writing to it.
If neither *start* nor *length* are ``None`` then they should be integers.
*start* should specify the byte offset from the beginning of the flash at
which native code should be stored. *length* specifies how many bytes of
flash from *start* can be used to store native code. *start* and *length*
should be multiples of the sector size (being 4096 bytes). The flash will
be automatically erased before writing to it so be sure to use a region of
flash that is not otherwise used, for example by the firmware or the
filesystem.
When using the flash to store native code *start+length* must be less
than or equal to 1MByte. Note that the flash can be worn out if repeated
erasures (and writes) are made so use this feature sparingly.
In particular, native code needs to be recompiled and rewritten to flash
on each boot (including wake from deepsleep).
In both cases above, using iRAM1 or flash, if there is no more room left
in the specified region then the use of a native decorator on a function
will lead to `MemoryError` exception being raised during compilation of
that function.

View File

@@ -0,0 +1,348 @@
.. currentmodule:: esp32
:mod:`esp32` --- functionality specific to the ESP32
====================================================
.. module:: esp32
:synopsis: functionality specific to the ESP32
The ``esp32`` module contains functions and classes specifically aimed at
controlling ESP32 modules.
Functions
---------
.. function:: wake_on_touch(wake)
Configure whether or not a touch will wake the device from sleep.
*wake* should be a boolean value.
.. function:: wake_on_ext0(pin, level)
Configure how EXT0 wakes the device from sleep. *pin* can be ``None``
or a valid Pin object. *level* should be ``esp32.WAKEUP_ALL_LOW`` or
``esp32.WAKEUP_ANY_HIGH``.
.. function:: wake_on_ext1(pins, level)
Configure how EXT1 wakes the device from sleep. *pins* can be ``None``
or a tuple/list of valid Pin objects. *level* should be ``esp32.WAKEUP_ALL_LOW``
or ``esp32.WAKEUP_ANY_HIGH``.
.. function:: gpio_deep_sleep_hold(enable)
Configure whether non-RTC GPIO pin configuration is retained during
deep-sleep mode for held pads. *enable* should be a boolean value.
.. function:: raw_temperature()
Read the raw value of the internal temperature sensor, returning an integer.
.. function:: hall_sensor()
Read the raw value of the internal Hall sensor, returning an integer.
.. function:: idf_heap_info(capabilities)
Returns information about the ESP-IDF heap memory regions. One of them contains
the MicroPython heap and the others are used by ESP-IDF, e.g., for network
buffers and other data. This data is useful to get a sense of how much memory
is available to ESP-IDF and the networking stack in particular. It may shed
some light on situations where ESP-IDF operations fail due to allocation failures.
The information returned is *not* useful for troubleshooting Python allocation failures; use `micropython.mem_info()` instead.
The capabilities parameter corresponds to ESP-IDF's ``MALLOC_CAP_XXX`` values but the
two most useful ones are predefined as `esp32.HEAP_DATA` for data heap regions and
`esp32.HEAP_EXEC` for executable regions as used by the native code emitter.
The return value is a list of 4-tuples, where each 4-tuple corresponds to one heap
and contains: the total bytes, the free bytes, the largest free block, and
the minimum free seen over time.
Example after booting::
>>> import esp32; esp32.idf_heap_info(esp32.HEAP_DATA)
[(240, 0, 0, 0), (7288, 0, 0, 0), (16648, 4, 4, 4), (79912, 35712, 35512, 35108),
(15072, 15036, 15036, 15036), (113840, 0, 0, 0)]
Flash partitions
----------------
This class gives access to the partitions in the device's flash memory and includes
methods to enable over-the-air (OTA) updates.
.. class:: Partition(id, block_size=4096, /)
Create an object representing a partition. *id* can be a string which is the label
of the partition to retrieve, or one of the constants: ``BOOT`` or ``RUNNING``.
*block_size* specifies the byte size of an individual block.
.. classmethod:: Partition.find(type=TYPE_APP, subtype=0xff, label=None, block_size=4096)
Find a partition specified by *type*, *subtype* and *label*. Returns a
(possibly empty) list of Partition objects. Note: ``subtype=0xff`` matches any subtype
and ``label=None`` matches any label.
*block_size* specifies the byte size of an individual block used by the returned
objects.
.. method:: Partition.info()
Returns a 6-tuple ``(type, subtype, addr, size, label, encrypted)``.
.. method:: Partition.readblocks(block_num, buf)
Partition.readblocks(block_num, buf, offset)
.. method:: Partition.writeblocks(block_num, buf)
Partition.writeblocks(block_num, buf, offset)
.. method:: Partition.ioctl(cmd, arg)
These methods implement the simple and :ref:`extended
<block-device-interface>` block protocol defined by
:class:`os.AbstractBlockDev`.
.. method:: Partition.set_boot()
Sets the partition as the boot partition.
.. method:: Partition.get_next_update()
Gets the next update partition after this one, and returns a new Partition object.
Typical usage is ``Partition(Partition.RUNNING).get_next_update()``
which returns the next partition to update given the current running one.
.. classmethod:: Partition.mark_app_valid_cancel_rollback()
Signals that the current boot is considered successful.
Calling ``mark_app_valid_cancel_rollback`` is required on the first boot of a new
partition to avoid an automatic rollback at the next boot.
This uses the ESP-IDF "app rollback" feature with "CONFIG_BOOTLOADER_APP_ROLLBACK_ENABLE"
and an ``OSError(-261)`` is raised if called on firmware that doesn't have the
feature enabled.
It is OK to call ``mark_app_valid_cancel_rollback`` on every boot and it is not
necessary when booting firmware that was loaded using esptool.
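For example, a minimal OTA sketch using only the methods above; ``app_image`` is a hypothetical buffer holding the new firmware, assumed to be a multiple of the 4096-byte block size, and error handling is omitted::

    from esp32 import Partition
    import machine

    part = Partition(Partition.RUNNING).get_next_update()

    # Write the new image block by block (app_image is hypothetical).
    for block_num in range(len(app_image) // 4096):
        part.writeblocks(block_num, app_image[block_num * 4096:(block_num + 1) * 4096])

    part.set_boot()
    machine.reset()
    # On first boot of the new image, call Partition.mark_app_valid_cancel_rollback().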
Constants
~~~~~~~~~
.. data:: Partition.BOOT
Partition.RUNNING
Used in the `Partition` constructor to fetch various partitions: ``BOOT`` is the
partition that will be booted at the next reset and ``RUNNING`` is the currently
running partition.
.. data:: Partition.TYPE_APP
Partition.TYPE_DATA
Used in `Partition.find` to specify the partition type: ``APP`` is for bootable
firmware partitions (typically labelled ``factory``, ``ota_0``, ``ota_1``), and
``DATA`` is for other partitions, e.g. ``nvs``, ``otadata``, ``phy_init``, ``vfs``.
.. data:: HEAP_DATA
HEAP_EXEC
Used in `idf_heap_info`.
.. _esp32.RMT:
RMT
---
The RMT (Remote Control) module, specific to the ESP32, was originally designed
to send and receive infrared remote control signals. However, due to a flexible
design and very accurate (as low as 12.5ns) pulse generation, it can also be
used to transmit or receive many other types of digital signals::
import esp32
from machine import Pin
r = esp32.RMT(0, pin=Pin(18), clock_div=8)
r # RMT(channel=0, pin=18, source_freq=80000000, clock_div=8, idle_level=0)
# To apply a carrier frequency to the high output
r = esp32.RMT(0, pin=Pin(18), clock_div=8, tx_carrier=(38000, 50, 1))
# The channel resolution is 100ns (1/(source_freq/clock_div)).
r.write_pulses((1, 20, 2, 40), 0) # Send 0 for 100ns, 1 for 2000ns, 0 for 200ns, 1 for 4000ns
The input to the RMT module is an 80MHz clock (in the future it may be able to
configure the input clock but, for now, it's fixed). ``clock_div`` *divides*
the clock input which determines the resolution of the RMT channel. The
numbers specified in ``write_pulses`` are multiplied by the resolution to
define the pulses.
``clock_div`` is an 8-bit divider (0-255) and each pulse can be defined by
multiplying the resolution by a 15-bit (0-32,767) number. There are eight
channels (0-7) and each can have a different clock divider.
So, in the example above, the 80MHz clock is divided by 8. Thus the
resolution is (1/(80MHz/8)) 100ns. Since the ``start`` level is 0 and toggles
with each number, the bitstream is ``0101`` with durations of [100ns, 2000ns,
100ns, 4000ns].
For more details see Espressif's `ESP-IDF RMT documentation.
<https://docs.espressif.com/projects/esp-idf/en/latest/api-reference/peripherals/rmt.html>`_.
.. Warning::
The current MicroPython RMT implementation lacks some features, most notably
receiving pulses. RMT should be considered a
*beta feature* and the interface may change in the future.
.. class:: RMT(channel, *, pin=None, clock_div=8, idle_level=False, tx_carrier=None)
This class provides access to one of the eight RMT channels. *channel* is
required and identifies which RMT channel (0-7) will be configured. *pin*,
also required, configures which Pin is bound to the RMT channel. *clock_div*
is an 8-bit clock divider that divides the source clock (80MHz) to the RMT
channel allowing the resolution to be specified. *idle_level* specifies
what level the output will be when no transmission is in progress and can
be any value that converts to a boolean, with ``True`` representing high
voltage and ``False`` representing low.
To enable the transmission carrier feature, *tx_carrier* should be a tuple
of three positive integers: carrier frequency, duty percent (``0`` to
``100``) and the output level to apply the carrier to (a boolean as per
*idle_level*).
.. method:: RMT.source_freq()
Returns the source clock frequency. Currently the source clock is not
configurable so this will always return 80MHz.
.. method:: RMT.clock_div()
Return the clock divider. Note that the channel resolution is
``1 / (source_freq / clock_div)``.
.. method:: RMT.wait_done(*, timeout=0)
Returns ``True`` if the channel is idle or ``False`` if a sequence of
pulses started with `RMT.write_pulses` is being transmitted. If the
*timeout* keyword argument is given then block for up to this many
milliseconds for transmission to complete.
.. method:: RMT.loop(enable_loop)
Configure looping on the channel. *enable_loop* is bool, set to ``True`` to
enable looping on the *next* call to `RMT.write_pulses`. If called with
``False`` while a looping sequence is currently being transmitted then the
current loop iteration will be completed and then transmission will stop.
.. method:: RMT.write_pulses(duration, data=True)
Begin transmitting a sequence. There are three ways to specify this:
**Mode 1:** *duration* is a list or tuple of durations. The optional *data*
argument specifies the initial output level. The output level will toggle
after each duration.
**Mode 2:** *duration* is a positive integer and *data* is a list or tuple
of output levels. *duration* specifies a fixed duration for each.
**Mode 3:** *duration* and *data* are lists or tuples of equal length,
specifying individual durations and the output level for each.
Durations are in integer units of the channel resolution (as described
above), between 1 and 32767 units. Output levels are any value that can
be converted to a boolean, with ``True`` representing high voltage and
``False`` representing low.
If transmission of an earlier sequence is in progress then this method will
block until that transmission is complete before beginning the new sequence.
If looping has been enabled with `RMT.loop`, the sequence will be
repeated indefinitely. Further calls to this method will block until the
end of the current loop iteration before immediately beginning to loop the
new sequence of pulses. Looping sequences longer than 126 pulses is not
supported by the hardware.
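For example, reusing the channel ``r`` created in the example above, one call per mode (durations and levels are arbitrary)::

    # Mode 1: durations only, starting at level 1 (high), toggling after each.
    r.write_pulses((1000, 2000, 1000), 1)

    # Mode 2: fixed duration of 500 resolution units for each output level.
    r.write_pulses(500, (1, 0, 1, 0))

    # Mode 3: explicit duration/level pairs of equal length.
    r.write_pulses((1000, 2000, 3000), (1, 0, 1))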
.. staticmethod:: RMT.bitstream_channel([value])
Select which RMT channel is used by the `machine.bitstream` implementation.
*value* can be ``None`` or a valid RMT channel number. The default RMT
channel is the highest numbered one.
Passing in ``None`` disables the use of RMT and instead selects a bit-banging
implementation for `machine.bitstream`.
Passing in no argument will not change the channel. This function returns
the current channel number.
Ultra-Low-Power co-processor
----------------------------
.. class:: ULP()
This class provides access to the Ultra-Low-Power co-processor.
.. method:: ULP.set_wakeup_period(period_index, period_us)
Set the wake-up period.
.. method:: ULP.load_binary(load_addr, program_binary)
Load a *program_binary* into the ULP at the given *load_addr*.
.. method:: ULP.run(entry_point)
Start the ULP running at the given *entry_point*.
Constants
---------
.. data:: esp32.WAKEUP_ALL_LOW
esp32.WAKEUP_ANY_HIGH
Selects the wake level for pins.
Non-Volatile Storage
--------------------
This class gives access to the Non-Volatile storage managed by ESP-IDF. The NVS is partitioned
into namespaces and each namespace contains typed key-value pairs. The keys are strings and the
values may be various integer types, strings, and binary blobs. The driver currently only
supports 32-bit signed integers and blobs.
.. warning::
Changes to NVS need to be committed to flash by calling the commit method. Failure
to call commit results in changes being lost at the next reset.
.. class:: NVS(namespace)
Create an object providing access to a namespace (which is automatically created if not
present).
.. method:: NVS.set_i32(key, value)
Sets a 32-bit signed integer value for the specified key. Remember to call *commit*!
.. method:: NVS.get_i32(key)
Returns the signed integer value for the specified key. Raises an OSError if the key does not
exist or has a different type.
.. method:: NVS.set_blob(key, value)
Sets a binary blob value for the specified key. The value passed in must support the buffer
protocol, e.g. bytes, bytearray, str. (Note that esp-idf distinguishes blobs and strings; this method always writes a blob even if a string is passed in as the value.)
Remember to call *commit*!
.. method:: NVS.get_blob(key, buffer)
Reads the value of the blob for the specified key into the buffer, which must be a bytearray.
Returns the actual length read. Raises an OSError if the key does not exist, has a different
type, or if the buffer is too small.
.. method:: NVS.erase_key(key)
Erases a key-value pair.
.. method:: NVS.commit()
Commits changes made by *set_xxx* methods to flash.
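For example, a minimal sketch (the namespace, keys and values are placeholders)::

    from esp32 import NVS

    nvs = NVS("config")               # namespace is created if not present
    nvs.set_i32("boot_count", 3)
    nvs.set_blob("wifi_ssid", "mynet")
    nvs.commit()                      # persist the changes to flash

    buf = bytearray(32)
    n = nvs.get_blob("wifi_ssid", buf)
    print(nvs.get_i32("boot_count"), buf[:n])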

View File

@@ -0,0 +1,168 @@
:mod:`framebuf` --- frame buffer manipulation
=============================================
.. module:: framebuf
:synopsis: Frame buffer manipulation
This module provides a general frame buffer which can be used to create
bitmap images, which can then be sent to a display.
class FrameBuffer
-----------------
The FrameBuffer class provides a pixel buffer which can be drawn upon with
pixels, lines, rectangles, text and even other FrameBuffers. It is useful
when generating output for displays.
For example::
import framebuf
# FrameBuffer needs 2 bytes for every RGB565 pixel
fbuf = framebuf.FrameBuffer(bytearray(100 * 10 * 2), 100, 10, framebuf.RGB565)
fbuf.fill(0)
fbuf.text('MicroPython!', 0, 0, 0xffff)
fbuf.hline(0, 9, 96, 0xffff)
Constructors
------------
.. class:: FrameBuffer(buffer, width, height, format, stride=width, /)
Construct a FrameBuffer object. The parameters are:
- *buffer* is an object with a buffer protocol which must be large
enough to contain every pixel defined by the width, height and
format of the FrameBuffer.
- *width* is the width of the FrameBuffer in pixels
- *height* is the height of the FrameBuffer in pixels
- *format* specifies the type of pixel used in the FrameBuffer;
permissible values are listed under Constants below. These set the
number of bits used to encode a color value and the layout of these
bits in *buffer*.
Where a color value c is passed to a method, c is a small integer
with an encoding that is dependent on the format of the FrameBuffer.
- *stride* is the number of pixels between each horizontal line
of pixels in the FrameBuffer. This defaults to *width* but may
need adjustments when implementing a FrameBuffer within another
larger FrameBuffer or screen. The *buffer* size must accommodate
an increased step size.
One must specify valid *buffer*, *width*, *height*, *format* and
optionally *stride*. Invalid *buffer* size or dimensions may lead to
unexpected errors.
Drawing primitive shapes
------------------------
The following methods draw shapes onto the FrameBuffer.
.. method:: FrameBuffer.fill(c)
Fill the entire FrameBuffer with the specified color.
.. method:: FrameBuffer.pixel(x, y[, c])
If *c* is not given, get the color value of the specified pixel.
If *c* is given, set the specified pixel to the given color.
.. method:: FrameBuffer.hline(x, y, w, c)
.. method:: FrameBuffer.vline(x, y, h, c)
.. method:: FrameBuffer.line(x1, y1, x2, y2, c)
Draw a line from a set of coordinates using the given color and
a thickness of 1 pixel. The `line` method draws the line up to
a second set of coordinates whereas the `hline` and `vline`
methods draw horizontal and vertical lines respectively up to
a given length.
.. method:: FrameBuffer.rect(x, y, w, h, c)
.. method:: FrameBuffer.fill_rect(x, y, w, h, c)
Draw a rectangle at the given location, size and color. The `rect`
method draws only a 1 pixel outline whereas the `fill_rect` method
draws both the outline and interior.
Drawing text
------------
.. method:: FrameBuffer.text(s, x, y[, c])
Write text to the FrameBuffer using the coordinates as the upper-left
corner of the text. The color of the text can be defined by the optional
argument but is otherwise a default value of 1. All characters have
dimensions of 8x8 pixels and there is currently no way to change the font.
Other methods
-------------
.. method:: FrameBuffer.scroll(xstep, ystep)
Shift the contents of the FrameBuffer by the given vector. This may
leave a footprint of the previous colors in the FrameBuffer.
.. method:: FrameBuffer.blit(fbuf, x, y, key=-1, palette=None)
Draw another FrameBuffer on top of the current one at the given coordinates.
If *key* is specified then it should be a color integer and the
corresponding color will be considered transparent: all pixels with that
color value will not be drawn.
The *palette* argument enables blitting between FrameBuffers with differing
formats. Typical usage is to render a monochrome or grayscale glyph/icon to
a color display. The *palette* is a FrameBuffer instance whose format is
that of the current FrameBuffer. The *palette* height is one pixel and its
pixel width is the number of colors in the source FrameBuffer. The *palette*
for an N-bit source needs 2**N pixels; the *palette* for a monochrome source
would have 2 pixels representing background and foreground colors. The
application assigns a color to each pixel in the *palette*. The color of each drawn pixel will be that of the *palette* pixel whose x position equals the color value of the corresponding source pixel.
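For example, a sketch of blitting a 1-bit glyph onto an RGB565 buffer through a two-pixel palette (sizes and colors are arbitrary)::

    import framebuf

    # 8x8 monochrome glyph (1 bit per pixel, horizontally mapped).
    glyph = framebuf.FrameBuffer(bytearray(8), 8, 8, framebuf.MONO_HLSB)
    glyph.text("A", 0, 0, 1)

    # Destination buffer and a palette whose format matches it.
    fbuf = framebuf.FrameBuffer(bytearray(32 * 32 * 2), 32, 32, framebuf.RGB565)
    palette = framebuf.FrameBuffer(bytearray(2 * 2), 2, 1, framebuf.RGB565)
    palette.pixel(0, 0, 0x0000)   # source color 0 -> black background
    palette.pixel(1, 0, 0xffff)   # source color 1 -> white foreground

    fbuf.blit(glyph, 0, 0, -1, palette)   # key=-1: no transparent color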
Constants
---------
.. data:: framebuf.MONO_VLSB
Monochrome (1-bit) color format
This defines a mapping where the bits in a byte are vertically mapped with
bit 0 being nearest the top of the screen. Consequently each byte occupies
8 vertical pixels. Subsequent bytes appear at successive horizontal
locations until the rightmost edge is reached. Further bytes are rendered
at locations starting at the leftmost edge, 8 pixels lower.
.. data:: framebuf.MONO_HLSB
Monochrome (1-bit) color format
This defines a mapping where the bits in a byte are horizontally mapped.
Each byte occupies 8 horizontal pixels with bit 7 being the leftmost.
Subsequent bytes appear at successive horizontal locations until the
rightmost edge is reached. Further bytes are rendered on the next row, one
pixel lower.
.. data:: framebuf.MONO_HMSB
Monochrome (1-bit) color format
This defines a mapping where the bits in a byte are horizontally mapped.
Each byte occupies 8 horizontal pixels with bit 0 being the leftmost.
Subsequent bytes appear at successive horizontal locations until the
rightmost edge is reached. Further bytes are rendered on the next row, one
pixel lower.
.. data:: framebuf.RGB565
Red Green Blue (16-bit, 5+6+5) color format
.. data:: framebuf.GS2_HMSB
Grayscale (2-bit) color format
.. data:: framebuf.GS4_HMSB
Grayscale (4-bit) color format
.. data:: framebuf.GS8
Grayscale (8-bit) color format

View File

@@ -0,0 +1,66 @@
:mod:`gc` -- control the garbage collector
==========================================
.. module:: gc
:synopsis: control the garbage collector
|see_cpython_module| :mod:`python:gc`.
Functions
---------
.. function:: enable()
Enable automatic garbage collection.
.. function:: disable()
Disable automatic garbage collection. Heap memory can still be allocated,
and garbage collection can still be initiated manually using :meth:`gc.collect`.
.. function:: collect()
Run a garbage collection.
.. function:: mem_alloc()
Return the number of bytes of heap RAM that are allocated.
.. admonition:: Difference to CPython
:class: attention
This function is a MicroPython extension.
.. function:: mem_free()
Return the number of bytes of available heap RAM, or -1 if this amount
is not known.
.. admonition:: Difference to CPython
:class: attention
This function is a MicroPython extension.
.. function:: threshold([amount])
Set or query the additional GC allocation threshold. Normally, a collection
is triggered only when a new allocation cannot be satisfied, i.e. on an
out-of-memory (OOM) condition. If this function is called, in addition to
OOM, a collection will be triggered each time after *amount* bytes have been
allocated (in total, since the previous time such an amount of bytes
have been allocated). *amount* is usually specified as less than the
full heap size, with the intention to trigger a collection earlier than when the
heap becomes exhausted, and in the hope that an early collection will prevent
excessive memory fragmentation. This is a heuristic measure; its effect, as well as the optimal value of the *amount* parameter, will vary from application to application.
Calling the function without argument will return the current value of
the threshold. A value of -1 means a disabled allocation threshold.
.. admonition:: Difference to CPython
:class: attention
This function is a MicroPython extension. CPython has a similar
function - ``set_threshold()``, but due to different GC
implementations, its signature and semantics are different.
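For example, one possible heuristic is to trigger a collection after roughly a quarter of the currently free heap has been allocated::

    import gc

    gc.threshold(gc.mem_free() // 4 + gc.mem_alloc())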

View File

@@ -0,0 +1,57 @@
:mod:`hashlib` -- hashing algorithms
====================================
.. module:: hashlib
:synopsis: hashing algorithms
|see_cpython_module| :mod:`python:hashlib`.
This module implements binary data hashing algorithms. The exact inventory
of available algorithms depends on the board. Among the algorithms which may
be implemented:
* SHA256 - The current generation, modern hashing algorithm (of SHA2 series).
It is suitable for cryptographically-secure purposes. It is included in the MicroPython core, and any board is recommended to provide it unless it has particular code size constraints.
* SHA1 - A previous generation algorithm. Not recommended for new usages,
but SHA1 is a part of a number of Internet standards and existing
applications, so boards targeting network connectivity and
interoperability will try to provide this.
* MD5 - A legacy algorithm, not considered cryptographically secure. Only
selected boards, targeting interoperability with legacy applications,
will offer this.
Constructors
------------
.. class:: hashlib.sha256([data])
Create an SHA256 hasher object and optionally feed ``data`` into it.
.. class:: hashlib.sha1([data])
Create an SHA1 hasher object and optionally feed ``data`` into it.
.. class:: hashlib.md5([data])
Create an MD5 hasher object and optionally feed ``data`` into it.
Methods
-------
.. method:: hash.update(data)
Feed more binary data into hash.
.. method:: hash.digest()
Return hash for all data passed through hash, as a bytes object. After this
method is called, more data cannot be fed into the hash any longer.
.. method:: hash.hexdigest()
This method is NOT implemented. Use ``binascii.hexlify(hash.digest())``
to achieve a similar effect.
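For example, a minimal sketch of hashing data in chunks and printing a hex digest::

    import hashlib
    import binascii

    h = hashlib.sha256()
    h.update(b"hello ")
    h.update(b"world")
    print(binascii.hexlify(h.digest()))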

View File

@@ -0,0 +1,31 @@
:mod:`heapq` -- heap queue algorithm
====================================
.. module:: heapq
:synopsis: heap queue algorithm
|see_cpython_module| :mod:`python:heapq`.
This module implements the
`min heap queue algorithm <https://en.wikipedia.org/wiki/Heap_%28data_structure%29>`_.
A heap queue is essentially a list that has its elements stored in such a way
that the first item of the list is always the smallest.
Functions
---------
.. function:: heappush(heap, item)
Push the ``item`` onto the ``heap``.
.. function:: heappop(heap)
Pop the first item from the ``heap``, and return it. Raise ``IndexError`` if
``heap`` is empty.
The returned item will be the smallest item in the ``heap``.
.. function:: heapify(x)
Convert the list ``x`` into a heap. This is an in-place operation.
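Example of use::

    import heapq

    heap = [5, 8, 1]
    heapq.heapify(heap)            # heap[0] is now the smallest item
    heapq.heappush(heap, 3)
    print(heapq.heappop(heap))     # 1
    print(heapq.heappop(heap))     # 3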

View File

@@ -0,0 +1,198 @@
.. _micropython_lib:
MicroPython libraries
=====================
.. warning::
Important summary of this section
* MicroPython provides built-in modules that mirror the functionality of the
Python standard library (e.g. :mod:`os`, :mod:`time`), as well as
MicroPython-specific modules (e.g. :mod:`bluetooth`, :mod:`machine`).
* Most standard library modules implement a subset of the functionality of
the equivalent Python module, and in a few cases provide some
MicroPython-specific extensions (e.g. :mod:`array`, :mod:`os`)
* Due to resource constraints or other limitations, some ports or firmware
versions may not include all the functionality documented here.
* To allow for extensibility, the built-in modules can be extended from
Python code loaded onto the device.
This chapter describes modules (function and class libraries) which are built
into MicroPython. This documentation in general aspires to describe all modules
and functions/classes which are implemented in the MicroPython project.
However, MicroPython is highly configurable, and each port to a particular
board/embedded system may include only a subset of the available MicroPython
libraries.
With that in mind, please be warned that some functions/classes in a module (or
even the entire module) described in this documentation **may be unavailable**
in a particular build of MicroPython on a particular system. The best place to
find general information of the availability/non-availability of a particular
feature is the "General Information" section which contains information
pertaining to a specific :term:`MicroPython port`.
On some ports you can discover the available built-in libraries that
can be imported by entering the following at the :term:`REPL`::
help('modules')
Beyond the built-in libraries described in this documentation, many more
modules from the Python standard library, as well as further MicroPython
extensions to it, can be found in :term:`micropython-lib`.
Python standard libraries and micro-libraries
---------------------------------------------
The following standard Python libraries have been "micro-ified" to fit in with
the philosophy of MicroPython. They provide the core functionality of that
module and are intended to be a drop-in replacement for the standard Python
library.
.. toctree::
:maxdepth: 1
array.rst
binascii.rst
builtins.rst
cmath.rst
collections.rst
errno.rst
gc.rst
hashlib.rst
heapq.rst
io.rst
json.rst
math.rst
os.rst
random.rst
re.rst
select.rst
socket.rst
ssl.rst
struct.rst
sys.rst
time.rst
uasyncio.rst
zlib.rst
_thread.rst
MicroPython-specific libraries
------------------------------
Functionality specific to the MicroPython implementation is available in
the following libraries.
.. toctree::
:maxdepth: 1
bluetooth.rst
btree.rst
cryptolib.rst
framebuf.rst
machine.rst
micropython.rst
neopixel.rst
network.rst
uctypes.rst
The following libraries provide drivers for hardware components.
.. toctree::
:maxdepth: 1
wm8960.rst
Port-specific libraries
-----------------------
In some cases the following port/board-specific libraries have functions or
classes similar to those in the :mod:`machine` library. Where this occurs, the
entry in the port-specific library exposes hardware functionality unique to
that platform.
To write portable code use functions and classes from the :mod:`machine` module.
To access platform-specific hardware use the appropriate library, e.g.
:mod:`pyb` in the case of the Pyboard.
Libraries specific to the pyboard
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following libraries are specific to the pyboard.
.. toctree::
:maxdepth: 2
pyb.rst
stm.rst
lcd160cr.rst
Libraries specific to the WiPy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following libraries and classes are specific to the WiPy.
.. toctree::
:maxdepth: 2
wipy.rst
machine.ADCWiPy.rst
machine.TimerWiPy.rst
Libraries specific to the ESP8266 and ESP32
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following libraries are specific to the ESP8266 and ESP32.
.. toctree::
:maxdepth: 2
esp.rst
esp32.rst
Libraries specific to the RP2040
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following libraries are specific to the RP2040, as used in the Raspberry Pi Pico.
.. toctree::
:maxdepth: 2
rp2.rst
Libraries specific to Zephyr
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following libraries are specific to the Zephyr port.
.. toctree::
:maxdepth: 2
zephyr.rst
Extending built-in libraries from Python
----------------------------------------
In most cases, the above modules are actually named ``umodule`` rather than
``module``, but MicroPython will alias any module prefixed with a ``u`` to the
non-``u`` version. However a file (or :term:`frozen module`) named
``module.py`` will take precedence over this alias.
This allows the user to provide an extended implementation of a built-in library
(perhaps to provide additional CPython compatibility). The user-provided module
(in ``module.py``) can still use the built-in functionality by importing
``umodule`` directly. This is used extensively in :term:`micropython-lib`. See
:ref:`packages` for more information.
This applies to both the Python standard libraries (e.g. ``os``, ``time``, etc.)
and the MicroPython libraries (e.g. ``machine``, ``bluetooth``, etc.).
The main exception is the port-specific libraries (``pyb``, ``esp``, etc).
*Other than when you specifically want to force the use of the built-in module,
we recommend always using ``import module`` rather than ``import umodule``.*
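As an illustration, a hypothetical ``time.py`` placed on the device could extend the built-in module while re-using its functionality (a sketch only; the ``timestamp()`` helper is illustrative and not part of any built-in module)::
# time.py -- extends the built-in (u)time module
from utime import *       # re-export the built-in functionality
def timestamp():
    # illustrative extra helper built on the built-in localtime()
    return '%04d-%02d-%02d' % localtime()[:3]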


@@ -0,0 +1,131 @@
:mod:`io` -- input/output streams
=================================
.. module:: io
:synopsis: input/output streams
|see_cpython_module| :mod:`python:io`.
This module contains additional types of `stream` (file-like) objects
and helper functions.
Conceptual hierarchy
--------------------
.. admonition:: Difference to CPython
:class: attention
Conceptual hierarchy of stream base classes is simplified in MicroPython,
as described in this section.
(Abstract) base stream classes, which serve as a foundation for the behaviour
of all the concrete classes, adhere to a few dichotomies (pair-wise
classifications) in CPython. In MicroPython, they are somewhat simplified
and made implicit to achieve higher efficiency and save resources.
An important dichotomy in CPython is unbuffered vs buffered streams. In
MicroPython, all streams are currently unbuffered. This is because all
modern OSes, and even many RTOSes and filesystem drivers, already perform
buffering on their side. Adding another layer of buffering is counter-
productive (an issue known as "bufferbloat") and takes precious memory.
Note that there are still cases where buffering may be useful, so optional
buffering support may be introduced at a later time.
In CPython, another important dichotomy is tied to "bufferedness": whether
a stream may incur short reads/writes or not. A short read is when a user
asks for e.g. 10 bytes from a stream but gets fewer, and similarly for
writes. In CPython, unbuffered streams are susceptible to short operations,
while buffered streams are guaranteed against them. The absence of short
reads/writes is an important trait, as it allows the development of
more concise and efficient programs - something which is highly desirable
for MicroPython. So, while MicroPython doesn't support buffered streams,
it still provides for no-short-operations streams. Whether there will
be short operations or not depends on the needs of each particular class, but
developers are strongly advised to favour no-short-operations behaviour
for the reasons stated above. For example, MicroPython sockets are
guaranteed to avoid short reads/writes. Actually, at this time, there is
no example of a short-operations stream class in the core; one would
be a port-specific class, where such a need is governed by hardware
peculiarities.
The no-short-operations behaviour gets tricky in the case of non-blocking
streams, blocking vs non-blocking behaviour being another CPython dichotomy,
fully supported by MicroPython. Non-blocking streams never wait for
data either to arrive or to be written - they read/write whatever is possible,
or signal a lack of data (or of the ability to write data). Clearly, this
conflicts with the "no-short-operations" policy, and indeed, the case of
non-blocking buffered (and thus no-short-ops) streams is convoluted in
CPython - in some places such a combination is prohibited, in some it is
undefined or just not documented, and in some cases it raises verbose
exceptions. The matter is much simpler in MicroPython: non-blocking streams
are important for efficient asynchronous operations, so this property
prevails over the "no-short-ops" one. So, while blocking streams will avoid
short reads/writes whenever possible (the only cases for a short read are
reaching end of file or an error, though errors don't return short data
but raise exceptions), non-blocking streams may produce short data to
avoid blocking the operation.
The final dichotomy is binary vs text streams. MicroPython of course
supports these, but while in CPython text streams are inherently
buffered, they aren't in MicroPython. (Indeed, that's one of the cases
for which we may introduce buffering support.)
Note that for efficiency, MicroPython doesn't provide abstract base
classes corresponding to the hierarchy above, and it's not possible
to implement, or subclass, a stream class in pure Python.
Functions
---------
.. function:: open(name, mode='r', **kwargs)
Open a file. The builtin ``open()`` function is aliased to this function.
All ports (which provide access to a file system) are required to support
the *mode* parameter, but support for other arguments varies by port.
Classes
-------
.. class:: FileIO(...)
This is the type of a file opened in binary mode, e.g. using ``open(name, "rb")``.
You should not instantiate this class directly.
.. class:: TextIOWrapper(...)
This is the type of a file opened in text mode, e.g. using ``open(name, "rt")``.
You should not instantiate this class directly.
.. class:: StringIO([string])
.. class:: BytesIO([string])
In-memory file-like objects for input/output. `StringIO` is used for
text-mode I/O (similar to a normal file opened with "t" modifier).
`BytesIO` is used for binary-mode I/O (similar to a normal file
opened with "b" modifier). Initial contents of file-like objects
can be specified with *string* parameter (should be normal string
for `StringIO` or bytes object for `BytesIO`). All the usual file
methods like ``read()``, ``write()``, ``seek()``, ``flush()``,
``close()`` are available on these objects, and additionally, a
following method:
.. method:: getvalue()
Get the current contents of the underlying buffer which holds data.
.. class:: StringIO(alloc_size)
:noindex:
.. class:: BytesIO(alloc_size)
:noindex:
Create an empty `StringIO`/`BytesIO` object, preallocated to hold up
to *alloc_size* bytes. That means that writing that amount of data
won't lead to reallocation of the buffer, and thus won't hit an
out-of-memory situation or lead to memory fragmentation. These constructors
are a MicroPython extension and are recommended only for use in special
cases and in system-level libraries, not in end-user applications.
.. admonition:: Difference to CPython
:class: attention
These constructors are a MicroPython extension.
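For example (a minimal sketch of both forms of construction)::
import io
buf = io.BytesIO()
buf.write(b'hello')
buf.write(b' world')
print(buf.getvalue())    # b'hello world'
pre = io.BytesIO(32)     # MicroPython extension: preallocate 32 bytes
pre.write(b'abc')
print(pre.getvalue())    # b'abc'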


@@ -0,0 +1,41 @@
:mod:`json` -- JSON encoding and decoding
=========================================
.. module:: json
:synopsis: JSON encoding and decoding
|see_cpython_module| :mod:`python:json`.
This module allows conversion between Python objects and the JSON
data format.
Functions
---------
.. function:: dump(obj, stream, separators=None)
Serialise *obj* to a JSON string, writing it to the given *stream*.
If specified, separators should be an ``(item_separator, key_separator)``
tuple. The default is ``(', ', ': ')``. To get the most compact JSON
representation, you should specify ``(',', ':')`` to eliminate whitespace.
.. function:: dumps(obj, separators=None)
Return *obj* represented as a JSON string.
The arguments have the same meaning as in `dump`.
.. function:: load(stream)
Parse the given *stream*, interpreting it as a JSON string and
deserialising the data to a Python object. The resulting object is
returned.
Parsing continues until end-of-file is encountered.
A :exc:`ValueError` is raised if the data in *stream* is not correctly formed.
.. function:: loads(str)
Parse the JSON *str* and return an object. Raises :exc:`ValueError` if the
string is not correctly formed.
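For example, a simple round trip (a minimal sketch; the dictionary shown is illustrative)::
import json
data = {'pin': 2, 'active': True}
s = json.dumps(data, separators=(',', ':'))   # compact representation
obj = json.loads(s)
print(obj['pin'])                             # 2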


@@ -0,0 +1,394 @@
:mod:`lcd160cr` --- control of LCD160CR display
===============================================
.. module:: lcd160cr
:synopsis: control of LCD160CR display
This module provides control of the MicroPython LCD160CR display.
.. image:: http://micropython.org/resources/LCD160CRv10-persp.jpg
:alt: LCD160CRv1.0 picture
:width: 640px
Further resources are available via the following links:
* `LCD160CRv1.0 reference manual <http://micropython.org/resources/LCD160CRv10-refmanual.pdf>`_ (100KiB PDF)
* `LCD160CRv1.0 schematics <http://micropython.org/resources/LCD160CRv10-schematics.pdf>`_ (1.6MiB PDF)
class LCD160CR
--------------
The LCD160CR class provides an interface to the display. Create an
instance of this class and use its methods to draw to the LCD and get
the status of the touch panel.
For example::
import lcd160cr
lcd = lcd160cr.LCD160CR('X')
lcd.set_orient(lcd160cr.PORTRAIT)
lcd.set_pos(0, 0)
lcd.set_text_color(lcd.rgb(255, 0, 0), lcd.rgb(0, 0, 0))
lcd.set_font(1)
lcd.write('Hello MicroPython!')
print('touch:', lcd.get_touch())
Constructors
------------
.. class:: LCD160CR(connect=None, *, pwr=None, i2c=None, spi=None, i2c_addr=98)
Construct an LCD160CR object. The parameters are:
- *connect* is a string specifying the physical connection of the LCD
display to the board; valid values are "X", "Y", "XY", "YX".
Use "X" when the display is connected to a pyboard in the X-skin
position, and "Y" when connected in the Y-skin position. "XY"
and "YX" are used when the display is connected to the right or
left side of the pyboard, respectively.
- *pwr* is a Pin object connected to the LCD's power/enabled pin.
- *i2c* is an I2C object connected to the LCD's I2C interface.
- *spi* is an SPI object connected to the LCD's SPI interface.
- *i2c_addr* is the I2C address of the display.
One must specify either a valid *connect* or all of *pwr*, *i2c* and *spi*.
If a valid *connect* is given then any of *pwr*, *i2c* or *spi* which are
not passed as parameters (i.e. they are ``None``) will be created based on the
value of *connect*. This allows the default interface to the display to be
overridden if needed.
The default values are:
- "X" is for the X-skin and uses:
``pwr=Pin("X4")``, ``i2c=I2C("X")``, ``spi=SPI("X")``
- "Y" is for the Y-skin and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("Y")``, ``spi=SPI("Y")``
- "XY" is for the right-side and uses:
``pwr=Pin("X4")``, ``i2c=I2C("Y")``, ``spi=SPI("X")``
- "YX" is for the left-side and uses:
``pwr=Pin("Y4")``, ``i2c=I2C("X")``, ``spi=SPI("Y")``
See `this image <http://micropython.org/resources/LCD160CRv10-positions.jpg>`_
for how the display can be connected to the pyboard.
Static methods
--------------
.. staticmethod:: LCD160CR.rgb(r, g, b)
Return a 16-bit integer representing the given rgb color values. The
16-bit value can be used to set the font color (see
:meth:`LCD160CR.set_text_color`), the pen color (see :meth:`LCD160CR.set_pen`)
and to draw individual pixels.
.. staticmethod:: LCD160CR.clip_line(data, w, h)
Clip the given line data. This is for internal use.
Instance members
----------------
The following instance members are publicly accessible.
.. data:: LCD160CR.w
.. data:: LCD160CR.h
The width and height of the display, respectively, in pixels. These
members are updated when calling :meth:`LCD160CR.set_orient` and should
be considered read-only.
Setup commands
--------------
.. method:: LCD160CR.set_power(on)
Turn the display on or off, depending on the given value of *on*: 0 or ``False``
will turn the display off, and 1 or ``True`` will turn it on.
.. method:: LCD160CR.set_orient(orient)
Set the orientation of the display. The *orient* parameter can be one
of `PORTRAIT`, `LANDSCAPE`, `PORTRAIT_UPSIDEDOWN`, `LANDSCAPE_UPSIDEDOWN`.
.. method:: LCD160CR.set_brightness(value)
Set the brightness of the display, between 0 and 31.
.. method:: LCD160CR.set_i2c_addr(addr)
Set the I2C address of the display. The *addr* value must have the
lower 2 bits cleared.
.. method:: LCD160CR.set_uart_baudrate(baudrate)
Set the baudrate of the UART interface.
.. method:: LCD160CR.set_startup_deco(value)
Set the start-up decoration of the display. The *value* parameter can be a
logical or of `STARTUP_DECO_NONE`, `STARTUP_DECO_MLOGO`, `STARTUP_DECO_INFO`.
.. method:: LCD160CR.save_to_flash()
Save the following parameters to flash so they persist on restart and power up:
initial decoration, orientation, brightness, UART baud rate, I2C address.
Pixel access methods
--------------------
The following methods manipulate individual pixels on the display.
.. method:: LCD160CR.set_pixel(x, y, c)
Set the specified pixel to the given color. The color should be a 16-bit
integer and can be created by :meth:`LCD160CR.rgb`.
.. method:: LCD160CR.get_pixel(x, y)
Get the 16-bit value of the specified pixel.
.. method:: LCD160CR.get_line(x, y, buf)
Low-level method to get a line of pixels into the given buffer.
To read *n* pixels, *buf* should be ``2*n + 1`` bytes in length. The first byte
is a dummy byte and should be ignored, and subsequent bytes represent the
pixels in the line starting at coordinate *(x, y)*.
.. method:: LCD160CR.screen_dump(buf, x=0, y=0, w=None, h=None)
Dump the contents of the screen to the given buffer. The parameters *x* and *y*
specify the starting coordinate, and *w* and *h* the size of the region. If *w*
or *h* are ``None`` then they will take on their maximum values, set by the size
of the screen minus the given *x* and *y* values. *buf* should be large enough
to hold ``2*w*h`` bytes. If it's smaller then only the initial horizontal lines
will be stored.
.. method:: LCD160CR.screen_load(buf)
Load the entire screen from the given buffer.
Drawing text
------------
To draw text one sets the position, color and font, and then uses
`LCD160CR.write` to draw the text.
.. method:: LCD160CR.set_pos(x, y)
Set the position for text output using :meth:`LCD160CR.write`. The position
is the upper-left corner of the text.
.. method:: LCD160CR.set_text_color(fg, bg)
Set the foreground and background color of the text.
.. method:: LCD160CR.set_font(font, scale=0, bold=0, trans=0, scroll=0)
Set the font for the text. Subsequent calls to `write` will use the newly
configured font. The parameters are:
- *font* is the font family to use, valid values are 0, 1, 2, 3.
- *scale* is a scaling value for each character pixel, where the pixels
are drawn as a square with side length equal to *scale + 1*. The value
can be between 0 and 63.
- *bold* controls the number of pixels to overdraw each character pixel,
making a bold effect. The lower 2 bits of *bold* are the number of
pixels to overdraw in the horizontal direction, and the next 2 bits are
for the vertical direction. For example, a *bold* value of 5 will
overdraw 1 pixel in both the horizontal and vertical directions.
- *trans* can be either 0 or 1 and if set to 1 the characters will be
drawn with a transparent background.
- *scroll* can be either 0 or 1 and if set to 1 the display will do a
soft scroll if the text moves to the next line.
.. method:: LCD160CR.write(s)
Write text to the display, using the current position, color and font.
As text is written the position is automatically incremented. The
display supports basic VT100 control codes such as newline and backspace.
Drawing primitive shapes
------------------------
Primitive drawing commands use a foreground and background color set by the
`set_pen` method.
.. method:: LCD160CR.set_pen(line, fill)
Set the line and fill color for primitive shapes.
.. method:: LCD160CR.erase()
Erase the entire display to the pen fill color.
.. method:: LCD160CR.dot(x, y)
Draw a single pixel at the given location using the pen line color.
.. method:: LCD160CR.rect(x, y, w, h)
.. method:: LCD160CR.rect_outline(x, y, w, h)
.. method:: LCD160CR.rect_interior(x, y, w, h)
Draw a rectangle at the given location and size using the pen line
color for the outline, and the pen fill color for the interior.
The `rect` method draws the outline and interior, while the other methods
just draw one or the other.
.. method:: LCD160CR.line(x1, y1, x2, y2)
Draw a line between the given coordinates using the pen line color.
.. method:: LCD160CR.dot_no_clip(x, y)
.. method:: LCD160CR.rect_no_clip(x, y, w, h)
.. method:: LCD160CR.rect_outline_no_clip(x, y, w, h)
.. method:: LCD160CR.rect_interior_no_clip(x, y, w, h)
.. method:: LCD160CR.line_no_clip(x1, y1, x2, y2)
These methods are as above but don't do any clipping on the input
coordinates. They are faster than the clipping versions and can be
used when you know that the coordinates are within the display.
.. method:: LCD160CR.poly_dot(data)
Draw a sequence of dots using the pen line color.
The *data* should be a buffer of bytes, with each successive pair of
bytes corresponding to coordinate pairs (x, y).
.. method:: LCD160CR.poly_line(data)
Similar to :meth:`LCD160CR.poly_dot` but draws lines between the dots.
Touch screen methods
--------------------
.. method:: LCD160CR.touch_config(calib=False, save=False, irq=None)
Configure the touch panel:
- If *calib* is ``True`` then the call will trigger a touch calibration of
the resistive touch sensor. This requires the user to touch various
parts of the screen.
- If *save* is ``True`` then the touch parameters will be saved to NVRAM
to persist across reset/power up.
- If *irq* is ``True`` then the display will be configured to pull the IRQ
line low when a touch force is detected. If *irq* is ``False`` then this
feature is disabled. If *irq* is ``None`` (the default value) then no
change is made to this setting.
.. method:: LCD160CR.is_touched()
Returns a boolean: ``True`` if there is currently a touch force on the screen,
``False`` otherwise.
.. method:: LCD160CR.get_touch()
Returns a 3-tuple of: *(active, x, y)*. If there is currently a touch force
on the screen then *active* is 1, otherwise it is 0. The *x* and *y* values
indicate the position of the current or most recent touch.
Advanced commands
-----------------
.. method:: LCD160CR.set_spi_win(x, y, w, h)
Set the window that SPI data is written to.
.. method:: LCD160CR.fast_spi(flush=True)
Ready the display to accept RGB pixel data on the SPI bus, resetting the location
of the first byte to go to the top-left corner of the window set by
:meth:`LCD160CR.set_spi_win`.
The method returns an SPI object which can be used to write the pixel data.
Pixels should be sent as 16-bit RGB values in the 5-6-5 format. The destination
counter will increase as data is sent, and data can be sent in arbitrary sized
chunks. Once the destination counter reaches the end of the window specified by
:meth:`LCD160CR.set_spi_win` it will wrap around to the top-left corner of that window.
.. method:: LCD160CR.show_framebuf(buf)
Show the given buffer on the display. *buf* should be an array of bytes containing
the 16-bit RGB values for the pixels, and they will be written to the area
specified by :meth:`LCD160CR.set_spi_win`, starting from the top-left corner.
The `framebuf <framebuf.html>`_ module can be used to construct frame buffers
and provides drawing primitives. Using a frame buffer will improve
performance of animations when compared to drawing directly to the screen.
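For example, an off-screen frame buffer can be prepared and shown in one go (a sketch only; it assumes ``lcd`` was constructed as in the example at the top of this page, and the window size is illustrative)::
import framebuf
w, h = 64, 64
buf = bytearray(2 * w * h)                      # 16-bit RGB565 pixels
fb = framebuf.FrameBuffer(buf, w, h, framebuf.RGB565)
fb.fill(0)
fb.rect(10, 10, 40, 40, 0x07e0)                 # green rectangle
lcd.set_spi_win(0, 0, w, h)                     # destination window
lcd.show_framebuf(buf)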
.. method:: LCD160CR.set_scroll(on)
Turn scrolling on or off. This controls globally whether any window regions will
scroll.
.. method:: LCD160CR.set_scroll_win(win, x=-1, y=0, w=0, h=0, vec=0, pat=0, fill=0x07e0, color=0)
Configure a window region for scrolling:
- *win* is the window id to configure. There are 0..7 standard windows for
general purpose use. Window 8 is the text scroll window (the ticker).
- *x*, *y*, *w*, *h* specify the location of the window in the display.
- *vec* specifies the direction and speed of scroll: it is a 16-bit value
of the form ``0bF.ddSSSSSSSSSSSS``. *dd* is 0, 1, 2, 3 for +x, +y, -x,
-y scrolling. *F* sets the speed format, with 0 meaning that the window
is shifted *S % 256* pixel every frame, and 1 meaning that the window
is shifted 1 pixel every *S* frames.
- *pat* is a 16-bit pattern mask for the background.
- *fill* is the fill color.
- *color* is the extra color, either of the text or pattern foreground.
.. method:: LCD160CR.set_scroll_win_param(win, param, value)
Set a single parameter of a scrolling window region:
- *win* is the window id, 0..8.
- *param* is the parameter number to configure, 0..7, and corresponds
to the parameters in the `set_scroll_win` method.
- *value* is the value to set.
.. method:: LCD160CR.set_scroll_buf(s)
Set the string for scrolling in window 8. The parameter *s* must be a string
with length 32 or less.
.. method:: LCD160CR.jpeg(buf)
Display a JPEG. *buf* should contain the entire JPEG data. JPEG data should
not include EXIF information. The following encodings are supported: Baseline
DCT, Huffman coding, 8 bits per sample, 3 color components, YCbCr4:2:2.
The origin of the JPEG is set by :meth:`LCD160CR.set_pos`.
.. method:: LCD160CR.jpeg_start(total_len)
.. method:: LCD160CR.jpeg_data(buf)
Display a JPEG with the data split across multiple buffers. There must be
a single call to `jpeg_start` to begin with, specifying the total number of
bytes in the JPEG. Then this number of bytes must be transferred to the
display using one or more calls to the `jpeg_data` command.
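For example, a JPEG can be streamed from the filesystem in chunks (a sketch only; the file name and chunk size are illustrative, and ``lcd`` is assumed to be an ``LCD160CR`` instance)::
import os
size = os.stat('photo.jpg')[6]       # total size of the file in bytes
lcd.jpeg_start(size)
with open('photo.jpg', 'rb') as f:
    while True:
        chunk = f.read(512)
        if not chunk:
            break
        lcd.jpeg_data(chunk)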
.. method:: LCD160CR.feed_wdt()
The first call to this method will start the display's internal watchdog
timer. Subsequent calls will feed the watchdog. The timeout is roughly 30
seconds.
.. method:: LCD160CR.reset()
Reset the display.
Constants
---------
.. data:: lcd160cr.PORTRAIT
lcd160cr.LANDSCAPE
lcd160cr.PORTRAIT_UPSIDEDOWN
lcd160cr.LANDSCAPE_UPSIDEDOWN
Orientations of the display, used by :meth:`LCD160CR.set_orient`.
.. data:: lcd160cr.STARTUP_DECO_NONE
lcd160cr.STARTUP_DECO_MLOGO
lcd160cr.STARTUP_DECO_INFO
Types of start-up decoration, can be OR'ed together, used by
:meth:`LCD160CR.set_startup_deco`.


@@ -0,0 +1,66 @@
.. currentmodule:: machine
.. _machine.ADC:
class ADC -- analog to digital conversion
=========================================
The ADC class provides an interface to analog-to-digital convertors, and
represents a single endpoint that can sample a continuous voltage and
convert it to a discretised value.
For extra control over ADC sampling see :ref:`machine.ADCBlock <machine.ADCBlock>`.
Example usage::
from machine import ADC
adc = ADC(pin) # create an ADC object acting on a pin
val = adc.read_u16() # read a raw analog value in the range 0-65535
val = adc.read_uv() # read an analog value in microvolts
Constructors
------------
.. class:: ADC(id, *, sample_ns, atten)
Access the ADC associated with a source identified by *id*. This
*id* may be an integer (usually specifying a channel number), a
:ref:`Pin <machine.Pin>` object, or other value supported by the
underlying machine.
If additional keyword-arguments are given then they will configure
various aspects of the ADC. If not given, these settings will take
previous or default values. The settings are:
- *sample_ns* is the sampling time in nanoseconds.
- *atten* specifies the input attenuation.
Methods
-------
.. method:: ADC.init(*, sample_ns, atten)
Apply the given settings to the ADC. Only those arguments that are
specified will be changed. See the ADC constructor above for what the
arguments are.
.. method:: ADC.block()
Return the :ref:`ADCBlock <machine.ADCBlock>` instance associated with
this ADC object.
This method only exists if the port supports the
:ref:`ADCBlock <machine.ADCBlock>` class.
.. method:: ADC.read_u16()
Take an analog reading and return an integer in the range 0-65535.
The return value represents the raw reading taken by the ADC, scaled
such that the minimum value is 0 and the maximum value is 65535.
.. method:: ADC.read_uv()
Take an analog reading and return an integer value with units of
microvolts. It is up to the particular port whether or not this value
is calibrated, and how calibration is done.


@@ -0,0 +1,58 @@
.. currentmodule:: machine
.. _machine.ADCBlock:
class ADCBlock -- control ADC peripherals
=========================================
The ADCBlock class provides access to an ADC peripheral which has a
number of channels that can be used to sample analog values. It allows
finer control over configuration of :ref:`machine.ADC <machine.ADC>`
objects, which do the actual sampling.
This class is not always available.
Example usage::
from machine import ADCBlock
block = ADCBlock(id, bits=12) # create an ADCBlock with 12-bit resolution
adc = block.connect(4, pin) # connect channel 4 to the given pin
val = adc.read_uv() # read an analog value
Constructors
------------
.. class:: ADCBlock(id, *, bits)
Access the ADC peripheral identified by *id*, which may be an integer
or string.
The *bits* argument, if given, sets the resolution in bits of the
conversion process. If not specified then the previous or default
resolution is used.
Methods
-------
.. method:: ADCBlock.init(*, bits)
Configure the ADC peripheral. *bits* will set the resolution of the
conversion process.
.. method:: ADCBlock.connect(channel)
ADCBlock.connect(source)
ADCBlock.connect(channel, source)
Connect up a channel on the ADC peripheral so it is ready for sampling,
and return an :ref:`ADC <machine.ADC>` object that represents that connection.
The *channel* argument must be an integer, and *source* must be an object
(for example a :ref:`Pin <machine.Pin>`) which can be connected up for sampling.
If only *channel* is given then it is configured for sampling.
If only *source* is given then that object is connected to a default
channel ready for sampling.
If both *channel* and *source* are given then they are connected together
and made ready for sampling.


@@ -0,0 +1,81 @@
.. currentmodule:: machine
.. _machine.ADCWiPy:
class ADCWiPy -- analog to digital conversion
=============================================
.. note::
This class is a non-standard ADC implementation for the WiPy.
It is available simply as ``machine.ADC`` on the WiPy but is named in the
documentation below as ``machine.ADCWiPy`` to distinguish it from the
more general :ref:`machine.ADC <machine.ADC>` class.
Usage::
import machine
adc = machine.ADC() # create an ADC object
apin = adc.channel(pin='GP3') # create an analog pin on GP3
val = apin() # read an analog value
Constructors
------------
.. class:: ADCWiPy(id=0, *, bits=12)
Create an ADC object associated with the given pin.
This allows you to then read analog values on that pin.
For more info check the `pinout and alternate functions
table. <https://raw.githubusercontent.com/wipy/wipy/master/docs/PinOUT.png>`_
.. warning::
The ADC pin input range is 0-1.4V (1.8V being the absolute maximum that it
can withstand). When GP2, GP3, GP4 or GP5 are remapped to the
ADC block, 1.8V is the maximum. If these pins are used in digital mode,
then the maximum allowed input is 3.6V.
Methods
-------
.. method:: ADCWiPy.channel(id, *, pin)
Create an analog pin. If only channel ID is given, the correct pin will
be selected. Alternatively, only the pin can be passed and the correct
channel will be selected. Examples::
# all of these are equivalent and enable ADC channel 1 on GP3
apin = adc.channel(1)
apin = adc.channel(pin='GP3')
apin = adc.channel(id=1, pin='GP3')
.. method:: ADCWiPy.init()
Enable the ADC block.
.. method:: ADCWiPy.deinit()
Disable the ADC block.
class ADCChannel --- read analog values from internal or external sources
=========================================================================
ADC channels can be connected to internal points of the MCU or to GPIO pins.
ADC channels are created using the ADC.channel method.
.. method:: adcchannel()
Fast method to read the channel value.
.. method:: adcchannel.value()
Read the channel value.
.. method:: adcchannel.init()
Re-init (and effectively enable) the ADC channel.
.. method:: adcchannel.deinit()
Disable the ADC channel.


@@ -0,0 +1,200 @@
.. currentmodule:: machine
.. _machine.I2C:
class I2C -- a two-wire serial protocol
=======================================
I2C is a two-wire protocol for communicating between devices. At the physical
level it consists of 2 wires: SCL and SDA, the clock and data lines respectively.
I2C objects are created attached to a specific bus. They can be initialised
when created, or initialised later on.
Printing the I2C object gives you information about its configuration.
Both hardware and software I2C implementations exist via the
:ref:`machine.I2C <machine.I2C>` and `machine.SoftI2C` classes. Hardware I2C uses
underlying hardware support of the system to perform the reads/writes and is
usually efficient and fast but may have restrictions on which pins can be used.
Software I2C is implemented by bit-banging and can be used on any pin but is not
as efficient. These classes have the same methods available and differ primarily
in the way they are constructed.
Example usage::
from machine import I2C
i2c = I2C(freq=400000) # create I2C peripheral at frequency of 400kHz
# depending on the port, extra parameters may be required
# to select the peripheral and/or pins to use
i2c.scan() # scan for peripherals, returning a list of 7-bit addresses
i2c.writeto(42, b'123') # write 3 bytes to peripheral with 7-bit address 42
i2c.readfrom(42, 4) # read 4 bytes from peripheral with 7-bit address 42
i2c.readfrom_mem(42, 8, 3) # read 3 bytes from memory of peripheral 42,
# starting at memory-address 8 in the peripheral
i2c.writeto_mem(42, 2, b'\x10') # write 1 byte to memory of peripheral 42
# starting at address 2 in the peripheral
Constructors
------------
.. class:: I2C(id, *, scl, sda, freq=400000)
Construct and return a new I2C object using the following parameters:
- *id* identifies a particular I2C peripheral. Allowed values for *id*
depend on the particular port/board.
- *scl* should be a pin object specifying the pin to use for SCL.
- *sda* should be a pin object specifying the pin to use for SDA.
- *freq* should be an integer which sets the maximum frequency
for SCL.
Note that some ports/boards will have default values of *scl* and *sda*
that can be changed in this constructor. Others will have fixed values
of *scl* and *sda* that cannot be changed.
.. _machine.SoftI2C:
.. class:: SoftI2C(scl, sda, *, freq=400000, timeout=50000)
Construct a new software I2C object. The parameters are:
- *scl* should be a pin object specifying the pin to use for SCL.
- *sda* should be a pin object specifying the pin to use for SDA.
- *freq* should be an integer which sets the maximum frequency
for SCL.
- *timeout* is the maximum time in microseconds to wait for clock
stretching (SCL held low by another device on the bus), after
which an ``OSError(ETIMEDOUT)`` exception is raised.
General Methods
---------------
.. method:: I2C.init(scl, sda, *, freq=400000)
Initialise the I2C bus with the given arguments:
- *scl* is a pin object for the SCL line
- *sda* is a pin object for the SDA line
- *freq* is the SCL clock rate
.. method:: I2C.deinit()
Turn off the I2C bus.
Availability: WiPy.
.. method:: I2C.scan()
Scan all I2C addresses between 0x08 and 0x77 inclusive and return a list of
those that respond. A device responds if it pulls the SDA line low after
its address (including a write bit) is sent on the bus.
Primitive I2C operations
------------------------
The following methods implement the primitive I2C controller bus operations and can
be combined to make any I2C transaction. They are provided if you need more
control over the bus, otherwise the standard methods (see below) can be used.
These methods are only available on the `machine.SoftI2C` class.
.. method:: I2C.start()
Generate a START condition on the bus (SDA transitions to low while SCL is high).
.. method:: I2C.stop()
Generate a STOP condition on the bus (SDA transitions to high while SCL is high).
.. method:: I2C.readinto(buf, nack=True, /)
Reads bytes from the bus and stores them into *buf*. The number of bytes
read is the length of *buf*. An ACK will be sent on the bus after
receiving all but the last byte. After the last byte is received, if *nack*
is true then a NACK will be sent, otherwise an ACK will be sent (and in this
case the peripheral assumes more bytes are going to be read in a later call).
.. method:: I2C.write(buf)
Write the bytes from *buf* to the bus. Checks that an ACK is received
after each byte and stops transmitting the remaining bytes if a NACK is
received. The function returns the number of ACKs that were received.
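For example, a raw read transaction can be assembled from these primitives (a sketch only; the pins and the 0x48 device address are illustrative)::
from machine import SoftI2C, Pin
i2c = SoftI2C(scl=Pin(5), sda=Pin(4), freq=100000)
buf = bytearray(2)
i2c.start()
i2c.write(bytes([(0x48 << 1) | 1]))   # address byte with the read bit set
i2c.readinto(buf)                     # a NACK is sent after the last byte
i2c.stop()
print(buf)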
Standard bus operations
-----------------------
The following methods implement the standard I2C controller read and write
operations that target a given peripheral device.
.. method:: I2C.readfrom(addr, nbytes, stop=True, /)
Read *nbytes* from the peripheral specified by *addr*.
If *stop* is true then a STOP condition is generated at the end of the transfer.
Returns a `bytes` object with the data read.
.. method:: I2C.readfrom_into(addr, buf, stop=True, /)
Read into *buf* from the peripheral specified by *addr*.
The number of bytes read will be the length of *buf*.
If *stop* is true then a STOP condition is generated at the end of the transfer.
The method returns ``None``.
.. method:: I2C.writeto(addr, buf, stop=True, /)
Write the bytes from *buf* to the peripheral specified by *addr*. If a
NACK is received following the write of a byte from *buf* then the
remaining bytes are not sent. If *stop* is true then a STOP condition is
generated at the end of the transfer, even if a NACK is received.
The function returns the number of ACKs that were received.
.. method:: I2C.writevto(addr, vector, stop=True, /)
Write the bytes contained in *vector* to the peripheral specified by *addr*.
*vector* should be a tuple or list of objects with the buffer protocol.
The *addr* is sent once and then the bytes from each object in *vector*
are written out sequentially. The objects in *vector* may be zero bytes
in length in which case they don't contribute to the output.
If a NACK is received following the write of a byte from one of the
objects in *vector* then the remaining bytes, and any remaining objects,
are not sent. If *stop* is true then a STOP condition is generated at
the end of the transfer, even if a NACK is received. The function
returns the number of ACKs that were received.
Memory operations
-----------------
Some I2C devices act as a memory device (or set of registers) that can be read
from and written to. In this case there are two addresses associated with an
I2C transaction: the peripheral address and the memory address. The following
methods are convenience functions to communicate with such devices.
.. method:: I2C.readfrom_mem(addr, memaddr, nbytes, *, addrsize=8)
Read *nbytes* from the peripheral specified by *addr* starting from the memory
address specified by *memaddr*.
The argument *addrsize* specifies the address size in bits.
Returns a `bytes` object with the data read.
.. method:: I2C.readfrom_mem_into(addr, memaddr, buf, *, addrsize=8)
Read into *buf* from the peripheral specified by *addr* starting from the
memory address specified by *memaddr*. The number of bytes read is the
length of *buf*.
The argument *addrsize* specifies the address size in bits (on ESP8266
this argument is not recognised and the address size is always 8 bits).
The method returns ``None``.
.. method:: I2C.writeto_mem(addr, memaddr, buf, *, addrsize=8)
Write *buf* to the peripheral specified by *addr* starting from the
memory address specified by *memaddr*.
The argument *addrsize* specifies the address size in bits (on ESP8266
this argument is not recognised and the address size is always 8 bits).
The method returns ``None``.


@@ -0,0 +1,166 @@
.. currentmodule:: machine
.. _machine.I2S:
class I2S -- Inter-IC Sound bus protocol
========================================
I2S is a synchronous serial protocol used to connect digital audio devices.
At the physical level, a bus consists of 3 lines: SCK, WS, SD.
The I2S class supports controller operation. Peripheral operation is not supported.
The I2S class is currently available as a Technical Preview. During the preview period, feedback from
users is encouraged. Based on this feedback, the I2S class API and implementation may be changed.
I2S objects can be created and initialized using::
from machine import I2S
from machine import Pin
# ESP32
sck_pin = Pin(14) # Serial clock output
ws_pin = Pin(13) # Word clock output
sd_pin = Pin(12) # Serial data output
or
# PyBoards
sck_pin = Pin("Y6") # Serial clock output
ws_pin = Pin("Y5") # Word clock output
sd_pin = Pin("Y8") # Serial data output
audio_out = I2S(2,
sck=sck_pin, ws=ws_pin, sd=sd_pin,
mode=I2S.TX,
bits=16,
format=I2S.MONO,
rate=44100,
ibuf=20000)
audio_in = I2S(2,
sck=sck_pin, ws=ws_pin, sd=sd_pin,
mode=I2S.RX,
bits=32,
format=I2S.STEREO,
rate=22050,
ibuf=20000)
3 modes of operation are supported:
- blocking
- non-blocking
- uasyncio
blocking::
num_written = audio_out.write(buf) # blocks until buf emptied
num_read = audio_in.readinto(buf) # blocks until buf filled
non-blocking::
audio_out.irq(i2s_callback) # i2s_callback is called when buf is emptied
num_written = audio_out.write(buf) # returns immediately
audio_in.irq(i2s_callback) # i2s_callback is called when buf is filled
num_read = audio_in.readinto(buf) # returns immediately
uasyncio::
swriter = uasyncio.StreamWriter(audio_out)
swriter.write(buf)
await swriter.drain()
sreader = uasyncio.StreamReader(audio_in)
num_read = await sreader.readinto(buf)
Some codec devices like the WM8960 or SGTL5000 require separate initialization
before they can operate with the I2S class. For these, separate drivers are
supplied, which also offer methods for controlling volume, audio processing and
other things. For these drivers see:
- :ref:`wm8960`
Constructor
-----------
.. class:: I2S(id, *, sck, ws, sd, mck=None, mode, bits, format, rate, ibuf)
Construct an I2S object of the given id:
- ``id`` identifies a particular I2S bus; it is board and port specific
Keyword-only parameters that are supported on all ports:
- ``sck`` is a pin object for the serial clock line
- ``ws`` is a pin object for the word select line
- ``sd`` is a pin object for the serial data line
- ``mck`` is a pin object for the master clock line;
master clock frequency is sampling rate * 256
- ``mode`` specifies receive or transmit
- ``bits`` specifies sample size (bits), 16 or 32
- ``format`` specifies channel format, STEREO or MONO
- ``rate`` specifies audio sampling rate (Hz);
this is the frequency of the ``ws`` signal
- ``ibuf`` specifies internal buffer length (bytes)
For all ports, DMA runs continuously in the background and allows user applications to perform other operations while
sample data is transferred between the internal buffer and the I2S peripheral unit.
Increasing the size of the internal buffer has the potential to increase the time that user applications can perform non-I2S operations
before underflow (e.g. ``write`` method) or overflow (e.g. ``readinto`` method).
Methods
-------
.. method:: I2S.init(sck, ...)
see Constructor for argument descriptions
.. method:: I2S.deinit()
Deinitialize the I2S bus
.. method:: I2S.readinto(buf)
Read audio samples into the buffer specified by ``buf``. ``buf`` must support the buffer protocol, such as bytearray or array.
"buf" byte ordering is little-endian. For Stereo format, left channel sample precedes right channel sample. For Mono format,
the left channel sample data is used.
Returns number of bytes read
.. method:: I2S.write(buf)
Write audio samples contained in ``buf``. ``buf`` must support the buffer protocol, such as bytearray or array.
"buf" byte ordering is little-endian. For Stereo format, left channel sample precedes right channel sample. For Mono format,
the sample data is written to both the right and left channels.
Returns number of bytes written
.. method:: I2S.irq(handler)
Set a callback. ``handler`` is called when ``buf`` is emptied (``write`` method) or becomes full (``readinto`` method).
Setting a callback changes the ``write`` and ``readinto`` methods to non-blocking operation.
``handler`` is called in the context of the MicroPython scheduler.
.. staticmethod:: I2S.shift(*, buf, bits, shift)
Perform a bitwise shift of all samples contained in ``buf``. ``bits`` specifies the sample size in bits. ``shift`` specifies the number of bits to shift each sample.
Positive for left shift, negative for right shift.
Typically used for volume control. Each bit shift changes sample volume by 6dB.
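For example, software volume control can be applied before writing samples (a sketch; it assumes ``audio_out`` from the example above and a bytearray ``wav_samples`` holding 16-bit samples)::
I2S.shift(buf=wav_samples, bits=16, shift=-2)   # attenuate by about 12dB
num_written = audio_out.write(wav_samples)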
Constants
---------
.. data:: I2S.RX
for initialising the I2S bus ``mode`` to receive
.. data:: I2S.TX
for initialising the I2S bus ``mode`` to transmit
.. data:: I2S.STEREO
for initialising the I2S bus ``format`` to stereo
.. data:: I2S.MONO
for initialising the I2S bus ``format`` to mono


@@ -0,0 +1,122 @@
.. currentmodule:: machine
.. _machine.PWM:
class PWM -- pulse width modulation
===================================
This class provides pulse width modulation output.
Example usage::
from machine import PWM
pwm = PWM(pin) # create a PWM object on a pin
pwm.duty_u16(32768) # set duty to 50%
# reinitialise with a period of 200us, duty of 5us
pwm.init(freq=5000, duty_ns=5000)
pwm.duty_ns(3000) # set pulse width to 3us
pwm.deinit()
Constructors
------------
.. class:: PWM(dest, *, freq, duty_u16, duty_ns)
Construct and return a new PWM object using the following parameters:
- *dest* is the entity on which the PWM is output, which is usually a
:ref:`machine.Pin <machine.Pin>` object, but a port may allow other values,
like integers.
- *freq* should be an integer which sets the frequency in Hz for the
PWM cycle.
- *duty_u16* sets the duty cycle as a ratio ``duty_u16 / 65535``.
- *duty_ns* sets the pulse width in nanoseconds.
Setting *freq* may affect other PWM objects if the objects share the same
underlying PWM generator (this is hardware specific).
Only one of *duty_u16* and *duty_ns* should be specified at a time.
Methods
-------
.. method:: PWM.init(*, freq, duty_u16, duty_ns)
Modify settings for the PWM object. See the above constructor for details
about the parameters.
.. method:: PWM.deinit()
Disable the PWM output.
.. method:: PWM.freq([value])
Get or set the current frequency of the PWM output.
With no arguments the frequency in Hz is returned.
With a single *value* argument the frequency is set to that value in Hz. The
method may raise a ``ValueError`` if the frequency is outside the valid range.
.. method:: PWM.duty_u16([value])
Get or set the current duty cycle of the PWM output, as an unsigned 16-bit
value in the range 0 to 65535 inclusive.
With no arguments the duty cycle is returned.
With a single *value* argument the duty cycle is set to that value, measured
as the ratio ``value / 65535``.
.. method:: PWM.duty_ns([value])
Get or set the current pulse width of the PWM output, as a value in nanoseconds.
With no arguments the pulse width in nanoseconds is returned.
With a single *value* argument the pulse width is set to that value.
Specific PWM class implementations
----------------------------------
The following concrete class(es) implement enhancements to the PWM class.
| :ref:`pyb.Timer for PyBoard <pyb.Timer>`
Limitations of PWM
------------------
* Not all frequencies can be generated with absolute accuracy due to
the discrete nature of the computing hardware. Typically the PWM frequency
is obtained by dividing some integer base frequency by an integer divider.
For example, if the base frequency is 80MHz and the required PWM frequency is
300kHz the divider must be a non-integer number 80000000 / 300000 = 266.67.
After rounding the divider is set to 267 and the PWM frequency will be
80000000 / 267 = 299625.5 Hz, not 300kHz. If the divider is set to 266 then
the PWM frequency will be 80000000 / 266 = 300751.9 Hz, but again not 300kHz.
Some ports, like the RP2040 one, use a fractional divider, which allows finer
granularity of the frequency at higher frequencies by switching the PWM
pulse duration between two adjacent values, such that the resulting average
frequency is closer to the intended one, at the cost of spectral purity.
* The duty cycle has the same discrete nature and its absolute accuracy is not
achievable. On most hardware platforms the duty will be applied at the next
frequency period. Therefore, you should wait more than "1/frequency" before
measuring the duty.
* The frequency and the duty cycle resolution are usually interdependent.
The higher the PWM frequency the lower the duty resolution which is available,
and vice versa. For example, a 300kHz PWM frequency can have a duty cycle
resolution of 8 bits, not 16 bits as may be expected. In this case, the lowest
8 bits of *duty_u16* are insignificant. So::
pwm=PWM(Pin(13), freq=300_000, duty_u16=2**16//2)
and::
pwm=PWM(Pin(13), freq=300_000, duty_u16=2**16//2 + 255)
will generate PWM with the same 50% duty cycle.


@@ -0,0 +1,276 @@
.. currentmodule:: machine
.. _machine.Pin:
class Pin -- control I/O pins
=============================
A pin object is used to control I/O pins (also known as GPIO - general-purpose
input/output). Pin objects are commonly associated with a physical pin that can
drive an output voltage and read input voltages. The pin class has methods to set the mode of
the pin (IN, OUT, etc) and methods to get and set the digital logic level.
For analog control of a pin, see the :class:`ADC` class.
A pin object is constructed by using an identifier which unambiguously
specifies a certain I/O pin. The allowed forms of the identifier and the
physical pin that the identifier maps to are port-specific. Possibilities
for the identifier are an integer, a string or a tuple with port and pin
number.
Usage Model::
from machine import Pin
# create an output pin on pin #0
p0 = Pin(0, Pin.OUT)
# set the value low then high
p0.value(0)
p0.value(1)
# create an input pin on pin #2, with a pull up resistor
p2 = Pin(2, Pin.IN, Pin.PULL_UP)
# read and print the pin value
print(p2.value())
# reconfigure pin #0 in input mode with a pull down resistor
p0.init(p0.IN, p0.PULL_DOWN)
# configure an irq callback
p0.irq(lambda p:print(p))
Constructors
------------
.. class:: Pin(id, mode=-1, pull=-1, *, value=None, drive=0, alt=-1)
Access the pin peripheral (GPIO pin) associated with the given ``id``. If
additional arguments are given in the constructor then they are used to initialise
the pin. Any settings that are not specified will remain in their previous state.
The arguments are:
- ``id`` is mandatory and can be an arbitrary object. Among possible value
types are: int (an internal Pin identifier), str (a Pin name), and tuple
(pair of [port, pin]).
- ``mode`` specifies the pin mode, which can be one of:
- ``Pin.IN`` - Pin is configured for input. If viewed as an output the pin
is in high-impedance state.
- ``Pin.OUT`` - Pin is configured for (normal) output.
- ``Pin.OPEN_DRAIN`` - Pin is configured for open-drain output. Open-drain
output works in the following way: if the output value is set to 0 the pin
is active at a low level; if the output value is 1 the pin is in a high-impedance
state. Not all ports implement this mode, and some implement it only on certain pins.
- ``Pin.ALT`` - Pin is configured to perform an alternative function, which is
port specific. For a pin configured in such a way any other Pin methods
(except :meth:`Pin.init`) are not applicable (calling them will lead to undefined,
or a hardware-specific, result). Not all ports implement this mode.
- ``Pin.ALT_OPEN_DRAIN`` - The same as ``Pin.ALT``, but the pin is configured as
open-drain. Not all ports implement this mode.
- ``Pin.ANALOG`` - Pin is configured for analog input, see the :class:`ADC` class.
- ``pull`` specifies if the pin has a (weak) pull resistor attached, and can be
one of:
- ``None`` - No pull up or down resistor.
- ``Pin.PULL_UP`` - Pull up resistor enabled.
- ``Pin.PULL_DOWN`` - Pull down resistor enabled.
- ``value`` is valid only for Pin.OUT and Pin.OPEN_DRAIN modes and specifies initial
output pin value if given, otherwise the state of the pin peripheral remains
unchanged.
- ``drive`` specifies the output power of the pin and can be one of: ``Pin.DRIVE_0``,
``Pin.DRIVE_1``, etc., increasing in drive strength. The actual current driving
capabilities are port dependent. Not all ports implement this argument.
- ``alt`` specifies an alternate function for the pin and the values it can take are
port dependent. This argument is valid only for ``Pin.ALT`` and ``Pin.ALT_OPEN_DRAIN``
modes. It may be used when a pin supports more than one alternate function. If only
one pin alternate function is supported then this argument is not required. Not all
ports implement this argument.
As specified above, the Pin class allows setting an alternate function for a particular
pin, but it does not specify any further operations on such a pin. Pins configured in
alternate-function mode are usually not used as GPIO but are instead driven by other
hardware peripherals. The only operation supported on such a pin is re-initialising,
by calling the constructor or :meth:`Pin.init` method. If a pin that is configured in
alternate-function mode is re-initialised with ``Pin.IN``, ``Pin.OUT``, or
``Pin.OPEN_DRAIN``, the alternate function will be removed from the pin.
Methods
-------
.. method:: Pin.init(mode=-1, pull=-1, *, value=None, drive=0, alt=-1)
Re-initialise the pin using the given parameters. Only those arguments that
are specified will be set. The rest of the pin peripheral state will remain
unchanged. See the constructor documentation for details of the arguments.
Returns ``None``.
.. method:: Pin.value([x])
This method allows setting and getting the value of the pin, depending on whether
the argument ``x`` is supplied or not.
If the argument is omitted then this method gets the digital logic level of
the pin, returning 0 or 1 corresponding to low and high voltage signals
respectively. The behaviour of this method depends on the mode of the pin:
- ``Pin.IN`` - The method returns the actual input value currently present
on the pin.
- ``Pin.OUT`` - The behaviour and return value of the method is undefined.
- ``Pin.OPEN_DRAIN`` - If the pin is in state '0' then the behaviour and
return value of the method is undefined. Otherwise, if the pin is in
state '1', the method returns the actual input value currently present
on the pin.
If the argument is supplied then this method sets the digital logic level of
the pin. The argument ``x`` can be anything that converts to a boolean.
If it converts to ``True``, the pin is set to state '1', otherwise it is set
to state '0'. The behaviour of this method depends on the mode of the pin:
- ``Pin.IN`` - The value is stored in the output buffer for the pin. The
pin state does not change, it remains in the high-impedance state. The
stored value will become active on the pin as soon as it is changed to
``Pin.OUT`` or ``Pin.OPEN_DRAIN`` mode.
- ``Pin.OUT`` - The output buffer is set to the given value immediately.
- ``Pin.OPEN_DRAIN`` - If the value is '0' the pin is set to a low voltage
state. Otherwise the pin is set to high-impedance state.
When setting the value this method returns ``None``.
.. method:: Pin.__call__([x])
Pin objects are callable. The call method provides a (fast) shortcut to set
and get the value of the pin. It is equivalent to ``Pin.value([x])``.
See :meth:`Pin.value` for more details.
.. method:: Pin.on()
Set pin to "1" output level.
.. method:: Pin.off()
Set pin to "0" output level.
.. method:: Pin.irq(handler=None, trigger=(Pin.IRQ_FALLING | Pin.IRQ_RISING), *, priority=1, wake=None, hard=False)
Configure an interrupt handler to be called when the trigger source of the
pin is active. If the pin mode is ``Pin.IN`` then the trigger source is
the external value on the pin. If the pin mode is ``Pin.OUT`` then the
trigger source is the output buffer of the pin. Otherwise, if the pin mode
is ``Pin.OPEN_DRAIN`` then the trigger source is the output buffer for
state '0' and the external pin value for state '1'.
The arguments are:
- ``handler`` is an optional function to be called when the interrupt
triggers. The handler must take exactly one argument which is the
``Pin`` instance.
- ``trigger`` configures the event which can generate an interrupt.
Possible values are:
- ``Pin.IRQ_FALLING`` interrupt on falling edge.
- ``Pin.IRQ_RISING`` interrupt on rising edge.
- ``Pin.IRQ_LOW_LEVEL`` interrupt on low level.
- ``Pin.IRQ_HIGH_LEVEL`` interrupt on high level.
These values can be OR'ed together to trigger on multiple events.
- ``priority`` sets the priority level of the interrupt. The values it
can take are port-specific, but higher values always represent higher
priorities.
- ``wake`` selects the power mode in which this interrupt can wake up the
system. It can be ``machine.IDLE``, ``machine.SLEEP`` or ``machine.DEEPSLEEP``.
These values can also be OR'ed together to make a pin generate interrupts in
more than one power mode.
- ``hard`` if true a hardware interrupt is used. This reduces the delay
between the pin change and the handler being called. Hard interrupt
handlers may not allocate memory; see :ref:`isr_rules`.
Not all ports support this argument.
This method returns a callback object.
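For example, counting falling edges with a hard interrupt handler (a sketch only; the pin number is illustrative and ``hard=True`` is not available on all ports)::
from machine import Pin
count = 0
def on_fall(pin):
    # hard IRQ handlers must not allocate memory (see the ISR rules above)
    global count
    count += 1
button = Pin(2, Pin.IN, Pin.PULL_UP)
button.irq(handler=on_fall, trigger=Pin.IRQ_FALLING, hard=True)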
The following methods are not part of the core Pin API and only implemented on certain ports.
.. method:: Pin.low()
Set pin to "0" output level.
Availability: nrf, rp2, stm32 ports.
.. method:: Pin.high()
Set pin to "1" output level.
Availability: nrf, rp2, stm32 ports.
.. method:: Pin.mode([mode])
Get or set the pin mode.
See the constructor documentation for details of the ``mode`` argument.
Availability: cc3200, stm32 ports.
.. method:: Pin.pull([pull])
Get or set the pin pull state.
See the constructor documentation for details of the ``pull`` argument.
Availability: cc3200, stm32 ports.
.. method:: Pin.drive([drive])
Get or set the pin drive strength.
See the constructor documentation for details of the ``drive`` argument.
Availability: cc3200 port.
Constants
---------
The following constants are used to configure the pin objects. Note that
not all constants are available on all ports.
.. data:: Pin.IN
Pin.OUT
Pin.OPEN_DRAIN
Pin.ALT
Pin.ALT_OPEN_DRAIN
Pin.ANALOG
Selects the pin mode.
.. data:: Pin.PULL_UP
Pin.PULL_DOWN
Pin.PULL_HOLD
Selects whether there is a pull up/down resistor. Use the value
``None`` for no pull.
.. data:: Pin.DRIVE_0
Pin.DRIVE_1
Pin.DRIVE_2
Selects the pin drive strength. A port may define additional drive
constants with increasing number corresponding to increasing drive
strength.
.. data:: Pin.IRQ_FALLING
Pin.IRQ_RISING
Pin.IRQ_LOW_LEVEL
Pin.IRQ_HIGH_LEVEL
Selects the IRQ trigger type.

View File

@@ -0,0 +1,83 @@
.. currentmodule:: machine
.. _machine.RTC:
class RTC -- real time clock
============================
The RTC is an independent clock that keeps track of the date
and time.
Example usage::
rtc = machine.RTC()
rtc.datetime((2020, 1, 21, 2, 10, 32, 36, 0))
print(rtc.datetime())
Constructors
------------
.. class:: RTC(id=0, ...)
Create an RTC object. See init for parameters of initialization.
Methods
-------
.. method:: RTC.datetime([datetimetuple])
Get or set the date and time of the RTC.
With no arguments, this method returns an 8-tuple with the current
date and time. With 1 argument (being an 8-tuple) it sets the date
and time.
The 8-tuple has the following format:
(year, month, day, weekday, hours, minutes, seconds, subseconds)
The meaning of the ``subseconds`` field is hardware dependent.
.. method:: RTC.init(datetime)
Initialise the RTC. Datetime is a tuple of the form:
``(year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])``
.. method:: RTC.now()
Get the current datetime tuple.
.. method:: RTC.deinit()
Resets the RTC to the time of January 1, 2015 and starts running it again.
.. method:: RTC.alarm(id, time, *, repeat=False)
Set the RTC alarm. Time might be either a millisecond value to program the alarm to
current time + time_in_ms in the future, or a datetimetuple. If the time passed is in
milliseconds, repeat can be set to ``True`` to make the alarm periodic.
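A brief sketch, assuming the port implements RTC alarms (the WiPy does)::

    rtc = machine.RTC()
    rtc.alarm(0, 5000, repeat=True)   # raise RTC.ALARM0 every 5 seconds
    print(rtc.alarm_left())           # milliseconds until the next alarm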
.. method:: RTC.alarm_left(alarm_id=0)
Get the number of milliseconds left before the alarm expires.
.. method:: RTC.cancel(alarm_id=0)
Cancel a running alarm.
.. method:: RTC.irq(*, trigger, handler=None, wake=machine.IDLE)
Create an irq object triggered by a real time clock alarm.
- ``trigger`` must be ``RTC.ALARM0``
- ``handler`` is the function to be called when the callback is triggered.
- ``wake`` specifies the sleep mode from where this interrupt can wake
up the system.
Constants
---------
.. data:: RTC.ALARM0
irq trigger source

View File

@@ -0,0 +1,47 @@
.. currentmodule:: machine
.. _machine.SD:
class SD -- secure digital memory card (cc3200 port only)
=========================================================
.. warning::
This is a non-standard class and is only available on the cc3200 port.
The SD card class allows you to configure and enable the memory card
module of the WiPy and automatically mount it as ``/sd`` as part
of the file system. There are several pin combinations that can be
used to wire the SD card socket to the WiPy, and the pins used can
be specified in the constructor. Please check the `pinout and alternate functions
table <https://raw.githubusercontent.com/wipy/wipy/master/docs/PinOUT.png>`_ for
more info regarding the pins which can be remapped to be used with an SD card.
Example usage::
from machine import SD
import os
# clk cmd and dat0 pins must be passed along with
# their respective alternate functions
sd = machine.SD(pins=('GP10', 'GP11', 'GP15'))
os.mount(sd, '/sd')
# do normal file operations
Constructors
------------
.. class:: SD(id,... )
Create an SD card object. See ``init()`` for parameters of initialization.
Methods
-------
.. method:: SD.init(id=0, pins=('GP10', 'GP11', 'GP15'))
Enable the SD card. In order to initialize the card, give it a 3-tuple:
``(clk_pin, cmd_pin, dat0_pin)``.
.. method:: SD.deinit()
Disable the SD card.

View File

@@ -0,0 +1,167 @@
.. currentmodule:: machine
.. _machine.SDCard:
class SDCard -- secure digital memory card
==========================================
SD cards are one of the most common small form factor removable storage media.
SD cards come in a variety of sizes and physical form factors. MMC cards are
similar removable storage devices while eMMC devices are electrically similar
storage devices designed to be embedded into other systems. All three forms
share a common protocol for communication with their host system, and high-level
support looks the same for them all. As such in MicroPython they are implemented
in a single class called :class:`machine.SDCard` .
Both SD and MMC interfaces support being accessed with a variety of bus widths.
When being accessed with a 1-bit wide interface they can be accessed using the
SPI protocol. Different MicroPython hardware platforms support different widths
and pin configurations but for most platforms there is a standard configuration
for any given hardware. In general, constructing an ``SDCard`` object without
passing any parameters will initialise the interface to the default card slot
for the current hardware. The arguments listed below represent the common
arguments that might need to be set in order to use either a non-standard slot
or a non-standard pin assignment. The exact subset of arguments supported will
vary from platform to platform.
.. class:: SDCard(slot=1, width=1, cd=None, wp=None, sck=None, miso=None, mosi=None, cs=None, freq=20000000)
This class provides access to SD or MMC storage cards using either
a dedicated SD/MMC interface hardware or through an SPI channel.
The class implements the block protocol defined by :class:`os.AbstractBlockDev`.
This allows the mounting of an SD card to be as simple as::
os.mount(machine.SDCard(), "/sd")
The constructor takes the following parameters:
- *slot* selects which of the available interfaces to use. Leaving this
unset will select the default interface.
- *width* selects the bus width for the SD/MMC interface.
- *cd* can be used to specify a card-detect pin.
- *wp* can be used to specify a write-protect pin.
- *sck* can be used to specify an SPI clock pin.
- *miso* can be used to specify an SPI miso pin.
- *mosi* can be used to specify an SPI mosi pin.
- *cs* can be used to specify an SPI chip select pin.
- *freq* selects the SD/MMC interface frequency in Hz (only supported on the ESP32).
Implementation-specific details
-------------------------------
Different implementations of the ``SDCard`` class on different hardware support
varying subsets of the options above.
PyBoard
```````
The standard PyBoard has just one slot. No arguments are necessary or supported.
ESP32
`````
The ESP32 provides two channels of SD/MMC hardware and also supports
access to SD Cards through either of the two SPI ports that are
generally available to the user. As a result the *slot* argument can
take a value between 0 and 3, inclusive. Slots 0 and 1 use the
built-in SD/MMC hardware while slots 2 and 3 use the SPI ports. Slot 0
supports 1, 4 or 8-bit wide access while slot 1 supports 1 or 4-bit
access; the SPI slots only support 1-bit access.
.. note:: Slot 0 is used to communicate with on-board flash memory
on most ESP32 modules and so will be unavailable to the
user.
.. note:: Most ESP32 modules that provide an SD card slot using the
dedicated hardware only wire up 1 data pin, so the default
value for *width* is 1.
The pins used by the dedicated SD/MMC hardware are fixed. The pins
used by the SPI hardware can be reassigned.
.. note:: If any of the SPI signals are remapped then all of the SPI
signals will pass through a GPIO multiplexer unit which
can limit the performance of high frequency signals. Since
the normal operating speed for SD cards is 40MHz this can
cause problems on some cards.
The default (and preferred) pin assignments are as follows:
====== ====== ====== ====== ======
Slot   0      1      2      3
------ ------ ------ ------ ------
Signal Pin    Pin    Pin    Pin
====== ====== ====== ====== ======
sck    6      14     18     14
cmd    11     15
cs                   5      15
miso                 19     12
mosi                 23     13
D0     7      2
D1     8      4
D2     9      12
D3     10     13
D4     16
D5     17
D6     5
D7     18
====== ====== ====== ====== ======
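As a minimal sketch, a card wired to SPI slot 2 with the default pins from the
table above could be mounted as follows (adjust the slot and pins for other wirings)::

    import os, machine

    sd = machine.SDCard(slot=2)      # SPI access using the default slot-2 pins
    os.mount(sd, "/sd")
    print(os.listdir("/sd"))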
cc3200
``````
You can set the pins used for SPI access by passing a tuple as the
*pins* argument.
*Note:* The current cc3200 SD card implementation names this class
:class:`machine.SD` rather than :class:`machine.SDCard`.
mimxrt
``````
The SDCard module for the mimxrt port supports access only via the dedicated SD/MMC
peripheral (USDHC), in 4-bit mode with a 50MHz clock frequency.
Unfortunately the MIMXRT1011 controller does not support the USDHC peripheral.
Hence this controller does not feature the ``machine.SDCard`` module.
Due to the decision to only support 4-bit mode with 50MHz clock frequency the
interface has been simplified, and the constructor signature is:
.. class:: SDCard(slot=1)
:noindex:
The pins used for the USDHC peripheral have to be configured in ``mpconfigboard.h``.
Most of the controllers supported by the mimxrt port provide up to two USDHC
peripherals. Therefore the pin configuration is performed using the macro
``MICROPY_USDHCx`` with x being 1 or 2 respectively.
The following shows an example configuration for USDHC1::
#define MICROPY_USDHC1 \
{ \
.cmd = { GPIO_SD_B0_02_USDHC1_CMD}, \
.clk = { GPIO_SD_B0_03_USDHC1_CLK }, \
.cd_b = { GPIO_SD_B0_06_USDHC1_CD_B },\
.data0 = { GPIO_SD_B0_04_USDHC1_DATA0 },\
.data1 = { GPIO_SD_B0_05_USDHC1_DATA1 },\
.data2 = { GPIO_SD_B0_00_USDHC1_DATA2 },\
.data3 = { GPIO_SD_B0_01_USDHC1_DATA3 },\
}
If the card detect pin is not used (cd_b pin) then the respective entry has to be
filled with the following dummy value::
#define USDHC_DUMMY_PIN NULL , 0
Based on the definition of macro ``MICROPY_USDHC1`` and/or ``MICROPY_USDHC2``
the ``machine.SDCard`` module either supports one or two slots. If only one of
the defines is provided, calling ``machine.SDCard()`` or ``machine.SDCard(1)``
will return an instance using the respective USDHC peripheral. When both macros
are defined, calling ``machine.SDCard(2)`` returns an instance using USDHC2.

View File

@@ -0,0 +1,153 @@
.. currentmodule:: machine
.. _machine.SPI:
class SPI -- a Serial Peripheral Interface bus protocol (controller side)
=========================================================================
SPI is a synchronous serial protocol that is driven by a controller. At the
physical level, a bus consists of 3 lines: SCK, MOSI, MISO. Multiple devices
can share the same bus. Each device should have a separate, 4th signal,
CS (Chip Select), to select a particular device on a bus with which
communication takes place. Management of a CS signal should happen in
user code (via machine.Pin class).
Both hardware and software SPI implementations exist via the
:ref:`machine.SPI <machine.SPI>` and `machine.SoftSPI` classes. Hardware SPI uses underlying
hardware support of the system to perform the reads/writes and is usually
efficient and fast but may have restrictions on which pins can be used.
Software SPI is implemented by bit-banging and can be used on any pin but
is not as efficient. These classes have the same methods available and
differ primarily in the way they are constructed.
Example usage::
from machine import SPI, Pin
spi = SPI(0, baudrate=400000) # Create SPI peripheral 0 at frequency of 400kHz.
# Depending on the use case, extra parameters may be required
# to select the bus characteristics and/or pins to use.
cs = Pin(4, mode=Pin.OUT, value=1) # Create chip-select on pin 4.
try:
cs(0) # Select peripheral.
spi.write(b"12345678") # Write 8 bytes, and don't care about received data.
finally:
cs(1) # Deselect peripheral.
try:
cs(0) # Select peripheral.
rxdata = spi.read(8, 0x42) # Read 8 bytes while writing 0x42 for each byte.
finally:
cs(1) # Deselect peripheral.
rxdata = bytearray(8)
try:
cs(0) # Select peripheral.
spi.readinto(rxdata, 0x42) # Read 8 bytes inplace while writing 0x42 for each byte.
finally:
cs(1) # Deselect peripheral.
txdata = b"12345678"
rxdata = bytearray(len(txdata))
try:
cs(0) # Select peripheral.
spi.write_readinto(txdata, rxdata) # Simultaneously write and read bytes.
finally:
cs(1) # Deselect peripheral.
Constructors
------------
.. class:: SPI(id, ...)
Construct an SPI object on the given bus, *id*. Values of *id* depend
on a particular port and its hardware. Values 0, 1, etc. are commonly used
to select hardware SPI block #0, #1, etc.
With no additional parameters, the SPI object is created but not
initialised (it has the settings from the last initialisation of
the bus, if any). If extra arguments are given, the bus is initialised.
See ``init`` for parameters of initialisation.
.. _machine.SoftSPI:
.. class:: SoftSPI(baudrate=500000, *, polarity=0, phase=0, bits=8, firstbit=MSB, sck=None, mosi=None, miso=None)
Construct a new software SPI object. Additional parameters must be
given, usually at least *sck*, *mosi* and *miso*, and these are used
to initialise the bus. See `SPI.init` for a description of the parameters.
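For example, a bit-banged bus on arbitrary pins might be set up as follows (the pin
numbers are placeholders)::

    from machine import Pin, SoftSPI

    spi = SoftSPI(baudrate=100000, polarity=0, phase=0,
                  sck=Pin(5), mosi=Pin(18), miso=Pin(19))
    spi.write(b"test")               # then use it like a hardware SPI object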
Methods
-------
.. method:: SPI.init(baudrate=1000000, *, polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=None, mosi=None, miso=None, pins=(SCK, MOSI, MISO))
Initialise the SPI bus with the given parameters:
- ``baudrate`` is the SCK clock rate.
- ``polarity`` can be 0 or 1, and is the level the idle clock line sits at.
- ``phase`` can be 0 or 1 to sample data on the first or second clock edge
respectively.
- ``bits`` is the width in bits of each transfer. Only 8 is guaranteed to be supported by all hardware.
- ``firstbit`` can be ``SPI.MSB`` or ``SPI.LSB``.
- ``sck``, ``mosi``, ``miso`` are pins (machine.Pin) objects to use for bus signals. For most
hardware SPI blocks (as selected by ``id`` parameter to the constructor), pins are fixed
and cannot be changed. In some cases, hardware blocks allow 2-3 alternative pin sets for
a hardware SPI block. Arbitrary pin assignments are possible only for a bitbanging SPI driver
(``id`` = -1).
- ``pins`` - the WiPy port doesn't accept ``sck``, ``mosi``, ``miso`` arguments, and instead allows
them to be specified as a tuple via the ``pins`` parameter.
In the case of hardware SPI the actual clock frequency may be lower than the
requested baudrate. This is dependent on the platform hardware. The actual
rate may be determined by printing the SPI object.
.. method:: SPI.deinit()
Turn off the SPI bus.
.. method:: SPI.read(nbytes, write=0x00)
Read a number of bytes specified by ``nbytes`` while continuously writing
the single byte given by ``write``.
Returns a ``bytes`` object with the data that was read.
.. method:: SPI.readinto(buf, write=0x00)
Read into the buffer specified by ``buf`` while continuously writing the
single byte given by ``write``.
Returns ``None``.
Note: on WiPy this function returns the number of bytes read.
.. method:: SPI.write(buf)
Write the bytes contained in ``buf``.
Returns ``None``.
Note: on WiPy this function returns the number of bytes written.
.. method:: SPI.write_readinto(write_buf, read_buf)
Write the bytes from ``write_buf`` while reading into ``read_buf``. The
buffers can be the same or different, but both buffers must have the
same length.
Returns ``None``.
Note: on WiPy this function returns the number of bytes written.
Constants
---------
.. data:: SPI.CONTROLLER
for initialising the SPI bus to controller; this is only used for the WiPy
.. data:: SPI.MSB
SoftSPI.MSB
set the first bit to be the most significant bit
.. data:: SPI.LSB
SoftSPI.LSB
set the first bit to be the least significant bit

View File

@@ -0,0 +1,123 @@
.. currentmodule:: machine
.. _machine.Signal:
class Signal -- control and sense external I/O devices
======================================================
The Signal class is a simple extension of the `Pin` class. Unlike Pin, which
can be only in "absolute" 0 and 1 states, a Signal can be in "asserted"
(on) or "deasserted" (off) states, while being inverted (active-low) or
not. In other words, it adds logical inversion support to Pin functionality.
While this may seem a simple addition, it is exactly what is needed to
support a wide array of simple digital devices in a way that is portable across
different boards, which is one of the major MicroPython goals. Regardless
of whether different users have an active-high or active-low LED, a normally
open or normally closed relay - you can develop a single, nice-looking
application which works with each of them, and capture the hardware
configuration differences in a few lines in the config file of your app.
Example::
from machine import Pin, Signal
# Suppose you have an active-high LED on pin 0
led1_pin = Pin(0, Pin.OUT)
# ... and active-low LED on pin 1
led2_pin = Pin(1, Pin.OUT)
# Now to light up both of them using Pin class, you'll need to set
# them to different values
led1_pin.value(1)
led2_pin.value(0)
# The Signal class allows abstracting away the active-high/active-low
# difference
led1 = Signal(led1_pin, invert=False)
led2 = Signal(led2_pin, invert=True)
# Now lighting them up looks the same
led1.value(1)
led2.value(1)
# Even better:
led1.on()
led2.on()
The following is a guide to when Signal vs Pin should be used:
* Use Signal: If you want to control simple on/off devices (including
software PWM!) like LEDs, multi-segment indicators, relays, buzzers, or
read simple binary sensors, like normally open or normally closed buttons,
pulled high or low, reed switches, moisture/flame detectors, etc.
Summing up, if you have a real physical device/sensor requiring GPIO
access, you likely should use a Signal.
* Use Pin: If you implement a higher-level protocol or bus to communicate
with more complex devices.
The split between Pin and Signal comes from the use cases above and the
architecture of MicroPython: Pin offers the lowest overhead, which may
be important when bit-banging protocols. But Signal adds additional
flexibility on top of Pin, at the cost of minor overhead (much smaller
than if you implemented active-high vs active-low device differences in
Python manually!). Also, Pin is a low-level object which needs to be
implemented for each supported board, while Signal is a high-level object
which comes for free once Pin is implemented.
If in doubt, give the Signal a try! Once again, it is offered to save
developers from the need to handle unexciting differences like active-low
vs active-high signals, and allow other users to share and enjoy your
application, instead of being frustrated by the fact that it doesn't
work for them simply because their LEDs or relays are wired in a slightly
different way.
Constructors
------------
.. class:: Signal(pin_obj, invert=False)
Signal(pin_arguments..., *, invert=False)
Create a Signal object. There are two ways to create it:
* By wrapping an existing Pin object - a universal method which works for
any board.
* By passing the required Pin parameters directly to the Signal constructor,
skipping the need to create an intermediate Pin object. Available on
many, but not all, boards.
The arguments are:
- ``pin_obj`` is an existing Pin object.
- ``pin_arguments`` are the same arguments as can be passed to the Pin constructor.
- ``invert`` - if True, the signal will be inverted (active low).
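A short sketch of the second form, passing Pin arguments straight to the
constructor (the pin number is a placeholder)::

    from machine import Pin, Signal

    # Active-low LED on pin 2, created without an intermediate Pin object.
    led = Signal(2, Pin.OUT, invert=True)
    led.on()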
Methods
-------
.. method:: Signal.value([x])
This method allows setting and getting the value of the signal, depending on whether
the argument ``x`` is supplied or not.
If the argument is omitted then this method gets the signal level, with 1 meaning
the signal is asserted (active) and 0 meaning the signal is inactive.
If the argument is supplied then this method sets the signal level. The
argument ``x`` can be anything that converts to a boolean. If it converts
to ``True``, the signal is active, otherwise it is inactive.
The correspondence between the signal being active and the actual logic level on the
underlying pin depends on whether the signal is inverted (active-low) or not.
For a non-inverted signal, active status corresponds to logic 1 and inactive
to logic 0. For an inverted/active-low signal, active status corresponds
to logic 0, while inactive corresponds to logic 1.
.. method:: Signal.on()
Activate signal.
.. method:: Signal.off()
Deactivate signal.

View File

@@ -0,0 +1,81 @@
.. currentmodule:: machine
.. _machine.Timer:
class Timer -- control hardware timers
======================================
Hardware timers deal with timing of periods and events. Timers are perhaps
the most flexible and heterogeneous kind of hardware in MCUs and SoCs,
differing greatly from model to model. MicroPython's Timer class
defines a baseline operation of executing a callback with a given period
(or once after some delay), and allows specific boards to define more
non-standard behaviour (which thus won't be portable to other boards).
See discussion of :ref:`important constraints <machine_callbacks>` on
Timer callbacks.
.. note::
Memory can't be allocated inside irq handlers (an interrupt) and so
exceptions raised within a handler don't give much information. See
:func:`micropython.alloc_emergency_exception_buf` for how to get around this
limitation.
If you are using a WiPy board please refer to :ref:`machine.TimerWiPy <machine.TimerWiPy>`
instead of this class.
Constructors
------------
.. class:: Timer(id, /, ...)
Construct a new timer object of the given ``id``. ``id`` of -1 constructs a
virtual timer (if supported by a board).
``id`` shall not be passed as a keyword argument.
See ``init`` for parameters of initialisation.
Methods
-------
.. method:: Timer.init(*, mode=Timer.PERIODIC, period=-1, callback=None)
Initialise the timer. Example::
def mycallback(t):
pass
# periodic with 100ms period
tim.init(period=100, callback=mycallback)
# one shot firing after 1000ms
tim.init(mode=Timer.ONE_SHOT, period=1000, callback=mycallback)
Keyword arguments:
- ``mode`` can be one of:
- ``Timer.ONE_SHOT`` - The timer runs once until the configured
period of the channel expires.
- ``Timer.PERIODIC`` - The timer runs periodically at the configured
frequency of the channel.
- ``period`` - The timer period, in milliseconds.
- ``callback`` - The callable to call upon expiration of the timer period.
The callback must take one argument, which is passed the Timer object.
The ``callback`` argument shall be specified. Otherwise an exception
will occur upon timer expiration:
``TypeError: 'NoneType' object isn't callable``
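Putting the above together, a minimal sketch using a virtual timer (id -1,
where the port supports it)::

    from machine import Timer

    tim = Timer(-1)
    tim.init(mode=Timer.ONE_SHOT, period=1000,
             callback=lambda t: print("timer fired"))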
.. method:: Timer.deinit()
Deinitialises the timer. Stops the timer, and disables the timer peripheral.
Constants
---------
.. data:: Timer.ONE_SHOT
Timer.PERIODIC
Timer operating mode.

View File

@@ -0,0 +1,159 @@
.. currentmodule:: machine
.. _machine.TimerWiPy:
class TimerWiPy -- control hardware timers
==========================================
.. note::
This class is a non-standard Timer implementation for the WiPy.
It is available simply as ``machine.Timer`` on the WiPy but is named in the
documentation below as ``machine.TimerWiPy`` to distinguish it from the
more general :ref:`machine.Timer <machine.Timer>` class.
Hardware timers deal with timing of periods and events. Timers are perhaps
the most flexible and heterogeneous kind of hardware in MCUs and SoCs,
differing greatly from model to model. MicroPython's Timer class
defines a baseline operation of executing a callback with a given period
(or once after some delay), and allows specific boards to define more
non-standard behaviour (which thus won't be portable to other boards).
See discussion of :ref:`important constraints <machine_callbacks>` on
Timer callbacks.
.. note::
Memory can't be allocated inside irq handlers (an interrupt) and so
exceptions raised within a handler don't give much information. See
:func:`micropython.alloc_emergency_exception_buf` for how to get around this
limitation.
Constructors
------------
.. class:: TimerWiPy(id, ...)
Construct a new timer object of the given id. An id of -1 constructs a
virtual timer (if supported by a board).
Methods
-------
.. method:: TimerWiPy.init(mode, *, width=16)
Initialise the timer. Example::
tim.init(Timer.PERIODIC) # periodic 16-bit timer
tim.init(Timer.ONE_SHOT, width=32) # one shot 32-bit timer
Keyword arguments:
- ``mode`` can be one of:
- ``TimerWiPy.ONE_SHOT`` - The timer runs once until the configured
period of the channel expires.
- ``TimerWiPy.PERIODIC`` - The timer runs periodically at the configured
frequency of the channel.
- ``TimerWiPy.PWM`` - Output a PWM signal on a pin.
- ``width`` must be either 16 or 32 (bits). For really low frequencies < 5Hz
(or large periods), 32-bit timers should be used. 32-bit mode is only available
for ``ONE_SHOT`` and ``PERIODIC`` modes.
.. method:: TimerWiPy.deinit()
Deinitialises the timer. Stops the timer, and disables the timer peripheral.
.. method:: TimerWiPy.channel(channel, *, freq, period, polarity=TimerWiPy.POSITIVE, duty_cycle=0)
If only a channel identifier is passed, then a previously initialized channel
object is returned (or ``None`` if there is no previous channel).
Otherwise, a TimerChannel object is initialized and returned.
The operating mode is the one configured on the Timer object that was used to
create the channel.
- ``channel`` - if the width of the timer is 16 bits, it must be either ``TIMER.A`` or ``TIMER.B``.
If the width is 32 bits then it **must be** ``TIMER.A | TIMER.B``.
Keyword only arguments:
- ``freq`` sets the frequency in Hz.
- ``period`` sets the period in microseconds.
.. note::
Either ``freq`` or ``period`` must be given, never both.
- ``polarity`` this is applicable for ``PWM``, and defines the polarity of the duty cycle
- ``duty_cycle`` only applicable to ``PWM``. It's a percentage (0.00-100.00). Since the WiPy
doesn't support floating point numbers the duty cycle must be specified in the range 0-10000,
where 10000 would represent 100.00, 5050 represents 50.50, and so on.
.. note::
When the channel is in PWM mode, the corresponding pin is assigned automatically, therefore
there's no need to assign the alternate function of the pin via the ``Pin`` class. The pins which
support PWM functionality are the following:
- ``GP24`` on Timer 0 channel A.
- ``GP25`` on Timer 1 channel A.
- ``GP9`` on Timer 2 channel B.
- ``GP10`` on Timer 3 channel A.
- ``GP11`` on Timer 3 channel B.
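A brief PWM sketch under the WiPy pin mapping above (on the WiPy this class is
exposed as ``machine.Timer``; the timer number, frequency and duty cycle are
arbitrary choices)::

    from machine import Timer

    tim = Timer(3)            # timer 3 drives GP10 (channel A) and GP11 (channel B)
    tim.init(Timer.PWM)
    ch = tim.channel(Timer.B, freq=1000, duty_cycle=2500)   # 25.00% duty on GP11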
class TimerChannel --- setup a channel for a timer
==================================================
Timer channels are used to generate/capture a signal using a timer.
TimerChannel objects are created using the Timer.channel() method.
Methods
-------
.. method:: timerchannel.irq(*, trigger, priority=1, handler=None)
The behaviour of this callback is heavily dependent on the operating
mode of the timer channel:
- If mode is ``TimerWiPy.PERIODIC`` the callback is executed periodically
with the configured frequency or period.
- If mode is ``TimerWiPy.ONE_SHOT`` the callback is executed once when
the configured timer expires.
- If mode is ``TimerWiPy.PWM`` the callback is executed when reaching the duty
cycle value.
The accepted params are:
- ``priority`` level of the interrupt. Can take values in the range 1-7.
Higher values represent higher priorities.
- ``handler`` is an optional function to be called when the interrupt is triggered.
- ``trigger`` must be ``TimerWiPy.TIMEOUT`` when the operating mode is either ``TimerWiPy.PERIODIC`` or
``TimerWiPy.ONE_SHOT``. In the case that mode is ``TimerWiPy.PWM`` then trigger must be equal to
``TimerWiPy.MATCH``.
Returns a callback object.
.. method:: timerchannel.freq([value])
Get or set the timer channel frequency (in Hz).
.. method:: timerchannel.period([value])
Get or set the timer channel period (in microseconds).
.. method:: timerchannel.duty_cycle([value])
Get or set the duty cycle of the PWM signal. It's a percentage (0.00-100.00). Since the WiPy
doesn't support floating point numbers the duty cycle must be specified in the range 0-10000,
where 10000 would represent 100.00, 5050 represents 50.50, and so on.
Constants
---------
.. data:: TimerWiPy.ONE_SHOT
.. data:: TimerWiPy.PERIODIC
Timer operating mode.

View File

@@ -0,0 +1,175 @@
.. currentmodule:: machine
.. _machine.UART:
class UART -- duplex serial communication bus
=============================================
UART implements the standard UART/USART duplex serial communications protocol. At
the physical level it consists of 2 lines: RX and TX. The unit of communication
is a character (not to be confused with a string character) which can be 8 or 9
bits wide.
UART objects can be created and initialised using::
from machine import UART
uart = UART(1, 9600) # init with given baudrate
uart.init(9600, bits=8, parity=None, stop=1) # init with given parameters
Supported parameters differ by board:
Pyboard: Bits can be 7, 8 or 9. Stop can be 1 or 2. With *parity=None*,
only 8 and 9 bits are supported. With parity enabled, only 7 and 8 bits
are supported.
WiPy/CC3200: Bits can be 5, 6, 7, 8. Stop can be 1 or 2.
A UART object acts like a `stream` object and reading and writing is done
using the standard stream methods::
uart.read(10) # read 10 characters, returns a bytes object
uart.read() # read all available characters
uart.readline() # read a line
uart.readinto(buf) # read and store into the given buffer
uart.write('abc') # write the 3 characters
Constructors
------------
.. class:: UART(id, ...)
Construct a UART object of the given id.
Methods
-------
.. method:: UART.init(baudrate=9600, bits=8, parity=None, stop=1, *, ...)
Initialise the UART bus with the given parameters:
- *baudrate* is the clock rate.
- *bits* is the number of bits per character, 7, 8 or 9.
- *parity* is the parity, ``None``, 0 (even) or 1 (odd).
- *stop* is the number of stop bits, 1 or 2.
Additional keyword-only parameters that may be supported by a port are:
- *tx* specifies the TX pin to use.
- *rx* specifies the RX pin to use.
- *rts* specifies the RTS (output) pin to use for hardware receive flow control.
- *cts* specifies the CTS (input) pin to use for hardware transmit flow control.
- *txbuf* specifies the length in characters of the TX buffer.
- *rxbuf* specifies the length in characters of the RX buffer.
- *timeout* specifies the time to wait for the first character (in ms).
- *timeout_char* specifies the time to wait between characters (in ms).
- *invert* specifies which lines to invert.
- ``0`` will not invert lines (idle state of both lines is logic high).
- ``UART.INV_TX`` will invert TX line (idle state of TX line now logic low).
- ``UART.INV_RX`` will invert RX line (idle state of RX line now logic low).
- ``UART.INV_TX | UART.INV_RX`` will invert both lines (idle state at logic low).
- *flow* specifies which hardware flow control signals to use. The value
is a bitmask.
- ``0`` will ignore hardware flow control signals.
- ``UART.RTS`` will enable receive flow control by using the RTS output pin to
signal if the receive FIFO has sufficient space to accept more data.
- ``UART.CTS`` will enable transmit flow control by pausing transmission when the
CTS input pin signals that the receiver is running low on buffer space.
- ``UART.RTS | UART.CTS`` will enable both, for full hardware flow control.
On the WiPy only the following keyword-only parameter is supported:
- *pins* is a 4 or 2 item list indicating the TX, RX, RTS and CTS pins (in that order).
Any of the pins can be None if one wants the UART to operate with limited functionality.
If the RTS pin is given then the RX pin must be given as well. The same applies to CTS.
When no pins are given, then the default set of TX and RX pins is taken, and hardware
flow control will be disabled. If *pins* is ``None``, no pin assignment will be made.
.. method:: UART.deinit()
Turn off the UART bus.
.. method:: UART.any()
Returns an integer counting the number of characters that can be read without
blocking. It will return 0 if there are no characters available and a positive
number if there are characters. The method may return 1 even if there is more
than one character available for reading.
For more sophisticated querying of available characters use select.poll::
poll = select.poll()
poll.register(uart, select.POLLIN)
poll.poll(timeout)
.. method:: UART.read([nbytes])
Read characters. If ``nbytes`` is specified then read at most that many bytes,
otherwise read as much data as possible. It may return sooner if a timeout
is reached. The timeout is configurable in the constructor.
Return value: a bytes object containing the bytes read in. Returns ``None``
on timeout.
.. method:: UART.readinto(buf[, nbytes])
Read bytes into the ``buf``. If ``nbytes`` is specified then read at most
that many bytes. Otherwise, read at most ``len(buf)`` bytes. It may return sooner if a timeout
is reached. The timeout is configurable in the constructor.
Return value: number of bytes read and stored into ``buf`` or ``None`` on
timeout.
.. method:: UART.readline()
Read a line, ending in a newline character. It may return sooner if a timeout
is reached. The timeout is configurable in the constructor.
Return value: the line read or ``None`` on timeout.
.. method:: UART.write(buf)
Write the buffer of bytes to the bus.
Return value: number of bytes written or ``None`` on timeout.
.. method:: UART.sendbreak()
Send a break condition on the bus. This drives the bus low for a duration
longer than required for a normal transmission of a character.
.. method:: UART.irq(trigger, priority=1, handler=None, wake=machine.IDLE)
Create a callback to be triggered when data is received on the UART.
- *trigger* can only be ``UART.RX_ANY``
- *priority* level of the interrupt. Can take values in the range 1-7.
Higher values represent higher priorities.
- *handler* an optional function to be called when new characters arrive.
- *wake* can only be ``machine.IDLE``.
.. note::
The handler will be called whenever either of the following two conditions is met:
- 8 new characters have been received.
- At least 1 new character is waiting in the Rx buffer and the Rx line has been
silent for the duration of 1 complete frame.
This means that when the handler function is called there will be between 1 and 8
characters waiting.
Returns an irq object.
Availability: WiPy.
Constants
---------
.. data:: UART.RX_ANY
IRQ trigger sources
Availability: WiPy.

View File

@@ -0,0 +1,38 @@
.. currentmodule:: machine
.. _machine.WDT:
class WDT -- watchdog timer
===========================
The WDT is used to restart the system when the application crashes and ends
up in a non-recoverable state. Once started it cannot be stopped or
reconfigured in any way. After enabling, the application must "feed" the
watchdog periodically to prevent it from expiring and resetting the system.
Example usage::
from machine import WDT
wdt = WDT(timeout=2000) # enable it with a timeout of 2s
wdt.feed()
Availability of this class: pyboard, WiPy, esp8266, esp32.
Constructors
------------
.. class:: WDT(id=0, timeout=5000)
Create a WDT object and start it. The timeout must be given in milliseconds.
Once it is running the timeout cannot be changed and the WDT cannot be stopped either.
Notes: On the esp32 the minimum timeout is 1 second. On the esp8266 a timeout
cannot be specified, it is determined by the underlying system.
Methods
-------
.. method:: WDT.feed()
Feed the WDT to prevent it from resetting the system. The application
should place this call in a sensible place ensuring that the WDT is
only fed after verifying that everything is functioning correctly.

View File

@@ -0,0 +1,212 @@
:mod:`machine` --- functions related to the hardware
====================================================
.. module:: machine
:synopsis: functions related to the hardware
The ``machine`` module contains specific functions related to the hardware
on a particular board. Most functions in this module provide direct
and unrestricted access to and control of hardware blocks on a system
(like CPU, timers, buses, etc.). Used incorrectly, this can lead to
malfunction, lockups, crashes of your board, and in extreme cases, hardware
damage.
.. _machine_callbacks:
A note on callbacks used by functions and class methods of the :mod:`machine` module:
all these callbacks should be considered as executing in an interrupt context.
This is true for both physical devices with IDs >= 0 and "virtual" devices
with negative IDs like -1 (these "virtual" devices are still thin shims on
top of real hardware and real hardware interrupts). See :ref:`isr_rules`.
Reset related functions
-----------------------
.. function:: reset()
Resets the device in a manner similar to pushing the external RESET
button.
.. function:: soft_reset()
Performs a soft reset of the interpreter, deleting all Python objects and
resetting the Python heap. It tries to retain the method by which the user
is connected to the MicroPython REPL (eg serial, USB, Wifi).
.. function:: reset_cause()
Get the reset cause. See :ref:`constants <machine_constants>` for the possible return values.
.. function:: bootloader([value])
Reset the device and enter its bootloader. This is typically used to put the
device into a state where it can be programmed with new firmware.
Some ports support passing in an optional *value* argument which can control
which bootloader to enter, what to pass to it, or other things.
Interrupt related functions
---------------------------
.. function:: disable_irq()
Disable interrupt requests.
Returns the previous IRQ state which should be considered an opaque value.
This return value should be passed to the `enable_irq()` function to restore
interrupts to their original state, before `disable_irq()` was called.
.. function:: enable_irq(state)
Re-enable interrupt requests.
The *state* parameter should be the value that was returned from the most
recent call to the `disable_irq()` function.
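A typical critical-section pattern using these two functions::

    import machine

    state = machine.disable_irq()
    try:
        pass                        # code here runs with interrupts disabled
    finally:
        machine.enable_irq(state)   # always restore the previous IRQ state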
Power related functions
-----------------------
.. function:: freq([hz])
Returns the CPU frequency in hertz.
On some ports this can also be used to set the CPU frequency by passing in *hz*.
.. function:: idle()
Gates the clock to the CPU, useful to reduce power consumption at any time during
short or long periods. Peripherals continue working and execution resumes as soon
as any interrupt is triggered (on many ports this includes the system timer
interrupt occurring at regular intervals on the order of a millisecond).
.. function:: sleep()
.. note:: This function is deprecated, use `lightsleep()` instead with no arguments.
.. function:: lightsleep([time_ms])
deepsleep([time_ms])
Stops execution in an attempt to enter a low power state.
If *time_ms* is specified then this will be the maximum time in milliseconds that
the sleep will last for. Otherwise the sleep can last indefinitely.
With or without a timeout, execution may resume at any time if there are events
that require processing. Such events, or wake sources, should be configured before
sleeping, like `Pin` change or `RTC` timeout.
The precise behaviour and power-saving capabilities of lightsleep and deepsleep is
highly dependent on the underlying hardware, but the general properties are:
* A lightsleep has full RAM and state retention. Upon wake execution is resumed
from the point where the sleep was requested, with all subsystems operational.
* A deepsleep may not retain RAM or any other state of the system (for example
peripherals or network interfaces). Upon wake execution is resumed from the main
script, similar to a hard or power-on reset. The `reset_cause()` function will
return `machine.DEEPSLEEP` and this can be used to distinguish a deepsleep wake
from other resets.
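A minimal sketch of a timed deep sleep, using `reset_cause()` to detect the wake
(wake sources and exact behaviour are port-specific)::

    import machine

    if machine.reset_cause() == machine.DEEPSLEEP_RESET:
        print("woke from a deep sleep")

    machine.deepsleep(10000)        # sleep for 10 seconds, then restart the main script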
.. function:: wake_reason()
Get the wake reason. See :ref:`constants <machine_constants>` for the possible return values.
Availability: ESP32, WiPy.
Miscellaneous functions
-----------------------
.. function:: unique_id()
Returns a byte string with a unique identifier of a board/SoC. It will vary
from one board/SoC instance to another, if the underlying hardware allows. The length
varies by hardware (so use a substring of the full value if you expect a short
ID). In some MicroPython ports, the ID corresponds to the network MAC address.
.. function:: time_pulse_us(pin, pulse_level, timeout_us=1000000, /)
Time a pulse on the given *pin*, and return the duration of the pulse in
microseconds. The *pulse_level* argument should be 0 to time a low pulse
or 1 to time a high pulse.
If the current input value of the pin is different to *pulse_level*,
the function first (*) waits until the pin input becomes equal to *pulse_level*,
then (**) times the duration that the pin is equal to *pulse_level*.
If the pin is already equal to *pulse_level* then timing starts straight away.
The function will return -2 if there was a timeout waiting for the condition marked
(*) above, and -1 if there was a timeout during the main measurement, marked (**)
above. The timeout is the same for both cases and is given by *timeout_us* (which
is in microseconds).
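For example, timing a high pulse on an input pin (the pin number is a placeholder)::

    from machine import Pin, time_pulse_us

    echo = Pin(14, Pin.IN)
    width = time_pulse_us(echo, 1, 30000)   # wait for and time a high pulse, 30ms timeout
    if width < 0:
        print("timed out")                  # -2: timeout at stage (*), -1: at stage (**)
    else:
        print("pulse width:", width, "us")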
.. function:: bitstream(pin, encoding, timing, data, /)
Transmits *data* by bit-banging the specified *pin*. The *encoding* argument
specifies how the bits are encoded, and *timing* is an encoding-specific timing
specification.
The supported encodings are:
- ``0`` for "high low" pulse duration modulation. This will transmit 0 and
1 bits as timed pulses, starting with the most significant bit.
The *timing* must be a four-tuple of nanoseconds in the format
``(high_time_0, low_time_0, high_time_1, low_time_1)``. For example,
``(400, 850, 800, 450)`` is the timing specification for WS2812 RGB LEDs
at 800kHz.
The accuracy of the timing varies between ports. On Cortex M0 at 48MHz, it is
at best +/- 120ns, however on faster MCUs (ESP8266, ESP32, STM32, Pyboard), it
will be closer to +/-30ns.
.. note:: For controlling WS2812 / NeoPixel strips, see the :mod:`neopixel`
module for a higher-level API.
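As a sketch, sending a single WS2812 pixel (3 bytes, typically GRB order) using the
timing above; the pin number is a placeholder::

    from machine import Pin, bitstream

    bitstream(Pin(4, Pin.OUT), 0, (400, 850, 800, 450), b"\x20\x00\x00")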
.. function:: rng()
Return a 24-bit software generated random number.
Availability: WiPy.
.. _machine_constants:
Constants
---------
.. data:: machine.IDLE
machine.SLEEP
machine.DEEPSLEEP
IRQ wake values.
.. data:: machine.PWRON_RESET
machine.HARD_RESET
machine.WDT_RESET
machine.DEEPSLEEP_RESET
machine.SOFT_RESET
Reset causes.
.. data:: machine.WLAN_WAKE
machine.PIN_WAKE
machine.RTC_WAKE
Wake-up reasons.
Classes
-------
.. toctree::
:maxdepth: 1
machine.Pin.rst
machine.Signal.rst
machine.ADC.rst
machine.ADCBlock.rst
machine.PWM.rst
machine.UART.rst
machine.SPI.rst
machine.I2C.rst
machine.I2S.rst
machine.RTC.rst
machine.Timer.rst
machine.WDT.rst
machine.SD.rst
machine.SDCard.rst

View File

@@ -0,0 +1,185 @@
:mod:`math` -- mathematical functions
=====================================
.. module:: math
:synopsis: mathematical functions
|see_cpython_module| :mod:`python:math`.
The ``math`` module provides some basic mathematical functions for
working with floating-point numbers.
*Note:* On the pyboard, floating-point numbers have 32-bit precision.
Availability: not available on WiPy. Floating point support required
for this module.
Functions
---------
.. function:: acos(x)
Return the inverse cosine of ``x``.
.. function:: acosh(x)
Return the inverse hyperbolic cosine of ``x``.
.. function:: asin(x)
Return the inverse sine of ``x``.
.. function:: asinh(x)
Return the inverse hyperbolic sine of ``x``.
.. function:: atan(x)
Return the inverse tangent of ``x``.
.. function:: atan2(y, x)
Return the principal value of the inverse tangent of ``y/x``.
.. function:: atanh(x)
Return the inverse hyperbolic tangent of ``x``.
.. function:: ceil(x)
Return an integer, being ``x`` rounded towards positive infinity.
.. function:: copysign(x, y)
Return ``x`` with the sign of ``y``.
.. function:: cos(x)
Return the cosine of ``x``.
.. function:: cosh(x)
Return the hyperbolic cosine of ``x``.
.. function:: degrees(x)
Return radians ``x`` converted to degrees.
.. function:: erf(x)
Return the error function of ``x``.
.. function:: erfc(x)
Return the complementary error function of ``x``.
.. function:: exp(x)
Return the exponential of ``x``.
.. function:: expm1(x)
Return ``exp(x) - 1``.
.. function:: fabs(x)
Return the absolute value of ``x``.
.. function:: floor(x)
Return an integer, being ``x`` rounded towards negative infinity.
.. function:: fmod(x, y)
Return the remainder of ``x/y``.
.. function:: frexp(x)
Decomposes a floating-point number into its mantissa and exponent.
The returned value is the tuple ``(m, e)`` such that ``x == m * 2**e``
exactly. If ``x == 0`` then the function returns ``(0.0, 0)``, otherwise
the relation ``0.5 <= abs(m) < 1`` holds.
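For example::

    >>> math.frexp(8.0)
    (0.5, 4)             # because 0.5 * 2**4 == 8.0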
.. function:: gamma(x)
Return the gamma function of ``x``.
.. function:: isfinite(x)
Return ``True`` if ``x`` is finite.
.. function:: isinf(x)
Return ``True`` if ``x`` is infinite.
.. function:: isnan(x)
Return ``True`` if ``x`` is not-a-number.
.. function:: ldexp(x, exp)
Return ``x * (2**exp)``.
.. function:: lgamma(x)
Return the natural logarithm of the gamma function of ``x``.
.. function:: log(x)
Return the natural logarithm of ``x``.
.. function:: log10(x)
Return the base-10 logarithm of ``x``.
.. function:: log2(x)
Return the base-2 logarithm of ``x``.
.. function:: modf(x)
Return a tuple of two floats, being the fractional and integral parts of
``x``. Both return values have the same sign as ``x``.
.. function:: pow(x, y)
Returns ``x`` to the power of ``y``.
.. function:: radians(x)
Return degrees ``x`` converted to radians.
.. function:: sin(x)
Return the sine of ``x``.
.. function:: sinh(x)
Return the hyperbolic sine of ``x``.
.. function:: sqrt(x)
Return the square root of ``x``.
.. function:: tan(x)
Return the tangent of ``x``.
.. function:: tanh(x)
Return the hyperbolic tangent of ``x``.
.. function:: trunc(x)
Return an integer, being ``x`` rounded towards 0.
Constants
---------
.. data:: e
base of the natural logarithm
.. data:: pi
the ratio of a circle's circumference to its diameter

View File

@@ -0,0 +1,149 @@
:mod:`micropython` -- access and control MicroPython internals
==============================================================
.. module:: micropython
:synopsis: access and control MicroPython internals
Functions
---------
.. function:: const(expr)
Used to declare that the expression is a constant so that the compiler can
optimise it. The use of this function should be as follows::
from micropython import const
CONST_X = const(123)
CONST_Y = const(2 * CONST_X + 1)
Constants declared this way are still accessible as global variables from
outside the module they are declared in. On the other hand, if a constant
begins with an underscore then it is hidden, it is not available as a global
variable, and does not take up any memory during execution.
This `const` function is recognised directly by the MicroPython parser and is
provided as part of the :mod:`micropython` module mainly so that scripts can be
written which run under both CPython and MicroPython, by following the above
pattern.
.. function:: opt_level([level])
If *level* is given then this function sets the optimisation level for subsequent
compilation of scripts, and returns ``None``. Otherwise it returns the current
optimisation level.
The optimisation level controls the following compilation features:
- Assertions: at level 0 assertion statements are enabled and compiled into the
bytecode; at levels 1 and higher assertions are not compiled.
- Built-in ``__debug__`` variable: at level 0 this variable expands to ``True``;
at levels 1 and higher it expands to ``False``.
- Source-code line numbers: at levels 0, 1 and 2 source-code line numbers are
stored along with the bytecode so that exceptions can report the line number
they occurred at; at levels 3 and higher line numbers are not stored.
The default optimisation level is usually level 0.
.. function:: alloc_emergency_exception_buf(size)
Allocate *size* bytes of RAM for the emergency exception buffer (a good
size is around 100 bytes). The buffer is used to create exceptions in cases
when normal RAM allocation would fail (eg within an interrupt handler) and
therefore give useful traceback information in these situations.
A good way to use this function is to put it at the start of your main script
(eg ``boot.py`` or ``main.py``) and then the emergency exception buffer will be active
for all the code following it.
.. function:: mem_info([verbose])
Print information about currently used memory. If the *verbose* argument
is given then extra information is printed.
The information that is printed is implementation dependent, but currently
includes the amount of stack and heap used. In verbose mode it prints out
the entire heap indicating which blocks are used and which are free.
.. function:: qstr_info([verbose])
Print information about currently interned strings. If the *verbose*
argument is given then extra information is printed.
The information that is printed is implementation dependent, but currently
includes the number of interned strings and the amount of RAM they use. In
verbose mode it prints out the names of all RAM-interned strings.
.. function:: stack_use()
Return an integer representing the current amount of stack that is being
used. The absolute value of this is not particularly useful, rather it
should be used to compute differences in stack usage at different points.
.. function:: heap_lock()
.. function:: heap_unlock()
.. function:: heap_locked()
Lock or unlock the heap. When locked no memory allocation can occur and a
`MemoryError` will be raised if any heap allocation is attempted.
`heap_locked()` returns a true value if the heap is currently locked.
These functions can be nested, ie `heap_lock()` can be called multiple times
in a row and the lock-depth will increase, and then `heap_unlock()` must be
called the same number of times to make the heap available again.
Both `heap_unlock()` and `heap_locked()` return the current lock depth
(after unlocking for the former) as a non-negative integer, with 0 meaning
the heap is not locked.
If the REPL becomes active with the heap locked then it will be forcefully
unlocked.
Note: `heap_locked()` is not enabled on most ports by default;
it requires ``MICROPY_PY_MICROPYTHON_HEAP_LOCKED``.
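A short sketch of the lock/unlock pattern::

    import micropython

    micropython.heap_lock()
    try:
        buf = bytearray(16)          # any heap allocation now raises MemoryError
    except MemoryError:
        print("allocation blocked while the heap is locked")
    finally:
        micropython.heap_unlock()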
.. function:: kbd_intr(chr)
Set the character that will raise a `KeyboardInterrupt` exception. By
default this is set to 3 during script execution, corresponding to Ctrl-C.
Passing -1 to this function will disable capture of Ctrl-C, and passing 3
will restore it.
This function can be used to prevent the capturing of Ctrl-C on the
incoming stream of characters that is usually used for the REPL, in case
that stream is used for other purposes.
.. function:: schedule(func, arg)
Schedule the function *func* to be executed "very soon". The function
is passed the value *arg* as its single argument. "Very soon" means that
the MicroPython runtime will do its best to execute the function at the
earliest possible time, given that it is also trying to be efficient, and
that the following conditions hold:
- A scheduled function will never preempt another scheduled function.
- Scheduled functions are always executed "between opcodes" which means
that all fundamental Python operations (such as appending to a list)
are guaranteed to be atomic.
- A given port may define "critical regions" within which scheduled
functions will never be executed. Functions may be scheduled within
a critical region but they will not be executed until that region
is exited. An example of a critical region is a preempting interrupt
handler (an IRQ).
A use for this function is to schedule a callback from a preempting IRQ.
Such an IRQ puts restrictions on the code that runs in the IRQ (for example
the heap may be locked) and scheduling a function to call later will lift
those restrictions.
Note: If `schedule()` is called from a preempting IRQ, when memory
allocation is not allowed and the callback to be passed to `schedule()` is
a bound method, passing this directly will fail. This is because creating a
reference to a bound method causes memory allocation. A solution is to
create a reference to the method in the class constructor and to pass that
reference to `schedule()`. This is discussed in detail here
:ref:`reference documentation <isr_rules>` under "Creation of Python
objects".
There is a finite queue to hold the scheduled functions and `schedule()`
will raise a `RuntimeError` if the queue is full.
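An illustrative sketch (the handler and argument are placeholders; in practice
`schedule()` is usually called from inside a hard IRQ handler)::

    import micropython

    def process(arg):
        print("processing", arg)     # runs later, between opcodes, with the heap available

    micropython.schedule(process, 42)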

View File

@@ -0,0 +1,73 @@
:mod:`neopixel` --- control of WS2812 / NeoPixel LEDs
=====================================================
.. module:: neopixel
:synopsis: control of WS2812 / NeoPixel LEDs
This module provides a driver for WS2812 / NeoPixel LEDs.
.. note:: This module is only included by default on the ESP8266 and ESP32
ports. On STM32 / Pyboard, you can `download the module
<https://github.com/micropython/micropython/blob/master/drivers/neopixel/neopixel.py>`_
and copy it to the filesystem.
class NeoPixel
--------------
This class stores pixel data for a WS2812 LED strip connected to a pin. The
application should set pixel data and then call :meth:`NeoPixel.write`
when it is ready to update the strip.
For example::
import neopixel
# 32 LED strip connected to X8.
p = machine.Pin.board.X8
n = neopixel.NeoPixel(p, 32)
# Draw a red gradient.
for i in range(32):
n[i] = (i * 8, 0, 0)
# Update the strip.
n.write()
Constructors
------------
.. class:: NeoPixel(pin, n, *, bpp=3, timing=1)
Construct a NeoPixel object. The parameters are:
- *pin* is a machine.Pin instance.
- *n* is the number of LEDs in the strip.
- *bpp* is 3 for RGB LEDs, and 4 for RGBW LEDs.
- *timing* is 0 for 400kHz, and 1 for 800kHz LEDs (most are 800kHz).
Pixel access methods
--------------------
.. method:: NeoPixel.fill(pixel)
Sets the value of all pixels to the specified *pixel* value (i.e. an
RGB/RGBW tuple).
.. method:: NeoPixel.__len__()
Returns the number of LEDs in the strip.
.. method:: NeoPixel.__setitem__(index, val)
Set the pixel at *index* to the value, which is an RGB/RGBW tuple.
.. method:: NeoPixel.__getitem__(index)
Returns the pixel at *index* as an RGB/RGBW tuple.
Output methods
--------------
.. method:: NeoPixel.write()
Writes the current pixel data to the strip.

View File

@@ -0,0 +1,89 @@
.. currentmodule:: network
.. _network.CC3K:
class CC3K -- control CC3000 WiFi modules
=========================================
This class provides a driver for CC3000 WiFi modules. Example usage::
import network
nic = network.CC3K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4, pyb.Pin.board.Y3)
nic.connect('your-ssid', 'your-password')
while not nic.isconnected():
pyb.delay(50)
print(nic.ifconfig())
# now use socket as usual
...
For this example to work the CC3000 module must have the following connections:
- MOSI connected to Y8
- MISO connected to Y7
- CLK connected to Y6
- CS connected to Y5
- VBEN connected to Y4
- IRQ connected to Y3
It is possible to use other SPI buses and other pins for CS, VBEN and IRQ.
Constructors
------------
.. class:: CC3K(spi, pin_cs, pin_en, pin_irq)
Create a CC3K driver object, initialise the CC3000 module using the given SPI bus
and pins, and return the CC3K object.
Arguments are:
- *spi* is an :ref:`SPI object <pyb.SPI>` which is the SPI bus that the CC3000 is
connected to (the MOSI, MISO and CLK pins).
- *pin_cs* is a :ref:`Pin object <pyb.Pin>` which is connected to the CC3000 CS pin.
- *pin_en* is a :ref:`Pin object <pyb.Pin>` which is connected to the CC3000 VBEN pin.
- *pin_irq* is a :ref:`Pin object <pyb.Pin>` which is connected to the CC3000 IRQ pin.
All of these objects will be initialised by the driver, so there is no need to
initialise them yourself. For example, you can use::
nic = network.CC3K(pyb.SPI(2), pyb.Pin.board.Y5, pyb.Pin.board.Y4, pyb.Pin.board.Y3)
Methods
-------
.. method:: CC3K.connect(ssid, key=None, *, security=WPA2, bssid=None)
Connect to a WiFi access point using the given SSID, and other security
parameters.
.. method:: CC3K.disconnect()
Disconnect from the WiFi access point.
.. method:: CC3K.isconnected()
Returns True if connected to a WiFi access point and has a valid IP address,
False otherwise.
.. method:: CC3K.ifconfig()
Returns a 7-tuple with (ip, subnet mask, gateway, DNS server, DHCP server,
MAC address, SSID).
.. method:: CC3K.patch_version()
Return the version of the patch program (firmware) on the CC3000.
.. method:: CC3K.patch_program('pgm')
Upload the current firmware to the CC3000. You must pass 'pgm' as the first
argument in order for the upload to proceed.
Constants
---------
.. data:: CC3K.WEP
.. data:: CC3K.WPA
.. data:: CC3K.WPA2
security type to use

View File

@@ -0,0 +1,93 @@
.. currentmodule:: network
.. _network.LAN:
class LAN -- control an Ethernet module
=======================================
This class allows you to control the Ethernet interface. The PHY hardware type is board-specific.
Example usage::
import network
nic = network.LAN(0)
print(nic.ifconfig())
# now use socket as usual
...
Constructors
------------
.. class:: LAN(id, *, phy_type=<board_default>, phy_addr=<board_default>, phy_clock=<board_default>)
Create a LAN driver object, initialise the LAN module using the given
PHY driver name, and return the LAN object.
Arguments are:
- *id* is the number of the Ethernet port, either 0 or 1.
- *phy_type* is the name of the PHY driver. For most boards the on-board PHY has to be used and
is the default. Suitable values are port specific.
- *phy_addr* specifies the address of the PHY interface. As with *phy_type*, the hardwired value has
to be used for most boards and that value is the default.
- *phy_clock* specifies whether the data clock is provided by the Ethernet controller or the PHY interface.
The default value is the one that matches the board. If set to ``True``, the clock is driven by the
Ethernet controller, otherwise by the PHY interface.
For example, with the Seeed Arch Mix board you can use::
nic = LAN(0, phy_type=LAN.PHY_LAN8720, phy_addr=2, phy_clock=False)
Methods
-------
.. method:: LAN.active([state])
With a parameter, it sets the interface active if *state* is true, otherwise it
sets it inactive.
Without a parameter, it returns the state.
.. method:: LAN.isconnected()
Returns ``True`` if the physical Ethernet link is connected and up.
Returns ``False`` otherwise.
.. method:: LAN.status()
Returns the LAN status.
.. method:: LAN.ifconfig([(ip, subnet, gateway, dns)])
Get/set IP address, subnet mask, gateway and DNS.
When called with no arguments, this method returns a 4-tuple with the above information.
To set the above values, pass a 4-tuple with the required information. For example::
nic.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))
.. method:: LAN.config(config_parameters)
Sets or gets parameters of the LAN interface. The only parameter that can be
retrieved is the MAC address, using::
mac = LAN.config("mac")
The parameters that can be set are:
- ``trace=n`` sets trace levels; suitable values are:
- 2: trace TX
- 4: trace RX
- 8: full trace
- ``low_power=bool`` sets or clears low power mode, valid values being ``False``
or ``True``.
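A short sketch combining the calls above (assuming ``nic`` is a ``LAN`` instance created as
shown earlier; the trace level chosen is arbitrary)::

    mac = nic.config("mac")       # query the MAC address
    nic.config(trace=4)           # trace received packets
    nic.config(low_power=True)    # enable low power mode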
Specific LAN class implementations
----------------------------------
On the mimxrt port, suitable values for the *phy_type* constructor argument are:
``PHY_KSZ8081``, ``PHY_DP83825``, ``PHY_DP83848``, ``PHY_LAN8720``, ``PHY_RTL8211F``.

View File

@@ -0,0 +1,71 @@
.. currentmodule:: network
.. _network.WIZNET5K:
class WIZNET5K -- control WIZnet5x00 Ethernet modules
=====================================================
This class allows you to control WIZnet5x00 Ethernet adaptors based on
the W5200 and W5500 chipsets. The particular chipset that is supported
by the firmware is selected at compile-time via the MICROPY_PY_NETWORK_WIZNET5K
option.
Example usage::
import network
nic = network.WIZNET5K(pyb.SPI(1), pyb.Pin.board.X5, pyb.Pin.board.X4)
print(nic.ifconfig())
# now use socket as usual
...
For this example to work the WIZnet5x00 module must have the following connections:
- MOSI connected to X8
- MISO connected to X7
- SCLK connected to X6
- nSS connected to X5
- nRESET connected to X4
It is possible to use other SPI buses and other pins for nSS and nRESET.
Constructors
------------
.. class:: WIZNET5K(spi, pin_cs, pin_rst)
Create a WIZNET5K driver object, initialise the WIZnet5x00 module using the given
SPI bus and pins, and return the WIZNET5K object.
Arguments are:
- *spi* is an :ref:`SPI object <pyb.SPI>` which is the SPI bus that the WIZnet5x00 is
connected to (the MOSI, MISO and SCLK pins).
- *pin_cs* is a :ref:`Pin object <pyb.Pin>` which is connected to the WIZnet5x00 nSS pin.
- *pin_rst* is a :ref:`Pin object <pyb.Pin>` which is connected to the WIZnet5x00 nRESET pin.
All of these objects will be initialised by the driver, so there is no need to
initialise them yourself. For example, you can use::
nic = network.WIZNET5K(pyb.SPI(1), pyb.Pin.board.X5, pyb.Pin.board.X4)
Methods
-------
.. method:: WIZNET5K.isconnected()
Returns ``True`` if the physical Ethernet link is connected and up.
Returns ``False`` otherwise.
.. method:: WIZNET5K.ifconfig([(ip, subnet, gateway, dns)])
Get/set IP address, subnet mask, gateway and DNS.
When called with no arguments, this method returns a 4-tuple with the above information.
To set the above values, pass a 4-tuple with the required information. For example::
nic.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))
.. method:: WIZNET5K.regs()
Dump the WIZnet5x00 registers. Useful for debugging.

View File

@@ -0,0 +1,136 @@
.. currentmodule:: network
.. _network.WLAN:
class WLAN -- control built-in WiFi interfaces
==============================================
This class provides a driver for WiFi network processors. Example usage::
import network
# enable station interface and connect to WiFi access point
nic = network.WLAN(network.STA_IF)
nic.active(True)
nic.connect('your-ssid', 'your-password')
# now use sockets as usual
Constructors
------------
.. class:: WLAN(interface_id)
Create a WLAN network interface object. Supported interfaces are
``network.STA_IF`` (station aka client, connects to upstream WiFi access
points) and ``network.AP_IF`` (access point, allows other WiFi clients to
connect). Availability of the methods below depends on interface type.
For example, only the STA interface may `WLAN.connect()` to an access point.
Methods
-------
.. method:: WLAN.active([is_active])
Activate ("up") or deactivate ("down") network interface, if boolean
argument is passed. Otherwise, query current state if no argument is
provided. Most other methods require active interface.
.. method:: WLAN.connect(ssid=None, password=None, *, bssid=None)
Connect to the specified wireless network, using the specified password.
If *bssid* is given then the connection will be restricted to the
access-point with that MAC address (the *ssid* must also be specified
in this case).
.. method:: WLAN.disconnect()
Disconnect from the currently connected wireless network.
.. method:: WLAN.scan()
Scan for the available wireless networks.
Hidden networks -- where the SSID is not broadcast -- will also be scanned
if the WLAN interface allows it.
Scanning is only possible on the STA interface. Returns a list of tuples with
the information about WiFi access points:
(ssid, bssid, channel, RSSI, authmode, hidden)
*bssid* is the hardware address of an access point, in binary form, returned as
a bytes object. You can use `binascii.hexlify()` to convert it to ASCII form.
There are five values for authmode:
* 0 -- open
* 1 -- WEP
* 2 -- WPA-PSK
* 3 -- WPA2-PSK
* 4 -- WPA/WPA2-PSK
and two for hidden:
* 0 -- visible
* 1 -- hidden
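As an illustration, the scan results can be iterated like this (a minimal sketch, assuming a
port that returns the SSID as a bytes object)::

    import binascii
    import network

    sta = network.WLAN(network.STA_IF)
    sta.active(True)
    for ssid, bssid, channel, rssi, authmode, hidden in sta.scan():
        print(ssid.decode(), binascii.hexlify(bssid).decode(), channel, rssi)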
.. method:: WLAN.status([param])
Return the current status of the wireless connection.
When called with no argument the return value describes the network link status.
The possible statuses are defined as constants:
* ``STAT_IDLE`` -- no connection and no activity,
* ``STAT_CONNECTING`` -- connecting in progress,
* ``STAT_WRONG_PASSWORD`` -- failed due to incorrect password,
* ``STAT_NO_AP_FOUND`` -- failed because no access point replied,
* ``STAT_CONNECT_FAIL`` -- failed due to other problems,
* ``STAT_GOT_IP`` -- connection successful.
When called with one argument, *param* should be a string naming the status
parameter to retrieve. Supported parameters in WiFi STA mode are: ``'rssi'``.
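For example (a sketch; the exact set of status constants and supported parameters is
port-dependent, and ``nic`` is assumed to be the station interface from the example at the
top of this page)::

    s = nic.status()
    if s == network.STAT_GOT_IP:
        print('connected, RSSI:', nic.status('rssi'))
    elif s == network.STAT_CONNECTING:
        print('still connecting...')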
.. method:: WLAN.isconnected()
In case of STA mode, returns ``True`` if connected to a WiFi access
point and has a valid IP address. In AP mode returns ``True`` when a
station is connected. Returns ``False`` otherwise.
.. method:: WLAN.ifconfig([(ip, subnet, gateway, dns)])
Get/set IP-level network interface parameters: IP address, subnet mask,
gateway and DNS server. When called with no arguments, this method returns
a 4-tuple with the above information. To set the above values, pass a
4-tuple with the required information. For example::
nic.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))
.. method:: WLAN.config('param')
WLAN.config(param=value, ...)
Get or set general network interface parameters. These methods allow you to work
with additional parameters beyond the standard IP configuration (as dealt with by
`WLAN.ifconfig()`). These include network-specific and hardware-specific
parameters. For setting parameters, the keyword argument syntax should be used,
and multiple parameters can be set at once. For querying, a parameter name should
be quoted as a string, and only one parameter can be queried at a time::
# Set WiFi access point name (formally known as ESSID) and WiFi channel
ap.config(essid='My AP', channel=11)
# Query params one by one
print(ap.config('essid'))
print(ap.config('channel'))
Following are commonly supported parameters (availability of a specific parameter
depends on network technology type, driver, and :term:`MicroPython port`).
============= ===========
Parameter Description
============= ===========
mac MAC address (bytes)
essid WiFi access point name (string)
channel WiFi channel (integer)
hidden Whether ESSID is hidden (boolean)
authmode Authentication mode supported (enumeration, see module constants)
password Access password (string)
dhcp_hostname The DHCP hostname to use
reconnects Number of reconnect attempts to make (integer, 0=none, -1=unlimited)
txpower Maximum transmit power in dBm (integer or float)
============= ===========

View File

@@ -0,0 +1,161 @@
.. currentmodule:: network
.. _network.WLANWiPy:
class WLANWiPy -- WiPy specific WiFi control
============================================
.. note::
This class is a non-standard WLAN implementation for the WiPy.
It is available simply as ``network.WLAN`` on the WiPy but is named in the
documentation below as ``network.WLANWiPy`` to distinguish it from the
more general :ref:`network.WLAN <network.WLAN>` class.
This class provides a driver for the WiFi network processor in the WiPy. Example usage::
import network
import time
# setup as a station
wlan = network.WLAN(mode=WLAN.STA)
wlan.connect('your-ssid', auth=(WLAN.WPA2, 'your-key'))
while not wlan.isconnected():
time.sleep_ms(50)
print(wlan.ifconfig())
# now use socket as usual
...
Constructors
------------
.. class:: WLANWiPy(id=0, ...)
Create a WLAN object, and optionally configure it. See `init()` for the configuration parameters.
.. note::
The ``WLAN`` constructor is special in the sense that if no arguments besides the id are given,
it will return the already existing ``WLAN`` instance without re-configuring it. This is
because ``WLAN`` is a system feature of the WiPy. If the already existing instance is not
initialised it will do the same as the other constructors and will initialise it with default
values.
Methods
-------
.. method:: WLANWiPy.init(mode, *, ssid, auth, channel, antenna)
Set or get the WiFi network processor configuration.
Arguments are:
- *mode* can be either ``WLAN.STA`` or ``WLAN.AP``.
- *ssid* is a string with the ssid name. Only needed when mode is ``WLAN.AP``.
- *auth* is a tuple with (sec, key). Security can be ``None``, ``WLAN.WEP``,
``WLAN.WPA`` or ``WLAN.WPA2``. The key is a string with the network password.
If ``sec`` is ``WLAN.WEP`` the key must be a string representing hexadecimal
values (e.g. 'ABC1DE45BF'). Only needed when mode is ``WLAN.AP``.
- *channel* a number in the range 1-11. Only needed when mode is ``WLAN.AP``.
- *antenna* selects between the internal and the external antenna. Can be either
``WLAN.INT_ANT`` or ``WLAN.EXT_ANT``.
For example, you can do::
# create and configure as an access point
wlan.init(mode=WLAN.AP, ssid='wipy-wlan', auth=(WLAN.WPA2,'www.wipy.io'), channel=7, antenna=WLAN.INT_ANT)
or::
# configure as a station
wlan.init(mode=WLAN.STA)
.. method:: WLANWiPy.connect(ssid, *, auth=None, bssid=None, timeout=None)
Connect to a WiFi access point using the given SSID, and other security
parameters.
- *auth* is a tuple with (sec, key). Security can be ``None``, ``WLAN.WEP``,
``WLAN.WPA`` or ``WLAN.WPA2``. The key is a string with the network password.
If ``sec`` is ``WLAN.WEP`` the key must be a string representing hexadecimal
values (e.g. 'ABC1DE45BF').
- *bssid* is the MAC address of the AP to connect to. Useful when there are several
APs with the same ssid.
- *timeout* is the maximum time in milliseconds to wait for the connection to succeed.
.. method:: WLANWiPy.scan()
Performs a network scan and returns a list of named tuples with (ssid, bssid, sec, channel, rssi).
Note that channel is always ``None`` since this info is not provided by the WiPy.
.. method:: WLANWiPy.disconnect()
Disconnect from the WiFi access point.
.. method:: WLANWiPy.isconnected()
In case of STA mode, returns ``True`` if connected to a WiFi access point and has a valid IP address.
In AP mode returns ``True`` when a station is connected, ``False`` otherwise.
.. method:: WLANWiPy.ifconfig(if_id=0, config=['dhcp' or configtuple])
With no parameters given it returns a 4-tuple of *(ip, subnet_mask, gateway, DNS_server)*.
If ``'dhcp'`` is passed as a parameter then the DHCP client is enabled and the IP parameters
are negotiated with the AP.
If the 4-tuple config is given then a static IP is configured. For instance::
wlan.ifconfig(config=('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))
.. method:: WLANWiPy.mode([mode])
Get or set the WLAN mode.
.. method:: WLANWiPy.ssid([ssid])
Get or set the SSID when in AP mode.
.. method:: WLANWiPy.auth([auth])
Get or set the authentication type when in AP mode.
.. method:: WLANWiPy.channel([channel])
Get or set the channel (only applicable in AP mode).
.. method:: WLANWiPy.antenna([antenna])
Get or set the antenna type (external or internal).
.. method:: WLANWiPy.mac([mac_addr])
Get or set a 6-byte long bytes object with the MAC address.
.. method:: WLANWiPy.irq(*, handler, wake)
Create a callback to be triggered when a WLAN event occurs during ``machine.SLEEP``
mode. Events are triggered by socket activity or by WLAN connection/disconnection.
- *handler* is the function that gets called when the IRQ is triggered.
- *wake* must be ``machine.SLEEP``.
Returns an IRQ object.
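A minimal sketch (the handler body is a placeholder, and the assumption that the handler
receives the triggering object as its single argument may differ between firmware versions)::

    import machine

    def wlan_wake_cb(obj):
        # placeholder: runs when WLAN activity wakes the board from machine.SLEEP
        print('woken by WLAN event')

    wlan_irq = wlan.irq(handler=wlan_wake_cb, wake=machine.SLEEP)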
Constants
---------
.. data:: WLANWiPy.STA
.. data:: WLANWiPy.AP
selects the WLAN mode
.. data:: WLANWiPy.WEP
.. data:: WLANWiPy.WPA
.. data:: WLANWiPy.WPA2
selects the network security
.. data:: WLANWiPy.INT_ANT
.. data:: WLANWiPy.EXT_ANT
selects the antenna type

View File

@@ -0,0 +1,174 @@
****************************************
:mod:`network` --- network configuration
****************************************
.. module:: network
:synopsis: network configuration
This module provides network drivers and routing configuration. To use this
module, a MicroPython variant/build with network capabilities must be installed.
Network drivers for specific hardware are available within this module and are
used to configure hardware network interface(s). Network services provided
by configured interfaces are then available for use via the :mod:`socket`
module.
For example::
# connect/ show IP config a specific network interface
# see below for examples of specific drivers
import network
import time
nic = network.Driver(...)
if not nic.isconnected():
nic.connect()
print("Waiting for connection...")
while not nic.isconnected():
time.sleep(1)
print(nic.ifconfig())
# now use socket as usual
import socket
addr = socket.getaddrinfo('micropython.org', 80)[0][-1]
s = socket.socket()
s.connect(addr)
s.send(b'GET / HTTP/1.1\r\nHost: micropython.org\r\n\r\n')
data = s.recv(1000)
s.close()
Common network adapter interface
================================
This section describes an (implied) abstract base class for all network
interface classes implemented by :term:`MicroPython ports <MicroPython port>`
for different hardware. This means that MicroPython does not actually
provide ``AbstractNIC`` class, but any actual NIC class, as described
in the following sections, implements methods as described here.
.. class:: AbstractNIC(id=None, ...)
Instantiate a network interface object. Parameters are network interface
dependent. If there is more than one interface of the same type, the first
parameter should be `id`.
.. method:: AbstractNIC.active([is_active])
Activate ("up") or deactivate ("down") the network interface, if
a boolean argument is passed. Otherwise, query current state if
no argument is provided. Most other methods require an active
interface (behaviour of calling them on inactive interface is
undefined).
.. method:: AbstractNIC.connect([service_id, key=None, *, ...])
Connect the interface to a network. This method is optional, and
available only for interfaces which are not "always connected".
If no parameters are given, connect to the default (or the only)
service. If a single parameter is given, it is the primary identifier
of a service to connect to. It may be accompanied by a key
(password) required to access said service. There can be further
arbitrary keyword-only parameters, depending on the networking medium
type and/or particular device. Parameters can be used to: a)
specify alternative service identifier types; b) provide additional
connection parameters. For various medium types, there are different
sets of predefined/recommended parameters, among them:
* WiFi: *bssid* keyword to connect to a specific BSSID (MAC address)
.. method:: AbstractNIC.disconnect()
Disconnect from network.
.. method:: AbstractNIC.isconnected()
Returns ``True`` if connected to network, otherwise returns ``False``.
.. method:: AbstractNIC.scan(*, ...)
Scan for the available network services/connections. Returns a
list of tuples with discovered service parameters. For various
network media, there are different variants of predefined/
recommended tuple formats, among them:
* WiFi: (ssid, bssid, channel, RSSI, authmode, hidden). There
may be further fields, specific to a particular device.
The function may accept additional keyword arguments to filter scan
results (e.g. scan for a particular service, on a particular channel,
for services of a particular set, etc.), and to affect scan
duration and other parameters. Where possible, parameter names
should match those in connect().
.. method:: AbstractNIC.status([param])
Query dynamic status information of the interface. When called with no
argument the return value describes the network link status. Otherwise
*param* should be a string naming the particular status parameter to
retrieve.
The return types and values are dependent on the network
medium/technology. Some of the parameters that may be supported are:
* WiFi STA: use ``'rssi'`` to retrieve the RSSI of the AP signal
* WiFi AP: use ``'stations'`` to retrieve a list of all the STAs
connected to the AP. The list contains tuples of the form
(MAC, RSSI).
.. method:: AbstractNIC.ifconfig([(ip, subnet, gateway, dns)])
Get/set IP-level network interface parameters: IP address, subnet mask,
gateway and DNS server. When called with no arguments, this method returns
a 4-tuple with the above information. To set the above values, pass a
4-tuple with the required information. For example::
nic.ifconfig(('192.168.0.4', '255.255.255.0', '192.168.0.1', '8.8.8.8'))
.. method:: AbstractNIC.config('param')
AbstractNIC.config(param=value, ...)
Get or set general network interface parameters. These methods allow you to work
with additional parameters beyond standard IP configuration (as dealt with by
`ifconfig()`). These include network-specific and hardware-specific
parameters. For setting parameters, the keyword argument
syntax should be used, and multiple parameters can be set at once. For
querying, a parameter name should be quoted as a string, and only one
parameter can be queried at a time::
# Set WiFi access point name (formally known as ESSID) and WiFi channel
ap.config(essid='My AP', channel=11)
# Query params one by one
print(ap.config('essid'))
print(ap.config('channel'))
Specific network class implementations
======================================
The following concrete classes implement the AbstractNIC interface and
provide a way to control networking interfaces of various kinds.
.. toctree::
:maxdepth: 1
network.WLAN.rst
network.WLANWiPy.rst
network.CC3K.rst
network.WIZNET5K.rst
network.LAN.rst
Network functions
=================
The following are functions available in the network module.
.. function:: phy_mode([mode])
Get or set the PHY mode.
If the *mode* parameter is provided, sets the mode to its value. If
the function is called without parameters, returns the current mode.
The possible modes are defined as constants:
* ``MODE_11B`` -- IEEE 802.11b,
* ``MODE_11G`` -- IEEE 802.11g,
* ``MODE_11N`` -- IEEE 802.11n.
Availability: ESP8266.
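For example, on the ESP8266 (a minimal sketch)::

    import network

    print(network.phy_mode())           # query the current PHY mode
    network.phy_mode(network.MODE_11G)  # switch to IEEE 802.11g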

View File

@@ -0,0 +1,327 @@
:mod:`os` -- basic "operating system" services
==============================================
.. module:: os
:synopsis: basic "operating system" services
|see_cpython_module| :mod:`python:os`.
The ``os`` module contains functions for filesystem access and mounting,
terminal redirection and duplication, and the ``uname`` and ``urandom``
functions.
General functions
-----------------
.. function:: uname()
Return a tuple (possibly a named tuple) containing information about the
underlying machine and/or its operating system. The tuple has five fields
in the following order, each of them being a string:
* ``sysname`` -- the name of the underlying system
* ``nodename`` -- the network name (can be the same as ``sysname``)
* ``release`` -- the version of the underlying system
* ``version`` -- the MicroPython version and build date
* ``machine`` -- an identifier for the underlying hardware (eg board, CPU)
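For example (a sketch; indexing always works, while attribute access is only available when
the port returns a named tuple)::

    import os

    u = os.uname()
    print(u[0], u[2])   # sysname and release by index
    print(u.machine)    # attribute access, if a named tuple is returned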
.. function:: urandom(n)
Return a bytes object with *n* random bytes. Whenever possible, it is
generated by the hardware random number generator.
Filesystem access
-----------------
.. function:: chdir(path)
Change current directory.
.. function:: getcwd()
Get the current directory.
.. function:: ilistdir([dir])
This function returns an iterator which then yields tuples corresponding to
the entries in the directory that it is listing. With no argument it lists the
current directory, otherwise it lists the directory given by *dir*.
The tuples have the form *(name, type, inode[, size])*:
- *name* is a string (or bytes if *dir* is a bytes object) and is the name of
the entry;
- *type* is an integer that specifies the type of the entry, with 0x4000 for
directories and 0x8000 for regular files;
- *inode* is an integer corresponding to the inode of the file, and may be 0
for filesystems that don't have such a notion.
- Some platforms may return a 4-tuple that includes the entry's *size*. For
file entries, *size* is an integer representing the size of the file
or -1 if unknown. Its meaning is currently undefined for directory
entries.
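For instance, the *type* field can be used to tell directories from regular files (a minimal
sketch using the constants given above; the directory scanned is arbitrary)::

    import os

    for entry in os.ilistdir('/'):
        name, kind = entry[0], entry[1]   # works for both 3- and 4-tuples
        label = 'dir ' if kind == 0x4000 else 'file'
        print(label, name)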
.. function:: listdir([dir])
With no argument, list the current directory. Otherwise list the given directory.
.. function:: mkdir(path)
Create a new directory.
.. function:: remove(path)
Remove a file.
.. function:: rmdir(path)
Remove a directory.
.. function:: rename(old_path, new_path)
Rename a file.
.. function:: stat(path)
Get the status of a file or directory.
.. function:: statvfs(path)
Get the status of a filesystem.
Returns a tuple with the filesystem information in the following order:
* ``f_bsize`` -- file system block size
* ``f_frsize`` -- fragment size
* ``f_blocks`` -- size of fs in f_frsize units
* ``f_bfree`` -- number of free blocks
* ``f_bavail`` -- number of free blocks for unprivileged users
* ``f_files`` -- number of inodes
* ``f_ffree`` -- number of free inodes
* ``f_favail`` -- number of free inodes for unprivileged users
* ``f_flag`` -- mount flags
* ``f_namemax`` -- maximum filename length
The parameters related to inodes (``f_files``, ``f_ffree``, ``f_favail``) and the
``f_flag`` parameter may return ``0``, as they can be unavailable in a
port-specific implementation.
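A common use is computing the free space on a filesystem (a sketch; the mount point ``'/'``
is an assumption and varies per port)::

    import os

    st = os.statvfs('/')
    free_bytes = st[1] * st[3]   # f_frsize * f_bfree
    print('free:', free_bytes, 'bytes')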
.. function:: sync()
Sync all filesystems.
Terminal redirection and duplication
------------------------------------
.. function:: dupterm(stream_object, index=0, /)
Duplicate or switch the MicroPython terminal (the REPL) on the given `stream`-like
object. The *stream_object* argument must be a native stream object, or derive
from ``io.IOBase`` and implement the ``readinto()`` and
``write()`` methods. The stream should be in non-blocking mode and
``readinto()`` should return ``None`` if there is no data available for reading.
After calling this function all terminal output is repeated on this stream,
and any input that is available on the stream is passed on to the terminal input.
The *index* parameter should be a non-negative integer and specifies which
duplication slot is set. A given port may implement more than one slot (slot 0
will always be available) and in that case terminal input and output is
duplicated on all the slots that are set.
If ``None`` is passed as the *stream_object* then duplication is cancelled on
the slot given by *index*.
The function returns the previous stream-like object in the given slot.
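A typical use is mirroring the REPL on a UART (a sketch; the UART id, baud rate and default
pins are placeholders and port-specific)::

    import os
    from machine import UART

    uart = UART(1, 115200)    # placeholder UART configuration
    prev = os.dupterm(uart)   # REPL is now also available on this UART (slot 0)
    # ... later, stop duplicating on slot 0:
    os.dupterm(None)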
Filesystem mounting
-------------------
Some ports provide a Virtual Filesystem (VFS) and the ability to mount multiple
"real" filesystems within this VFS. Filesystem objects can be mounted at either
the root of the VFS, or at a subdirectory that lives in the root. This allows
dynamic and flexible configuration of the filesystem that is seen by Python
programs. Ports that have this functionality provide the :func:`mount` and
:func:`umount` functions, and possibly various filesystem implementations
represented by VFS classes.
.. function:: mount(fsobj, mount_point, *, readonly)
Mount the filesystem object *fsobj* at the location in the VFS given by the
*mount_point* string. *fsobj* can be a VFS object that has a ``mount()``
method, or a block device. If it's a block device then the filesystem type
is automatically detected (an exception is raised if no filesystem was
recognised). *mount_point* may be ``'/'`` to mount *fsobj* at the root,
or ``'/<name>'`` to mount it at a subdirectory under the root.
If *readonly* is ``True`` then the filesystem is mounted read-only.
During the mount process the method ``mount()`` is called on the filesystem
object.
Will raise ``OSError(EPERM)`` if *mount_point* is already mounted.
.. function:: umount(mount_point)
Unmount a filesystem. *mount_point* can be a string naming the mount location,
or a previously-mounted filesystem object. During the unmount process the
method ``umount()`` is called on the filesystem object.
Will raise ``OSError(EINVAL)`` if *mount_point* is not found.
.. class:: VfsFat(block_dev)
Create a filesystem object that uses the FAT filesystem format. Storage of
the FAT filesystem is provided by *block_dev*.
Objects created by this constructor can be mounted using :func:`mount`.
.. staticmethod:: mkfs(block_dev)
Build a FAT filesystem on *block_dev*.
.. class:: VfsLfs1(block_dev, readsize=32, progsize=32, lookahead=32)
Create a filesystem object that uses the `littlefs v1 filesystem format`_.
Storage of the littlefs filesystem is provided by *block_dev*, which must
support the :ref:`extended interface <block-device-interface>`.
Objects created by this constructor can be mounted using :func:`mount`.
See :ref:`filesystem` for more information.
.. staticmethod:: mkfs(block_dev, readsize=32, progsize=32, lookahead=32)
Build a Lfs1 filesystem on *block_dev*.
.. note:: There are reports of littlefs v1 failing in certain situations,
for details see `littlefs issue 347`_.
.. class:: VfsLfs2(block_dev, readsize=32, progsize=32, lookahead=32, mtime=True)
Create a filesystem object that uses the `littlefs v2 filesystem format`_.
Storage of the littlefs filesystem is provided by *block_dev*, which must
support the :ref:`extended interface <block-device-interface>`.
Objects created by this constructor can be mounted using :func:`mount`.
The *mtime* argument enables modification timestamps for files, stored using
littlefs attributes. This option can be disabled or enabled differently each
mount time and timestamps will only be added or updated if *mtime* is enabled,
otherwise the timestamps will remain untouched. Littlefs v2 filesystems without
timestamps will work without reformatting and timestamps will be added
transparently to existing files once they are opened for writing. When *mtime*
is enabled `os.stat` on files without timestamps will return 0 for the timestamp.
See :ref:`filesystem` for more information.
.. staticmethod:: mkfs(block_dev, readsize=32, progsize=32, lookahead=32)
Build a Lfs2 filesystem on *block_dev*.
.. note:: There are reports of littlefs v2 failing in certain situations,
for details see `littlefs issue 295`_.
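Putting the pieces together, formatting and mounting a littlefs v2 filesystem typically looks
like this (a sketch; ``bdev`` stands for any suitable block device object you provide, see the
next section)::

    import os

    os.VfsLfs2.mkfs(bdev)    # build the filesystem (erases existing data)
    os.mount(os.VfsLfs2(bdev), '/data')
    with open('/data/hello.txt', 'w') as f:
        f.write('hello littlefs')
    os.umount('/data')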
.. _littlefs v1 filesystem format: https://github.com/ARMmbed/littlefs/tree/v1
.. _littlefs v2 filesystem format: https://github.com/ARMmbed/littlefs
.. _littlefs issue 295: https://github.com/ARMmbed/littlefs/issues/295
.. _littlefs issue 347: https://github.com/ARMmbed/littlefs/issues/347
Block devices
-------------
A block device is an object which implements the block protocol. This enables a
device to support MicroPython filesystems. The physical hardware is represented
by a user defined class. The :class:`AbstractBlockDev` class is a template for
the design of such a class: MicroPython does not actually provide that class,
but an actual block device class must implement the methods described below.
A concrete implementation of this class will usually allow access to the
memory-like functionality of a piece of hardware (like flash memory). A block
device can be formatted to any supported filesystem and mounted using ``os``
methods.
See :ref:`filesystem` for example implementations of block devices using the
two variants of the block protocol described below.
.. _block-device-interface:
Simple and extended interface
.............................
There are two compatible signatures for the ``readblocks`` and ``writeblocks``
methods (see below), in order to support a variety of use cases. A given block
device may implement one form or the other, or both at the same time. The second
form (with the offset parameter) is referred to as the "extended interface".
Some filesystems (such as littlefs) that require more control over write
operations, for example writing to sub-block regions without erasing, may require
that the block device supports the extended interface.
.. class:: AbstractBlockDev(...)
Construct a block device object. The parameters to the constructor are
dependent on the specific block device.
.. method:: readblocks(block_num, buf)
readblocks(block_num, buf, offset)
The first form reads aligned multiples of blocks.
Starting at the block given by the index *block_num*, read blocks from
the device into *buf* (an array of bytes).
The number of blocks to read is given by the length of *buf*,
which will be a multiple of the block size.
The second form allows reading at arbitrary locations within a block,
and arbitrary lengths.
Starting at block index *block_num*, and byte offset within that block
of *offset*, read bytes from the device into *buf* (an array of bytes).
The number of bytes to read is given by the length of *buf*.
.. method:: writeblocks(block_num, buf)
writeblocks(block_num, buf, offset)
The first form writes aligned multiples of blocks, and requires that the
blocks that are written to be first erased (if necessary) by this method.
Starting at the block given by the index *block_num*, write blocks from
*buf* (an array of bytes) to the device.
The number of blocks to write is given by the length of *buf*,
which will be a multiple of the block size.
The second form allows writing at arbitrary locations within a block,
and arbitrary lengths. Only the bytes being written should be changed,
and the caller of this method must ensure that the relevant blocks are
erased via a prior ``ioctl`` call.
Starting at block index *block_num*, and byte offset within that block
of *offset*, write bytes from *buf* (an array of bytes) to the device.
The number of bytes to write is given by the length of *buf*.
Note that implementations must never implicitly erase blocks if the offset
argument is specified, even if it is zero.
.. method:: ioctl(op, arg)
Control the block device and query its parameters. The operation to
perform is given by *op* which is one of the following integers:
- 1 -- initialise the device (*arg* is unused)
- 2 -- shutdown the device (*arg* is unused)
- 3 -- sync the device (*arg* is unused)
- 4 -- get a count of the number of blocks, should return an integer
(*arg* is unused)
- 5 -- get the number of bytes in a block, should return an integer,
or ``None`` in which case the default value of 512 is used
(*arg* is unused)
- 6 -- erase a block, *arg* is the block number to erase
As a minimum ``ioctl(4, ...)`` must be intercepted; for littlefs
``ioctl(6, ...)`` must also be intercepted. The need for others is
hardware dependent.
Prior to any call to ``writeblocks(block, ...)`` littlefs issues
``ioctl(6, block)``. This enables a device driver to erase the block
prior to a write if the hardware requires it. Alternatively a driver
might intercept ``ioctl(6, block)`` and return 0 (success). In this case
the driver assumes responsibility for detecting the need for erasure.
Unless otherwise stated ``ioctl(op, arg)`` can return ``None``.
Consequently an implementation can ignore unused values of ``op``. Where
``op`` is intercepted, the return value for operations 4 and 5 are as
detailed above. Other operations should return 0 on success and non-zero
for failure, with the value returned being an ``OSError`` errno code.
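To make the protocol concrete, here is a minimal in-RAM block device supporting the extended
interface, in the spirit of the examples referenced above (a sketch only; block size and count
are arbitrary, and a real driver would map these calls onto actual hardware)::

    class RAMBlockDev:
        def __init__(self, block_size=512, num_blocks=64):
            self.block_size = block_size
            self.data = bytearray(block_size * num_blocks)

        def readblocks(self, block_num, buf, offset=0):
            addr = block_num * self.block_size + offset
            for i in range(len(buf)):
                buf[i] = self.data[addr + i]

        def writeblocks(self, block_num, buf, offset=None):
            if offset is None:
                # simple interface: erase (a no-op for RAM), then write whole blocks
                offset = 0
            addr = block_num * self.block_size + offset
            for i in range(len(buf)):
                self.data[addr + i] = buf[i]

        def ioctl(self, op, arg):
            if op == 4:  # block count
                return len(self.data) // self.block_size
            if op == 5:  # block size
                return self.block_size
            if op == 6:  # erase a block: nothing to do for RAM
                return 0

Such an object can then be formatted and mounted with, for example, ``os.VfsLfs2.mkfs()`` and
``os.mount()`` as shown in the previous section.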
