initial commit
This commit is contained in:
131
.gitignore
vendored
Normal file
131
.gitignore
vendored
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
database.db
|
||||||
|
|
||||||
|
# Byte-compiled / optimized / DLL files
|
||||||
|
__pycache__/
|
||||||
|
*.py[cod]
|
||||||
|
*$py.class
|
||||||
|
|
||||||
|
# C extensions
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Distribution / packaging
|
||||||
|
.Python
|
||||||
|
build/
|
||||||
|
develop-eggs/
|
||||||
|
dist/
|
||||||
|
downloads/
|
||||||
|
eggs/
|
||||||
|
.eggs/
|
||||||
|
lib/
|
||||||
|
lib64/
|
||||||
|
parts/
|
||||||
|
sdist/
|
||||||
|
var/
|
||||||
|
wheels/
|
||||||
|
pip-wheel-metadata/
|
||||||
|
share/python-wheels/
|
||||||
|
*.egg-info/
|
||||||
|
.installed.cfg
|
||||||
|
*.egg
|
||||||
|
MANIFEST
|
||||||
|
|
||||||
|
# PyInstaller
|
||||||
|
# Usually these files are written by a python script from a template
|
||||||
|
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||||
|
*.manifest
|
||||||
|
*.spec
|
||||||
|
|
||||||
|
# Installer logs
|
||||||
|
pip-log.txt
|
||||||
|
pip-delete-this-directory.txt
|
||||||
|
|
||||||
|
# Unit test / coverage reports
|
||||||
|
htmlcov/
|
||||||
|
.tox/
|
||||||
|
.nox/
|
||||||
|
.coverage
|
||||||
|
.coverage.*
|
||||||
|
.cache
|
||||||
|
nosetests.xml
|
||||||
|
coverage.xml
|
||||||
|
*.cover
|
||||||
|
*.py,cover
|
||||||
|
.hypothesis/
|
||||||
|
.pytest_cache/
|
||||||
|
|
||||||
|
# Translations
|
||||||
|
*.mo
|
||||||
|
*.pot
|
||||||
|
|
||||||
|
# Django stuff:
|
||||||
|
*.log
|
||||||
|
local_settings.py
|
||||||
|
db.sqlite3
|
||||||
|
db.sqlite3-journal
|
||||||
|
|
||||||
|
# Flask stuff:
|
||||||
|
instance/
|
||||||
|
.webassets-cache
|
||||||
|
|
||||||
|
# Scrapy stuff:
|
||||||
|
.scrapy
|
||||||
|
|
||||||
|
# Sphinx documentation
|
||||||
|
docs/_build/
|
||||||
|
|
||||||
|
# PyBuilder
|
||||||
|
target/
|
||||||
|
|
||||||
|
# Jupyter Notebook
|
||||||
|
.ipynb_checkpoints
|
||||||
|
|
||||||
|
# IPython
|
||||||
|
profile_default/
|
||||||
|
ipython_config.py
|
||||||
|
|
||||||
|
# pyenv
|
||||||
|
.python-version
|
||||||
|
|
||||||
|
# pipenv
|
||||||
|
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||||
|
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||||
|
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||||
|
# install all needed dependencies.
|
||||||
|
#Pipfile.lock
|
||||||
|
|
||||||
|
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
|
||||||
|
__pypackages__/
|
||||||
|
|
||||||
|
# Celery stuff
|
||||||
|
celerybeat-schedule
|
||||||
|
celerybeat.pid
|
||||||
|
|
||||||
|
# SageMath parsed files
|
||||||
|
*.sage.py
|
||||||
|
|
||||||
|
# Environments
|
||||||
|
.env
|
||||||
|
.venv
|
||||||
|
env/
|
||||||
|
venv/
|
||||||
|
ENV/
|
||||||
|
env.bak/
|
||||||
|
venv.bak/
|
||||||
|
|
||||||
|
# Spyder project settings
|
||||||
|
.spyderproject
|
||||||
|
.spyproject
|
||||||
|
|
||||||
|
# Rope project settings
|
||||||
|
.ropeproject
|
||||||
|
|
||||||
|
# mkdocs documentation
|
||||||
|
/site
|
||||||
|
|
||||||
|
# mypy
|
||||||
|
.mypy_cache/
|
||||||
|
.dmypy.json
|
||||||
|
dmypy.json
|
||||||
|
|
||||||
|
# Pyre type checker
|
||||||
|
.pyre/
|
||||||
339
LICENSE
Normal file
339
LICENSE
Normal file
@@ -0,0 +1,339 @@
|
|||||||
|
GNU GENERAL PUBLIC LICENSE
|
||||||
|
Version 2, June 1991
|
||||||
|
|
||||||
|
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||||
|
Everyone is permitted to copy and distribute verbatim copies
|
||||||
|
of this license document, but changing it is not allowed.
|
||||||
|
|
||||||
|
Preamble
|
||||||
|
|
||||||
|
The licenses for most software are designed to take away your
|
||||||
|
freedom to share and change it. By contrast, the GNU General Public
|
||||||
|
License is intended to guarantee your freedom to share and change free
|
||||||
|
software--to make sure the software is free for all its users. This
|
||||||
|
General Public License applies to most of the Free Software
|
||||||
|
Foundation's software and to any other program whose authors commit to
|
||||||
|
using it. (Some other Free Software Foundation software is covered by
|
||||||
|
the GNU Lesser General Public License instead.) You can apply it to
|
||||||
|
your programs, too.
|
||||||
|
|
||||||
|
When we speak of free software, we are referring to freedom, not
|
||||||
|
price. Our General Public Licenses are designed to make sure that you
|
||||||
|
have the freedom to distribute copies of free software (and charge for
|
||||||
|
this service if you wish), that you receive source code or can get it
|
||||||
|
if you want it, that you can change the software or use pieces of it
|
||||||
|
in new free programs; and that you know you can do these things.
|
||||||
|
|
||||||
|
To protect your rights, we need to make restrictions that forbid
|
||||||
|
anyone to deny you these rights or to ask you to surrender the rights.
|
||||||
|
These restrictions translate to certain responsibilities for you if you
|
||||||
|
distribute copies of the software, or if you modify it.
|
||||||
|
|
||||||
|
For example, if you distribute copies of such a program, whether
|
||||||
|
gratis or for a fee, you must give the recipients all the rights that
|
||||||
|
you have. You must make sure that they, too, receive or can get the
|
||||||
|
source code. And you must show them these terms so they know their
|
||||||
|
rights.
|
||||||
|
|
||||||
|
We protect your rights with two steps: (1) copyright the software, and
|
||||||
|
(2) offer you this license which gives you legal permission to copy,
|
||||||
|
distribute and/or modify the software.
|
||||||
|
|
||||||
|
Also, for each author's protection and ours, we want to make certain
|
||||||
|
that everyone understands that there is no warranty for this free
|
||||||
|
software. If the software is modified by someone else and passed on, we
|
||||||
|
want its recipients to know that what they have is not the original, so
|
||||||
|
that any problems introduced by others will not reflect on the original
|
||||||
|
authors' reputations.
|
||||||
|
|
||||||
|
Finally, any free program is threatened constantly by software
|
||||||
|
patents. We wish to avoid the danger that redistributors of a free
|
||||||
|
program will individually obtain patent licenses, in effect making the
|
||||||
|
program proprietary. To prevent this, we have made it clear that any
|
||||||
|
patent must be licensed for everyone's free use or not licensed at all.
|
||||||
|
|
||||||
|
The precise terms and conditions for copying, distribution and
|
||||||
|
modification follow.
|
||||||
|
|
||||||
|
GNU GENERAL PUBLIC LICENSE
|
||||||
|
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||||
|
|
||||||
|
0. This License applies to any program or other work which contains
|
||||||
|
a notice placed by the copyright holder saying it may be distributed
|
||||||
|
under the terms of this General Public License. The "Program", below,
|
||||||
|
refers to any such program or work, and a "work based on the Program"
|
||||||
|
means either the Program or any derivative work under copyright law:
|
||||||
|
that is to say, a work containing the Program or a portion of it,
|
||||||
|
either verbatim or with modifications and/or translated into another
|
||||||
|
language. (Hereinafter, translation is included without limitation in
|
||||||
|
the term "modification".) Each licensee is addressed as "you".
|
||||||
|
|
||||||
|
Activities other than copying, distribution and modification are not
|
||||||
|
covered by this License; they are outside its scope. The act of
|
||||||
|
running the Program is not restricted, and the output from the Program
|
||||||
|
is covered only if its contents constitute a work based on the
|
||||||
|
Program (independent of having been made by running the Program).
|
||||||
|
Whether that is true depends on what the Program does.
|
||||||
|
|
||||||
|
1. You may copy and distribute verbatim copies of the Program's
|
||||||
|
source code as you receive it, in any medium, provided that you
|
||||||
|
conspicuously and appropriately publish on each copy an appropriate
|
||||||
|
copyright notice and disclaimer of warranty; keep intact all the
|
||||||
|
notices that refer to this License and to the absence of any warranty;
|
||||||
|
and give any other recipients of the Program a copy of this License
|
||||||
|
along with the Program.
|
||||||
|
|
||||||
|
You may charge a fee for the physical act of transferring a copy, and
|
||||||
|
you may at your option offer warranty protection in exchange for a fee.
|
||||||
|
|
||||||
|
2. You may modify your copy or copies of the Program or any portion
|
||||||
|
of it, thus forming a work based on the Program, and copy and
|
||||||
|
distribute such modifications or work under the terms of Section 1
|
||||||
|
above, provided that you also meet all of these conditions:
|
||||||
|
|
||||||
|
a) You must cause the modified files to carry prominent notices
|
||||||
|
stating that you changed the files and the date of any change.
|
||||||
|
|
||||||
|
b) You must cause any work that you distribute or publish, that in
|
||||||
|
whole or in part contains or is derived from the Program or any
|
||||||
|
part thereof, to be licensed as a whole at no charge to all third
|
||||||
|
parties under the terms of this License.
|
||||||
|
|
||||||
|
c) If the modified program normally reads commands interactively
|
||||||
|
when run, you must cause it, when started running for such
|
||||||
|
interactive use in the most ordinary way, to print or display an
|
||||||
|
announcement including an appropriate copyright notice and a
|
||||||
|
notice that there is no warranty (or else, saying that you provide
|
||||||
|
a warranty) and that users may redistribute the program under
|
||||||
|
these conditions, and telling the user how to view a copy of this
|
||||||
|
License. (Exception: if the Program itself is interactive but
|
||||||
|
does not normally print such an announcement, your work based on
|
||||||
|
the Program is not required to print an announcement.)
|
||||||
|
|
||||||
|
These requirements apply to the modified work as a whole. If
|
||||||
|
identifiable sections of that work are not derived from the Program,
|
||||||
|
and can be reasonably considered independent and separate works in
|
||||||
|
themselves, then this License, and its terms, do not apply to those
|
||||||
|
sections when you distribute them as separate works. But when you
|
||||||
|
distribute the same sections as part of a whole which is a work based
|
||||||
|
on the Program, the distribution of the whole must be on the terms of
|
||||||
|
this License, whose permissions for other licensees extend to the
|
||||||
|
entire whole, and thus to each and every part regardless of who wrote it.
|
||||||
|
|
||||||
|
Thus, it is not the intent of this section to claim rights or contest
|
||||||
|
your rights to work written entirely by you; rather, the intent is to
|
||||||
|
exercise the right to control the distribution of derivative or
|
||||||
|
collective works based on the Program.
|
||||||
|
|
||||||
|
In addition, mere aggregation of another work not based on the Program
|
||||||
|
with the Program (or with a work based on the Program) on a volume of
|
||||||
|
a storage or distribution medium does not bring the other work under
|
||||||
|
the scope of this License.
|
||||||
|
|
||||||
|
3. You may copy and distribute the Program (or a work based on it,
|
||||||
|
under Section 2) in object code or executable form under the terms of
|
||||||
|
Sections 1 and 2 above provided that you also do one of the following:
|
||||||
|
|
||||||
|
a) Accompany it with the complete corresponding machine-readable
|
||||||
|
source code, which must be distributed under the terms of Sections
|
||||||
|
1 and 2 above on a medium customarily used for software interchange; or,
|
||||||
|
|
||||||
|
b) Accompany it with a written offer, valid for at least three
|
||||||
|
years, to give any third party, for a charge no more than your
|
||||||
|
cost of physically performing source distribution, a complete
|
||||||
|
machine-readable copy of the corresponding source code, to be
|
||||||
|
distributed under the terms of Sections 1 and 2 above on a medium
|
||||||
|
customarily used for software interchange; or,
|
||||||
|
|
||||||
|
c) Accompany it with the information you received as to the offer
|
||||||
|
to distribute corresponding source code. (This alternative is
|
||||||
|
allowed only for noncommercial distribution and only if you
|
||||||
|
received the program in object code or executable form with such
|
||||||
|
an offer, in accord with Subsection b above.)
|
||||||
|
|
||||||
|
The source code for a work means the preferred form of the work for
|
||||||
|
making modifications to it. For an executable work, complete source
|
||||||
|
code means all the source code for all modules it contains, plus any
|
||||||
|
associated interface definition files, plus the scripts used to
|
||||||
|
control compilation and installation of the executable. However, as a
|
||||||
|
special exception, the source code distributed need not include
|
||||||
|
anything that is normally distributed (in either source or binary
|
||||||
|
form) with the major components (compiler, kernel, and so on) of the
|
||||||
|
operating system on which the executable runs, unless that component
|
||||||
|
itself accompanies the executable.
|
||||||
|
|
||||||
|
If distribution of executable or object code is made by offering
|
||||||
|
access to copy from a designated place, then offering equivalent
|
||||||
|
access to copy the source code from the same place counts as
|
||||||
|
distribution of the source code, even though third parties are not
|
||||||
|
compelled to copy the source along with the object code.
|
||||||
|
|
||||||
|
4. You may not copy, modify, sublicense, or distribute the Program
|
||||||
|
except as expressly provided under this License. Any attempt
|
||||||
|
otherwise to copy, modify, sublicense or distribute the Program is
|
||||||
|
void, and will automatically terminate your rights under this License.
|
||||||
|
However, parties who have received copies, or rights, from you under
|
||||||
|
this License will not have their licenses terminated so long as such
|
||||||
|
parties remain in full compliance.
|
||||||
|
|
||||||
|
5. You are not required to accept this License, since you have not
|
||||||
|
signed it. However, nothing else grants you permission to modify or
|
||||||
|
distribute the Program or its derivative works. These actions are
|
||||||
|
prohibited by law if you do not accept this License. Therefore, by
|
||||||
|
modifying or distributing the Program (or any work based on the
|
||||||
|
Program), you indicate your acceptance of this License to do so, and
|
||||||
|
all its terms and conditions for copying, distributing or modifying
|
||||||
|
the Program or works based on it.
|
||||||
|
|
||||||
|
6. Each time you redistribute the Program (or any work based on the
|
||||||
|
Program), the recipient automatically receives a license from the
|
||||||
|
original licensor to copy, distribute or modify the Program subject to
|
||||||
|
these terms and conditions. You may not impose any further
|
||||||
|
restrictions on the recipients' exercise of the rights granted herein.
|
||||||
|
You are not responsible for enforcing compliance by third parties to
|
||||||
|
this License.
|
||||||
|
|
||||||
|
7. If, as a consequence of a court judgment or allegation of patent
|
||||||
|
infringement or for any other reason (not limited to patent issues),
|
||||||
|
conditions are imposed on you (whether by court order, agreement or
|
||||||
|
otherwise) that contradict the conditions of this License, they do not
|
||||||
|
excuse you from the conditions of this License. If you cannot
|
||||||
|
distribute so as to satisfy simultaneously your obligations under this
|
||||||
|
License and any other pertinent obligations, then as a consequence you
|
||||||
|
may not distribute the Program at all. For example, if a patent
|
||||||
|
license would not permit royalty-free redistribution of the Program by
|
||||||
|
all those who receive copies directly or indirectly through you, then
|
||||||
|
the only way you could satisfy both it and this License would be to
|
||||||
|
refrain entirely from distribution of the Program.
|
||||||
|
|
||||||
|
If any portion of this section is held invalid or unenforceable under
|
||||||
|
any particular circumstance, the balance of the section is intended to
|
||||||
|
apply and the section as a whole is intended to apply in other
|
||||||
|
circumstances.
|
||||||
|
|
||||||
|
It is not the purpose of this section to induce you to infringe any
|
||||||
|
patents or other property right claims or to contest validity of any
|
||||||
|
such claims; this section has the sole purpose of protecting the
|
||||||
|
integrity of the free software distribution system, which is
|
||||||
|
implemented by public license practices. Many people have made
|
||||||
|
generous contributions to the wide range of software distributed
|
||||||
|
through that system in reliance on consistent application of that
|
||||||
|
system; it is up to the author/donor to decide if he or she is willing
|
||||||
|
to distribute software through any other system and a licensee cannot
|
||||||
|
impose that choice.
|
||||||
|
|
||||||
|
This section is intended to make thoroughly clear what is believed to
|
||||||
|
be a consequence of the rest of this License.
|
||||||
|
|
||||||
|
8. If the distribution and/or use of the Program is restricted in
|
||||||
|
certain countries either by patents or by copyrighted interfaces, the
|
||||||
|
original copyright holder who places the Program under this License
|
||||||
|
may add an explicit geographical distribution limitation excluding
|
||||||
|
those countries, so that distribution is permitted only in or among
|
||||||
|
countries not thus excluded. In such case, this License incorporates
|
||||||
|
the limitation as if written in the body of this License.
|
||||||
|
|
||||||
|
9. The Free Software Foundation may publish revised and/or new versions
|
||||||
|
of the General Public License from time to time. Such new versions will
|
||||||
|
be similar in spirit to the present version, but may differ in detail to
|
||||||
|
address new problems or concerns.
|
||||||
|
|
||||||
|
Each version is given a distinguishing version number. If the Program
|
||||||
|
specifies a version number of this License which applies to it and "any
|
||||||
|
later version", you have the option of following the terms and conditions
|
||||||
|
either of that version or of any later version published by the Free
|
||||||
|
Software Foundation. If the Program does not specify a version number of
|
||||||
|
this License, you may choose any version ever published by the Free Software
|
||||||
|
Foundation.
|
||||||
|
|
||||||
|
10. If you wish to incorporate parts of the Program into other free
|
||||||
|
programs whose distribution conditions are different, write to the author
|
||||||
|
to ask for permission. For software which is copyrighted by the Free
|
||||||
|
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||||
|
make exceptions for this. Our decision will be guided by the two goals
|
||||||
|
of preserving the free status of all derivatives of our free software and
|
||||||
|
of promoting the sharing and reuse of software generally.
|
||||||
|
|
||||||
|
NO WARRANTY
|
||||||
|
|
||||||
|
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||||
|
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||||
|
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||||
|
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||||
|
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||||
|
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||||
|
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||||
|
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||||
|
REPAIR OR CORRECTION.
|
||||||
|
|
||||||
|
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||||
|
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||||
|
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||||
|
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||||
|
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||||
|
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||||
|
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||||
|
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||||
|
POSSIBILITY OF SUCH DAMAGES.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
How to Apply These Terms to Your New Programs
|
||||||
|
|
||||||
|
If you develop a new program, and you want it to be of the greatest
|
||||||
|
possible use to the public, the best way to achieve this is to make it
|
||||||
|
free software which everyone can redistribute and change under these terms.
|
||||||
|
|
||||||
|
To do so, attach the following notices to the program. It is safest
|
||||||
|
to attach them to the start of each source file to most effectively
|
||||||
|
convey the exclusion of warranty; and each file should have at least
|
||||||
|
the "copyright" line and a pointer to where the full notice is found.
|
||||||
|
|
||||||
|
<one line to give the program's name and a brief idea of what it does.>
|
||||||
|
Copyright (C) <year> <name of author>
|
||||||
|
|
||||||
|
This program is free software; you can redistribute it and/or modify
|
||||||
|
it under the terms of the GNU General Public License as published by
|
||||||
|
the Free Software Foundation; either version 2 of the License, or
|
||||||
|
(at your option) any later version.
|
||||||
|
|
||||||
|
This program is distributed in the hope that it will be useful,
|
||||||
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
GNU General Public License for more details.
|
||||||
|
|
||||||
|
You should have received a copy of the GNU General Public License along
|
||||||
|
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||||
|
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||||
|
|
||||||
|
Also add information on how to contact you by electronic and paper mail.
|
||||||
|
|
||||||
|
If the program is interactive, make it output a short notice like this
|
||||||
|
when it starts in an interactive mode:
|
||||||
|
|
||||||
|
Gnomovision version 69, Copyright (C) year name of author
|
||||||
|
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||||
|
This is free software, and you are welcome to redistribute it
|
||||||
|
under certain conditions; type `show c' for details.
|
||||||
|
|
||||||
|
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||||
|
parts of the General Public License. Of course, the commands you use may
|
||||||
|
be called something other than `show w' and `show c'; they could even be
|
||||||
|
mouse-clicks or menu items--whatever suits your program.
|
||||||
|
|
||||||
|
You should also get your employer (if you work as a programmer) or your
|
||||||
|
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||||
|
necessary. Here is a sample; alter the names:
|
||||||
|
|
||||||
|
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||||
|
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||||
|
|
||||||
|
<signature of Ty Coon>, 1 April 1989
|
||||||
|
Ty Coon, President of Vice
|
||||||
|
|
||||||
|
This General Public License does not permit incorporating your program into
|
||||||
|
proprietary programs. If your program is a subroutine library, you may
|
||||||
|
consider it more useful to permit linking proprietary applications with the
|
||||||
|
library. If this is what you want to do, use the GNU Lesser General
|
||||||
|
Public License instead of this License.
|
||||||
25
README.md
Normal file
25
README.md
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# Python-With-Gtk-Template
|
||||||
|
A template project for Python with Gtk applications.
|
||||||
|
|
||||||
|
### Requirements
|
||||||
|
* PyGObject (Gtk introspection library)
|
||||||
|
* pygobject-stubs (For actually getting pylsp or python-language-server to auto complete in LSPs. Do if GTK3 --no-cache-dir --config-settings=config=Gtk3,Gdk3,Soup2)
|
||||||
|
* pyxdg (Desktop ".desktop" file parser)
|
||||||
|
* setproctitle (Define process title to search and kill more easily)
|
||||||
|
* sqlmodel (SQL databases and is powered by Pydantic and SQLAlchemy)
|
||||||
|
|
||||||
|
### Note
|
||||||
|
* pyrightconfig.json can prompt IDEs that use pyright lsp on where imports are located- look at venvPath and venv. "venvPath" is parent path of "venv" where "venv" is just the name of the folder under the parent path that is the python created venv.
|
||||||
|
* Move the respective sub folder content under user_config to the same places in Linux. Though, usr/share/<app name> can go to the ~/.config folder if preferred.
|
||||||
|
* In addition, place the plugins folder in the same app folder you moved to /usr/share/<app name> or ~/.config/<app name>.
|
||||||
|
There are \<change_me\> strings and files that need to be set according to your app's name, located at:
|
||||||
|
* \_\_builtins\_\_.py
|
||||||
|
* user_config/bin/app_name
|
||||||
|
* user_config/usr/share/app_name
|
||||||
|
* user_config/usr/share/app_name/icons/app_name.png
|
||||||
|
* user_config/usr/share/app_name/icons/app_name-64x64.png
|
||||||
|
* user_config/usr/share/applications/app_name.desktop
|
||||||
|
|
||||||
|
|
||||||
|
For the user_config, after changing names and files, copy all content to their respective destinations.
|
||||||
|
The logic follows Debian Dpkg packaging and its placement logic.
|
||||||
31
plugins/README.txt
Normal file
31
plugins/README.txt
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
### Note
|
||||||
|
Copy the example and rename it to your desired name. Plugins define a UI target slot with the 'ui_target' requests data, but they don't have to if they are not directly interacted with.
|
||||||
|
Plugins must have a run method defined; though, you do not need to necessarily do anything within it. The run method implies that the passed in event system or other data is ready for the plugin to use.
|
||||||
|
|
||||||
|
|
||||||
|
### Manifest Example (All are required even if empty.)
|
||||||
|
```
|
||||||
|
class Manifest:
|
||||||
|
name: str = "Example Plugin"
|
||||||
|
author: str = "John Doe"
|
||||||
|
version: str = "0.0.1"
|
||||||
|
support: str = ""
|
||||||
|
pre_launch: bool = False
|
||||||
|
    requests: dict = {
|
||||||
|
'pass_ui_objects': ["plugin_control_list"],
|
||||||
|
'pass_events': True,
|
||||||
|
'bind_keys': []
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### Requests
|
||||||
|
```
|
||||||
|
requests: dict = {
|
||||||
|
	'pass_events': True, # If empty or not present will be ignored.
|
||||||
|
"pass_ui_objects": [""], # Request reference to a UI component. Will be passed back as array to plugin.
|
||||||
|
	'bind_keys': [f"{name}||send_message:<Control>f",
|
||||||
|
	              f"{name}||do_save:<Control>s"] # Bind keys with method and key pair using a list. Must pass "name" like shown, with the delimiter to its right.
|
||||||
|
|
||||||
|
}
|
||||||
|
```
|
||||||
3
plugins/autopairs/__init__.py
Normal file
3
plugins/autopairs/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Module
|
||||||
|
"""
|
||||||
3
plugins/autopairs/__main__.py
Normal file
3
plugins/autopairs/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Package
|
||||||
|
"""
|
||||||
10
plugins/autopairs/manifest.json
Normal file
10
plugins/autopairs/manifest.json
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"name": "Autopairs",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"credit": "Hamad Al Marri",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true
|
||||||
|
}
|
||||||
|
}
|
||||||
140
plugins/autopairs/plugin.py
Normal file
140
plugins/autopairs/plugin.py
Normal file
@@ -0,0 +1,140 @@
|
|||||||
|
# Python imports
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: Threads WILL NOT die with parent's destruction.
def threaded(fn):
    """Decorator: run *fn* in a fresh non-daemon thread on each call.

    The wrapper returns None immediately; the spawned thread keeps the
    process alive until *fn* finishes (daemon=False).
    """
    def _spawn(*args, **kwargs):
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False)
        worker.start()
    return _spawn
|
||||||
|
|
||||||
|
# NOTE: Threads WILL die with parent's destruction.
def daemon_threaded(fn):
    """Decorator: run *fn* in a fresh daemon thread on each call.

    The wrapper returns None immediately; the spawned thread is killed
    when the interpreter exits (daemon=True).
    """
    def _spawn(*args, **kwargs):
        worker = threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
        worker.start()
    return _spawn
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(PluginBase):
|
||||||
|
def __init__(self):
|
||||||
|
super().__init__()
|
||||||
|
|
||||||
|
self.name = "Autopairs" # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
|
||||||
|
# where self.name should not be needed for message comms
|
||||||
|
|
||||||
|
self.chars = {
|
||||||
|
"quotedbl": "\"",
|
||||||
|
"apostrophe": "'",
|
||||||
|
"parenleft": "(",
|
||||||
|
"bracketleft": "[",
|
||||||
|
"braceleft": "{",
|
||||||
|
"less": "<",
|
||||||
|
"grave": "`",
|
||||||
|
}
|
||||||
|
|
||||||
|
self.close = {
|
||||||
|
"\"": "\"",
|
||||||
|
"'": "'",
|
||||||
|
"(": ")",
|
||||||
|
"[": "]",
|
||||||
|
"{": "}",
|
||||||
|
"<": ">",
|
||||||
|
"`": "`",
|
||||||
|
}
|
||||||
|
|
||||||
|
def generate_reference_ui_element(self):
|
||||||
|
...
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
...
|
||||||
|
|
||||||
|
def subscribe_to_events(self):
|
||||||
|
self._event_system.subscribe("set_active_src_view", self._set_active_src_view)
|
||||||
|
self._event_system.subscribe("autopairs", self._autopairs)
|
||||||
|
|
||||||
|
def _set_active_src_view(self, source_view):
|
||||||
|
self._active_src_view = source_view
|
||||||
|
self._buffer = self._active_src_view.get_buffer()
|
||||||
|
self._tag_table = self._buffer.get_tag_table()
|
||||||
|
|
||||||
|
def _autopairs(self, keyval_name, ctrl, alt, shift):
|
||||||
|
if keyval_name in self.chars:
|
||||||
|
return self.text_insert(self._buffer, keyval_name)
|
||||||
|
|
||||||
|
# NOTE: All of below to EOF, lovingly taken from Hamad Al Marri's Gamma
|
||||||
|
# text editor. I did do some cleanup of comments but otherwise pretty
|
||||||
|
# much the same code just fitted to my plugin architecture.
|
||||||
|
# Link: https://gitlab.com/hamadmarri/gamma-text-editor
|
||||||
|
def text_insert(self, buffer, text):
|
||||||
|
selection = buffer.get_selection_bounds()
|
||||||
|
if selection == ():
|
||||||
|
return self.add_close(buffer, text, )
|
||||||
|
else:
|
||||||
|
return self.add_enclose(buffer, text, selection)
|
||||||
|
|
||||||
|
def add_close(self, buffer, text):
    """
    Insert the open/close pair for key name *text* at the cursor and park the
    cursor between the two characters.

    Returns False (no insertion) when the character after the cursor is an
    identifier-like character, so typing "(" before a word doesn't pair.
    """
    opener = self.chars[text]
    pair = opener + self.close[opener]

    position = buffer.get_iter_at_mark( buffer.get_insert() )

    # Pair only when the next char is a separator (or end-of-buffer "") or an
    # existing closing character; otherwise let the keystroke insert normally.
    next_char = position.get_char()
    separators = (" ", "", ";", ":", "\t", ",", ".", "\n", "\r")
    if next_char not in separators and next_char not in self.close.values():
        return False

    buffer.insert(position, pair)

    # Step the cursor back between the inserted open/close characters.
    cursor = buffer.get_iter_at_mark(buffer.get_insert())
    cursor.backward_char()
    buffer.place_cursor(cursor)

    return True
|
def add_enclose(self, buffer, text, selection):
    """
    Wrap the current selection in the character pair mapped to key name *text*
    and re-select the original text (now one char further in).

    :param buffer: the GtkTextBuffer being edited.
    :param str text: keyval name ("parenleft", "quotedbl", ...), a key of self.chars.
    :param tuple selection: (start, end) TextIters from get_selection_bounds().
    :returns: True when the selection was wrapped, False when it was skipped.
    """
    (start, end) = selection
    selected = buffer.get_text(start, end, False)
    # Don't wrap when the selection itself is just a short operator / pair
    # token. FIX: the original tuple was missing a comma between ">>>" and
    # "<<", which silently concatenated them into ">>><<" and let ">>>" and
    # "<<" selections be wrapped.
    if len(selected) <= 3 and selected in ("<", ">", ">>>",
                                           "<<", ">>",
                                           "\"", "'", "`",
                                           "(", ")",
                                           "[", "]",
                                           "{", "}",
                                           "=", "==",
                                           "!=", "==="):
        return False

    # Marks keep the endpoints valid across the inserts below.
    start_mark = buffer.create_mark("startclose", start, False)
    end_mark = buffer.create_mark("endclose", end, False)

    buffer.begin_user_action()

    t = self.chars[text]
    buffer.insert(start, t)
    end = buffer.get_iter_at_mark(end_mark)
    t = self.close[t]
    buffer.insert(end, t)

    # Re-select the original span (excluding the new closing char).
    start = buffer.get_iter_at_mark(start_mark)
    end = buffer.get_iter_at_mark(end_mark)
    end.backward_char()
    buffer.select_range(start, end)

    buffer.end_user_action()

    # FIX: delete the named marks — creating a mark whose name already exists
    # in the buffer is an error in Gtk, so leaving them would break the next
    # call on the same buffer.
    buffer.delete_mark(start_mark)
    buffer.delete_mark(end_mark)

    return True
||||||
3
plugins/colorize/__init__.py
Normal file
3
plugins/colorize/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Module
|
||||||
|
"""
|
||||||
3
plugins/colorize/__main__.py
Normal file
3
plugins/colorize/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Package
|
||||||
|
"""
|
||||||
107
plugins/colorize/color_converter_mixin.py
Normal file
107
plugins/colorize/color_converter_mixin.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
# Python imports
|
||||||
|
import colorsys
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
class ColorConverterMixin:
    """
    Converts hsl()/hsv()/hex color strings into rgb()/rgba() strings.

    NOTE: HSV, HSL, and hex-alpha parsing are available in Gtk 4.0, not lower.
    So, for compatibility, we convert to an rgba string ourselves before the
    text is handed to Gdk.RGBA.parse.
    """

    def get_color_text(self, buffer, start, end):
        """
        Extract the text between *start* and *end* and normalize it to a
        string Gdk.RGBA.parse understands. Unconvertible text is returned
        unchanged (it simply won't parse downstream).
        """
        text = buffer.get_text(start, end, include_hidden_chars = False)

        try:
            if "hsl" in text:
                text = self.hsl_to_rgb(text)

            if "hsv" in text:
                text = self.hsv_to_rgb(text)

            # Only alpha-carrying hex forms need conversion; plain #RGB /
            # #RRGGBB parse natively even in Gtk 3.
            if text and text[0] == "#":
                hex_digits = text[1:]
                size = len(hex_digits)
                if size in (4, 8, 16):
                    # FIX: the converted value was previously printed and
                    # discarded instead of being returned.
                    text = self.hex_to_rgba(hex_digits, size)
        except Exception:
            # Best effort: fall through with whatever we have so far.
            ...

        return text

    def hex_to_rgba(self, hex_digits, size):
        """
        Convert a hex digit string (no leading '#') to an "rgba(...)" string.

        :param str hex_digits: the raw hex digits (4, 6, 8 or 16 of them).
        :param int size: len(hex_digits); selects the channel layout.
        :raises ValueError: when *size* is not a supported layout (previously
            this crashed iterating None).
        """
        rgba = []
        slots = None
        step = 2
        base = 16  # renamed from 'bytes' to stop shadowing the builtin

        if size == 4:    # NOTE: RGBA
            step = 1
            slots = (0, 1, 2, 3)

        if size == 6:    # NOTE: RR GG BB
            slots = (0, 2, 4)

        if size == 8:    # NOTE: RR GG BB AA
            step = 2
            slots = (0, 2, 4, 6)

        if size == 16:   # NOTE: RRRR GGGG BBBB AAAA
            step = 4
            slots = (0, 4, 8, 12)

        if slots is None:
            raise ValueError(f"unsupported hex color length: {size}")

        for i in slots:
            v = int(hex_digits[i : i + step], base)
            rgba.append(v)

        # NOTE(review): 1- and 4-digit-per-channel values are emitted
        # unscaled (0-15 / 0-65535); Gdk.RGBA.parse expects 0-255 channels —
        # confirm whether the size-4/size-16 layouts need scaling.
        rgb_sub = ','.join(map(str, tuple(rgba)))

        return f"rgba({rgb_sub})"

    def hsl_to_rgb(self, text):
        """Convert "hsl(H, S%, L%)" (optionally with "deg") to "rgb(r,g,b)"."""
        _h, _s, _l = text.replace("hsl", "") \
                         .replace("deg", "") \
                         .replace("(", "") \
                         .replace(")", "") \
                         .replace("%", "") \
                         .replace(" ", "") \
                         .split(",")

        # FIX: float() instead of int() so fractional hues like "120.5" parse.
        h, s, l = float(_h) / 360, float(_s) / 100, float(_l) / 100

        # colorsys uses HLS argument ordering (hue, lightness, saturation).
        rgb = tuple(round(i * 255) for i in colorsys.hls_to_rgb(h, l, s))
        rgb_sub = ','.join(map(str, rgb))

        return f"rgb({rgb_sub})"

    def hsv_to_rgb(self, text):
        """Convert "hsv(H, S%, V%)" (optionally with "deg") to "rgb(r,g,b)"."""
        _h, _s, _v = text.replace("hsv", "") \
                         .replace("deg", "") \
                         .replace("(", "") \
                         .replace(")", "") \
                         .replace("%", "") \
                         .replace(" ", "") \
                         .split(",")

        # FIX: float() instead of int() so fractional hues parse too.
        h, s, v = float(_h) / 360, float(_s) / 100, float(_v) / 100

        rgb = tuple(round(i * 255) for i in colorsys.hsv_to_rgb(h, s, v))
        rgb_sub = ','.join(map(str, rgb))

        return f"rgb({rgb_sub})"
||||||
9
plugins/colorize/manifest.json
Normal file
9
plugins/colorize/manifest.json
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
{
|
||||||
|
"name": "Colorize",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true
|
||||||
|
}
|
||||||
|
}
|
||||||
228
plugins/colorize/plugin.py
Normal file
228
plugins/colorize/plugin.py
Normal file
@@ -0,0 +1,228 @@
|
|||||||
|
# Python imports
|
||||||
|
import random
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
gi.require_version('Gdk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
from gi.repository import Gdk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
from .color_converter_mixin import ColorConverterMixin
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(ColorConverterMixin, PluginBase):
    """
    Colorize plugin: scans the buffer for CSS-style color literals
    (rgb/rgba, hsl, hsv, #hex) and applies a background tag matching
    each color, one GtkTextTag per distinct color string.
    """

    def __init__(self):
        super().__init__()

        self.name = "Colorize"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                # where self.name should not be needed for message comms
        # Prefix shared by every tag this plugin creates; used later to
        # recognize our own tags among the buffer's tag table.
        self.tag_stub_name = "colorize_tag"
        # Most recent buffer handed to us by the editor.
        self._buffer = None

    def run(self):
        # No startup work needed.
        ...

    def generate_reference_ui_element(self):
        # This plugin contributes no UI widget.
        ...

    def subscribe_to_events(self):
        """Register handlers on the host's event system."""
        self._event_system.subscribe("set_active_src_view", self._set_active_src_view)
        self._event_system.subscribe("buffer_changed_first_load", self._buffer_changed_first_load)
        self._event_system.subscribe("buffer_changed", self._buffer_changed)

    def _set_active_src_view(self, source_view):
        # Track the focused view; its buffer arrives via buffer_changed events.
        self._active_src_view = source_view

    def _buffer_changed_first_load(self, buffer):
        """First load of a buffer: colorize all of it."""
        self._buffer = buffer
        self._do_colorize(buffer)

    def _buffer_changed(self, buffer):
        """Subsequent edits: re-colorize only the region around the cursor."""
        # Pause event processing so our own tag edits don't re-trigger us.
        self._event_system.emit("pause_event_processing")
        self._handle_colorize(buffer)
        self._event_system.emit("resume_event_processing")

    def _handle_colorize(self, buffer):
        """
        Determine the region around the insert cursor that needs re-scanning.

        If the cursor sits inside one of our colorize tags, find that tagged
        span, strip the stale tag, and re-scan it; otherwise use a window of
        up to 25 characters either side of the cursor.
        """
        self._buffer = buffer
        tag_table = buffer.get_tag_table()   # NOTE(review): unused here — confirm
        mark = buffer.get_insert()
        start = None
        end = buffer.get_iter_at_mark(mark)

        i = 0
        walker_iter = end.copy()
        working_tag = self.find_working_tag(walker_iter, i)
        if working_tag:
            start = self.find_start_range(walker_iter, working_tag)

            self.find_end_range(end, working_tag)
            buffer.remove_tag(working_tag, start, end)
        else:
            # NOTE(review): traverse_backward_25_or_less has no return value,
            # so 'start' stays None and _do_colorize falls back to the buffer
            # start — 'start = walker_iter' may have been intended; confirm.
            start = self.traverse_backward_25_or_less(walker_iter)
            self.traverse_forward_25_or_less(end)

        self._do_colorize(buffer, start, end)

    def find_working_tag(self, walker_iter, i):
        """
        Walk backward (at most 25 chars) from walker_iter looking for one of
        our colorize tags; return it, or None if none is found in range.
        """
        tags = walker_iter.get_tags()
        for tag in tags:
            if tag.props.name and self.tag_stub_name in tag.props.name:
                return tag

        res = walker_iter.backward_char()

        if not res: return   # hit buffer start
        if i > 25: return    # search window exhausted
        return self.find_working_tag(walker_iter, i + 1)

    def find_start_range(self, walker_iter, working_tag):
        """Move walker_iter (in place) back to the first char carrying working_tag."""
        tags = walker_iter.get_tags()
        for tag in tags:
            if tag.props.name and working_tag.props.name in tag.props.name:
                res = walker_iter.backward_char()
                if res:
                    self.find_start_range(walker_iter, working_tag)

        return walker_iter

    def find_end_range(self, end, working_tag):
        """Advance 'end' (in place) past the last char carrying working_tag."""
        tags = end.get_tags()
        for tag in tags:
            if tag.props.name and working_tag.props.name in tag.props.name:
                res = end.forward_char()
                if res:
                    self.find_end_range(end, working_tag)

    def traverse_backward_25_or_less(self, walker_iter):
        # Mutates walker_iter in place; stops early at buffer start.
        i = 1
        while i <= 25:
            res = walker_iter.backward_char()
            if not res: break
            i += 1

    def traverse_forward_25_or_less(self, end):
        # Mutates 'end' in place; stops early at buffer end.
        i = 1
        while i <= 25:
            res = end.forward_char()
            if not res: break
            i += 1

    def _do_colorize(self, buffer = None, start_itr = None, end_itr = None):
        """Scan the given range (whole buffer by default) and tag color literals."""
        # rgb(a), hsl, hsv
        results = self.finalize_non_hex_matches( self.collect_preliminary_results(buffer, start_itr, end_itr) )
        self.process_results(buffer, results)

        # hex color search
        results = self.finalize_hex_matches( self.collect_preliminary_hex_results(buffer, start_itr, end_itr) )
        self.process_results(buffer, results)

    def collect_preliminary_results(self, buffer = None, start_itr = None, end_itr = None):
        """Find every 'rgb'/'hsl'/'hsv' prefix hit as (start, end) iter pairs."""
        if not buffer: return []

        if not start_itr:
            start_itr = buffer.get_start_iter()

        results1 = self.search(start_itr, end_itr, "rgb")
        results2 = self.search(start_itr, end_itr, "hsl")
        results3 = self.search(start_itr, end_itr, "hsv")

        return results1 + results2 + results3

    def collect_preliminary_hex_results(self, buffer = None, start_itr = None, end_itr = None):
        """Find every '#' occurrence; candidates are validated by finalize_hex_matches."""
        if not buffer: return []

        if not start_itr:
            start_itr = buffer.get_start_iter()

        results1 = self.search(start_itr, end_itr, "#")

        return results1

    def search(self, start_itr = None, end_itr = None, query = None):
        """
        Collect every forward-search hit for *query*; an end_itr of None
        searches to the end of the buffer.
        """
        if not start_itr or not query: return None, None

        results = []
        flags = Gtk.TextSearchFlags.VISIBLE_ONLY | Gtk.TextSearchFlags.TEXT_ONLY
        while True:
            result = start_itr.forward_search(query, flags, end_itr)
            if not result: break

            results.append(result)
            start_itr = result[1]   # continue searching after this match

        return results

    def finalize_non_hex_matches(self, result_hits: [] = []):
        """
        Grow each rgb/hsl/hsv prefix hit to cover its full '...)' literal;
        hits not followed by '(' are discarded.
        """
        # NOTE(review): mutable default argument — harmless while only read,
        # but 'result_hits=None' with a guard would be safer.
        results = []

        for start, end in result_hits:
            if end.get_char() == "a":   # 'rgb' may actually be 'rgba'
                end.forward_char()

            if end.get_char() != "(":
                continue

            # Typical literal fits within 21 chars; check that fast path first.
            end.forward_chars(21)
            if end.get_char() == ")":
                end.forward_char()
                results.append([start, end])
                continue

            # Otherwise walk backward until ')' is found ('(' means no match).
            while end.get_char() != "(":
                if end.get_char() == ")":
                    end.forward_char()
                    results.append([start, end])
                    break

                end.forward_chars(-1)

        return results

    def finalize_hex_matches(self, result_hits: [] = []):
        """Extend each '#' hit over its hex digits; keep plausible lengths only."""
        results = []

        for start, end in result_hits:
            i = 0
            _ch = end.get_char()
            ch = ord(end.get_char()) if _ch else -1

            # Consume [0-9A-Fa-f] characters, at most 16 of them.
            while ((ch >= 48 and ch <= 57) or (ch >= 65 and ch <= 70) or (ch >= 97 and ch <= 102)):
                if i > 16: break

                i += 1
                end.forward_char()
                _ch = end.get_char()
                ch = ord(end.get_char()) if _ch else -1

            # Accepted hex color digit counts.
            if i in [3, 4, 6, 8, 9, 12, 16]:
                results.append([start, end])

        return results

    def process_results(self, buffer, results):
        """Tag every span whose (normalized) text parses as a Gdk.RGBA color."""
        for start, end in results:
            text = self.get_color_text(buffer, start, end)
            color = Gdk.RGBA()

            if color.parse(text):
                tag = self.get_colorized_tag(buffer, text, color)
                buffer.apply_tag(tag, start, end)

    def get_colorized_tag(self, buffer, tag, color: Gdk.RGBA):
        """Look up (or lazily create) the background tag for this color string."""
        tag_table = buffer.get_tag_table()
        colorize_tag = f"{self.tag_stub_name}_{tag}"
        search_tag = tag_table.lookup(colorize_tag)
        if not search_tag:
            search_tag = buffer.create_tag(colorize_tag, background_rgba = color)

        return search_tag
||||||
3
plugins/commentzar/__init__.py
Normal file
3
plugins/commentzar/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Module
|
||||||
|
"""
|
||||||
3
plugins/commentzar/__main__.py
Normal file
3
plugins/commentzar/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Package
|
||||||
|
"""
|
||||||
66
plugins/commentzar/add_comment_mixin.py
Executable file
66
plugins/commentzar/add_comment_mixin.py
Executable file
@@ -0,0 +1,66 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AddCommentMixin:
    """Mixin that comments-out a span of lines (or toggles an existing comment off)."""

    def add_comment_characters(self, buffer, start_tag, end_tag, start, end, deselect, oldPos):
        """
        Insert start_tag (and end_tag, when the language has one) on every
        line between 'start' and 'end'. If the first line is already
        commented, delegates to remove_comment_characters (toggle behavior).

        :param buffer: text buffer being edited.
        :param str start_tag: comment opener, e.g. "#" or "/*".
        :param end_tag: block comment closer, or None for line comments.
        :param start, end: iters bounding the affected lines.
        :param bool deselect: True when there was no selection (cursor only).
        :param int oldPos: original cursor offset, restored when deselect is True.
        """
        # Marks keep positions valid while inserts shift the buffer.
        smark = buffer.create_mark("start", start, False)
        imark = buffer.create_mark("iter", start, False)
        emark = buffer.create_mark("end", end, False)
        number_lines = end.get_line() - start.get_line() + 1
        comment_pos_iter = None
        count = 0   # leading whitespace width of the first line

        buffer.begin_user_action()

        for i in range(0, number_lines):
            iter = buffer.get_iter_at_mark(imark)
            if not comment_pos_iter:
                (comment_pos_iter, count) = self.discard_white_spaces(iter)

            if self.is_commented(comment_pos_iter, start_tag):
                # Already commented: toggle off instead.
                # NOTE(review): returning here skips end_user_action() and the
                # mark cleanup below — confirm whether that unbalances undo
                # grouping / leaks the named marks.
                new_code = self.remove_comment_characters(buffer, start_tag, end_tag, start, end)
                return
            else:
                comment_pos_iter = iter
                # Skip up to 'count' leading spaces/tabs so the tag aligns
                # with the first line's indentation.
                # NOTE(review): this inner loop shadows the outer loop's 'i',
                # which the 'i != number_lines - 1' test below then reads.
                for i in range(count):
                    c = iter.get_char()
                    if not c in (" ", "\t"):
                        break

                    iter.forward_char()

                buffer.insert(comment_pos_iter, start_tag)
                buffer.insert(comment_pos_iter, " ")

            if end_tag:
                if i != number_lines -1:
                    # Close the comment at this line's end.
                    iter = buffer.get_iter_at_mark(imark)
                    iter.forward_to_line_end()
                    buffer.insert(iter, end_tag)
                else:
                    # Last line: close at the original selection end.
                    iter = buffer.get_iter_at_mark(emark)
                    buffer.insert(iter, end_tag)

            # Advance the working mark to the next line.
            iter = buffer.get_iter_at_mark(imark)
            iter.forward_line()
            buffer.delete_mark(imark)
            imark = buffer.create_mark("iter", iter, True)

        buffer.end_user_action()

        buffer.delete_mark(imark)
        new_start = buffer.get_iter_at_mark(smark)
        new_end = buffer.get_iter_at_mark(emark)

        # Restore the (now shifted) selection and drop the helper marks.
        buffer.select_range(new_start, new_end)
        buffer.delete_mark(smark)
        buffer.delete_mark(emark)

        if deselect:
            # Cursor-only invocation: put the cursor back, shifted past the
            # inserted tag + space. NOTE(review): '+ 2' assumes a one-char
            # start_tag — confirm for multi-char tags like "//".
            oldPosIter = buffer.get_iter_at_offset(oldPos + 2)
            buffer.place_cursor(oldPosIter)
||||||
30
plugins/commentzar/codecomment_tags.py
Executable file
30
plugins/commentzar/codecomment_tags.py
Executable file
@@ -0,0 +1,30 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class CodeCommentTags:
    """Resolve the comment delimiters declared by a language definition's metadata."""

    def get_comment_tags(self, lang):
        """Prefer line comments; fall back to block comments. Returns (start, end)."""
        tags = self.get_line_comment_tags(lang)
        if tags == (None, None):
            tags = self.get_block_comment_tags(lang)

        return tags

    def get_block_comment_tags(self, lang):
        """Return (block-start, block-end) delimiters, or (None, None) if either is absent."""
        opener = lang.get_metadata('block-comment-start')
        closer = lang.get_metadata('block-comment-end')
        return (opener, closer) if opener and closer else (None, None)

    def get_line_comment_tags(self, lang):
        """Return (line-comment-start, None), or (None, None) when the language has none."""
        marker = lang.get_metadata('line-comment-start')
        return (marker, None) if marker else (None, None)
||||||
11
plugins/commentzar/manifest.json
Normal file
11
plugins/commentzar/manifest.json
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"name": "Commentzar",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"credit": "Hamad Al Marri",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true,
|
||||||
|
"bind_keys": ["Commentzar||keyboard_tggl_comment:<Control>slash"]
|
||||||
|
}
|
||||||
|
}
|
||||||
118
plugins/commentzar/plugin.py
Normal file
118
plugins/commentzar/plugin.py
Normal file
@@ -0,0 +1,118 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
from .codecomment_tags import CodeCommentTags
|
||||||
|
from .remove_comment_mixin import RemoveCommentMixin
|
||||||
|
from .add_comment_mixin import AddCommentMixin
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(AddCommentMixin, RemoveCommentMixin, CodeCommentTags, PluginBase):
    """
    Commentzar plugin: toggles line/block comments on the current line or
    selection, bound to <Control>slash via the plugin manifest.
    """

    def __init__(self):
        super().__init__()

        self.name = "Commentzar"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                  # where self.name should not be needed for message comms

    def generate_reference_ui_element(self):
        # This plugin contributes no UI widget.
        ...

    def run(self):
        # No startup work needed.
        ...

    def subscribe_to_events(self):
        """Register the keybinding handler and the active-view tracker."""
        self._event_system.subscribe("keyboard_tggl_comment", self._keyboard_tggl_comment)
        self._event_system.subscribe("set_active_src_view", self._set_active_src_view)

    def _set_active_src_view(self, source_view):
        # Track the focused view plus its buffer and tag table.
        self._active_src_view = source_view
        self._buffer = self._active_src_view.get_buffer()
        self._tag_table = self._buffer.get_tag_table()

    def _keyboard_tggl_comment(self):
        """
        Toggle comments for the current selection, or for the cursor's line
        when nothing is selected. No-ops when the buffer has no language or
        the language declares no comment delimiters.
        """
        buffer = self._buffer
        lang = buffer.get_language()
        if lang is None:
            return

        (start_tag, end_tag) = self.get_comment_tags(lang)
        if not start_tag and not end_tag:
            return

        sel = buffer.get_selection_bounds()
        currentPosMark = buffer.get_insert()
        oldPos = 0

        # if user selected chars or multilines
        if sel != ():
            deselect = False
            (start, end) = sel
            # Expand the selection to whole lines.
            if not start.starts_line():
                start.set_line_offset(0)
            if not end.ends_line():
                end.forward_to_line_end()
        else:
            # Cursor only: operate on the current line, remembering the
            # cursor offset so it can be restored afterwards.
            deselect = True
            start = buffer.get_iter_at_mark(currentPosMark)
            oldPos = buffer.get_iter_at_mark(currentPosMark).get_offset()
            start.set_line_offset(0)
            end = start.copy()

            if not end.ends_line():
                end.forward_to_line_end()

            # Empty line: just insert the opener followed by a space.
            if start.get_offset() == end.get_offset():
                buffer.begin_user_action()
                buffer.insert(start, start_tag)
                buffer.insert(start, " ")
                buffer.end_user_action()
                return

        # Pause event processing so our own buffer edits don't re-trigger us.
        self._event_system.emit("pause_event_processing")
        new_code = self.add_comment_characters(buffer, start_tag, end_tag, start, end, deselect, oldPos)
        self._event_system.emit("resume_event_processing")

    def discard_white_spaces(self, iter):
        """
        Advance *iter* past leading spaces/tabs; return (iter, count) where
        count is the number skipped. NOTE(review): an all-whitespace line
        returns count 0 rather than the actual width — confirm intent.
        """
        count = 0
        while not iter.ends_line():
            c = iter.get_char()
            if not c in (" ", "\t"):
                return (iter, count)

            iter.forward_char()
            count += 1

        return (iter, 0)

    def is_commented(self, comment_pos_iter, start_tag):
        """Return True when the text at comment_pos_iter begins with start_tag."""
        head_iter = comment_pos_iter.copy()
        self.forward_tag(head_iter, start_tag)
        s = comment_pos_iter.get_slice(head_iter)
        if s == start_tag:
            return True

        return False

    def forward_tag(self, iter, tag):
        # Advance the iter by the tag's character length.
        iter.forward_chars(len(tag))

    def backward_tag(self, iter, tag):
        # Retreat the iter by the tag's character length.
        iter.backward_chars(len(tag))

    def get_tag_position_in_line(self, tag, head_iter, iter):
        """
        Slide the (iter, head_iter) window along the line until the slice
        between them equals *tag*; both iters are mutated in place. Returns
        True when found before end of line.
        """
        while not iter.ends_line():
            s = iter.get_slice(head_iter)
            if s == tag:
                return True
            else:
                head_iter.forward_char()
                iter.forward_char()
        return False
||||||
49
plugins/commentzar/remove_comment_mixin.py
Executable file
49
plugins/commentzar/remove_comment_mixin.py
Executable file
@@ -0,0 +1,49 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class RemoveCommentMixin:
    """Mixin that strips comment delimiters from a span of lines."""

    def remove_comment_characters(self, buffer, start_tag, end_tag, start, end):
        """
        Delete start_tag (plus one following space, if present) and, for
        block comments, end_tag from every line between 'start' and 'end'.

        :param buffer: text buffer being edited.
        :param str start_tag: comment opener to remove.
        :param end_tag: block comment closer to remove, or None.
        :param start, end: iters bounding the affected lines.
        """
        # Marks keep the endpoints valid while deletions shift the buffer.
        smark = buffer.create_mark("start", start, False)
        emark = buffer.create_mark("end", end, False)
        number_lines = end.get_line() - start.get_line() + 1
        iter = start.copy()
        head_iter = iter.copy()
        self.forward_tag(head_iter, start_tag)

        buffer.begin_user_action()

        for i in range(0, number_lines):
            if self.get_tag_position_in_line(start_tag, head_iter, iter):
                dmark = buffer.create_mark("delete", iter, False)
                # Remove the opener itself.
                buffer.delete(iter, head_iter)

                # Also drop the single space inserted after the opener.
                space_iter = head_iter.copy()
                space_iter.forward_char()
                s = head_iter.get_slice(space_iter)
                if s == " ":
                    buffer.delete(head_iter, space_iter)

                if end_tag:
                    # Find and remove the matching closer on this line.
                    iter = buffer.get_iter_at_mark(dmark)
                    head_iter = iter.copy()
                    self.forward_tag(head_iter, end_tag)
                    if self.get_tag_position_in_line(end_tag, head_iter, iter):
                        buffer.delete(iter, head_iter)
                buffer.delete_mark(dmark)

            # Advance to the next line and rebuild the search window.
            iter = buffer.get_iter_at_mark(smark)
            iter.forward_line()
            buffer.delete_mark(smark)
            head_iter = iter.copy()
            self.forward_tag(head_iter, start_tag)
            smark = buffer.create_mark("iter", iter, True)

        buffer.end_user_action()

        buffer.delete_mark(smark)
        buffer.delete_mark(emark)
||||||
3
plugins/lsp_client/__init__.py
Normal file
3
plugins/lsp_client/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Module
|
||||||
|
"""
|
||||||
3
plugins/lsp_client/__main__.py
Normal file
3
plugins/lsp_client/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Package
|
||||||
|
"""
|
||||||
182
plugins/lsp_client/client_ipc.py
Normal file
182
plugins/lsp_client/client_ipc.py
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
# Python imports
|
||||||
|
import traceback
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
import base64
|
||||||
|
from multiprocessing.connection import Client
|
||||||
|
from multiprocessing.connection import Listener
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
from gi.repository import GLib
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from .lsp_message_structs import LSPResponseRequest
|
||||||
|
from .lsp_message_structs import LSPResponseNotification, LSPIDResponseNotification
|
||||||
|
from .lsp_message_structs import get_message_obj
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class ClientIPC:
    """
    IPC messenger between this LSP-client plugin and the external LSP
    Manager process, built on multiprocessing.connection (Unix domain
    socket or TCP, selected by conn_type).

    Incoming manager messages arrive base64-encoded and prefixed with
    "MANAGER|"; outgoing messages to the manager are sent with "CLIENT|".

    NOTE(review): 'logger', 'settings_manager', and 'daemon_threaded' are
    neither defined nor imported in this module — presumably injected as
    builtins by the plugin host at load time; confirm.
    """

    def __init__(self, ipc_address: str = '127.0.0.1', conn_type: str = "socket"):
        # Listener loop keeps accepting connections while this is True.
        self.is_ipc_alive = False
        self._ipc_port = 4848
        self._ipc_address = ipc_address
        self._conn_type = conn_type
        # Auth keys for our endpoint and for the manager's endpoint.
        self._ipc_authkey = b'' + bytes(f'lsp-client-endpoint-ipc', 'utf-8')
        self._manager_ipc_authkey = b'' + bytes(f'lsp-manager-endpoint-ipc', 'utf-8')
        # Max seconds a single connection may occupy the handler loop.
        self._ipc_timeout = 15.0
        self._event_system = None

        if conn_type == "socket":
            # Unix domain sockets override any address passed in.
            self._ipc_address = f'/tmp/lsp-client-endpoint-ipc.sock'
            self._manager_ipc_address = f'/tmp/lsp-manager-endpoint-ipc.sock'
        elif conn_type == "full_network":
            self._ipc_address = '0.0.0.0'
        elif conn_type == "full_network_unsecured":
            self._ipc_authkey = None
            self._ipc_address = '0.0.0.0'
        elif conn_type == "local_network_unsecured":
            self._ipc_authkey = None

    def set_event_system(self, event_system):
        # Event bus used to forward parsed LSP messages to the plugin.
        self._event_system = event_system

    def create_ipc_listener(self) -> None:
        """Create our listener endpoint and start the accept loop (threaded)."""
        if self._conn_type == "socket":
            # Clear a stale socket file left behind by an unclean shutdown.
            if os.path.exists(self._ipc_address) and settings_manager.is_dirty_start():
                os.unlink(self._ipc_address)

            listener = Listener(address=self._ipc_address, family="AF_UNIX", authkey=self._ipc_authkey)
        elif "unsecured" not in self._conn_type:
            listener = Listener((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey)
        else:
            listener = Listener((self._ipc_address, self._ipc_port))

        self.is_ipc_alive = True
        self._run_ipc_loop(listener)

    @daemon_threaded
    def _run_ipc_loop(self, listener) -> None:
        # NOTE: Not thread safe if using with Gtk. Need to import GLib and use idle_add
        while self.is_ipc_alive:
            try:
                conn = listener.accept()
                start_time = time.perf_counter()
                self._handle_ipc_message(conn, start_time)
            except Exception as e:
                logger.debug( traceback.print_exc() )

        listener.close()

    def _handle_ipc_message(self, conn, start_time) -> None:
        """
        Drain one accepted connection: decode "MANAGER|" payloads into LSP
        response/notification objects and hand them to the Gtk main loop.
        """
        while self.is_ipc_alive:
            msg = conn.recv()

            if "MANAGER|" in msg:
                data = msg.split("MANAGER|")[1].strip()

                if data:
                    data_str = base64.b64decode(data.encode("utf-8")).decode("utf-8")
                    lsp_response = None
                    keys = None

                    try:
                        lsp_response = json.loads(data_str)
                        keys = lsp_response.keys()
                    except Exception as e:
                        raise e

                    # Classify by JSON-RPC shape: "result" means a reply to a
                    # request; "method" means a notification (with or
                    # without an id).
                    if "result" in keys:
                        lsp_response = LSPResponseRequest(**get_message_obj(data_str))

                    if "method" in keys:
                        lsp_response = LSPResponseNotification( **get_message_obj(data_str) ) if not "id" in keys else LSPIDResponseNotification( **get_message_obj(data_str) )

                    if "notification" in keys:
                        ...

                    if "response" in keys:
                        ...

                    if "ignorable" in keys:
                        ...

                    if lsp_response:
                        # Hand off on the Gtk main loop; emitting from this
                        # thread would not be thread safe.
                        GLib.idle_add(self._do_emit, lsp_response)

                conn.close()
                break

            if msg in ['close connection', 'close server']:
                conn.close()
                break

            # NOTE: Not perfect but insures we don't lock up the connection for too long.
            end_time = time.perf_counter()
            if (end_time - start_time) > self._ipc_timeout:
                conn.close()
                break

    def _do_emit(self, lsp_response):
        # Runs on the Gtk main loop (scheduled via GLib.idle_add above).
        self._event_system.emit("handle-lsp-message", (lsp_response,))

    def send_manager_ipc_message(self, message: str) -> None:
        """Base64-wrap *message* and deliver it to the manager's endpoint."""
        try:
            if self._conn_type == "socket":
                if not os.path.exists(self._manager_ipc_address):
                    logger.error(f"Socket: {self._manager_ipc_address} doesn't exist. NOT sending message...")
                    return

                conn = Client(address=self._manager_ipc_address, family="AF_UNIX", authkey=self._manager_ipc_authkey)
            elif "unsecured" not in self._conn_type:
                conn = Client((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey)
            else:
                conn = Client((self._ipc_address, self._ipc_port))

            conn.send( f"CLIENT|{ base64.b64encode(message.encode('utf-8')).decode('utf-8') }" )
            conn.close()
        except ConnectionRefusedError as e:
            logger.error("Connection refused...")
        except Exception as e:
            logger.error( repr(e) )

    def send_ipc_message(self, message: str = "Empty Data...") -> None:
        """Send a raw (unwrapped) message to our own listener endpoint."""
        try:
            if self._conn_type == "socket":
                conn = Client(address=self._ipc_address, family="AF_UNIX", authkey=self._ipc_authkey)
            elif "unsecured" not in self._conn_type:
                conn = Client((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey)
            else:
                conn = Client((self._ipc_address, self._ipc_port))

            conn.send(message)
            conn.close()
        except ConnectionRefusedError as e:
            logger.error("Connection refused...")
        except Exception as e:
            logger.error( repr(e) )

    def send_test_ipc_message(self, message: str = "Empty Data...") -> None:
        """Probe our own endpoint; remove the socket file when it is dead."""
        try:
            if self._conn_type == "socket":
                conn = Client(address=self._ipc_address, family="AF_UNIX", authkey=self._ipc_authkey)
            elif "unsecured" not in self._conn_type:
                conn = Client((self._ipc_address, self._ipc_port), authkey=self._ipc_authkey)
            else:
                conn = Client((self._ipc_address, self._ipc_port))

            conn.send(message)
            conn.close()
        except ConnectionRefusedError as e:
            if self._conn_type == "socket":
                logger.error("LSP Socket no longer valid.... Removing.")
                os.unlink(self._ipc_address)
        except Exception as e:
            logger.error( repr(e) )
||||||
7
plugins/lsp_client/config.json
Normal file
7
plugins/lsp_client/config.json
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"lsp_manager_start_command": ["python", "/opt/lsp-manager.zip"],
|
||||||
|
"websocket": {
|
||||||
|
"host": "localhost",
|
||||||
|
"port": 8765
|
||||||
|
}
|
||||||
|
}
|
||||||
54
plugins/lsp_client/lsp_message_structs.py
Normal file
54
plugins/lsp_client/lsp_message_structs.py
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
# Python imports
|
||||||
|
from dataclasses import dataclass
|
||||||
|
import json
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def get_message_obj(data: str):
    """Parse a raw JSON message string into its Python representation."""
    message = json.loads(data)
    return message
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
class LSPResponseRequest(object):
    """
    JSON-RPC response message answering a previously issued request.

    :param str jsonrpc: JSON-RPC protocol version string.
    :param int id: The id of the request this message answers.
    :param dict result: The result payload of the invoked method.
    """
    jsonrpc: str
    id: int
    result: dict
|
||||||
|
|
||||||
|
@dataclass
class LSPResponseNotification(object):
    """
    JSON-RPC notification message (carries no id).

    :param str jsonrpc: JSON-RPC protocol version string.
    :param str method: The type of lsp notification being made.
    :param dict params: The arguments of the given method.
    """
    jsonrpc: str
    method: str
    params: dict
|
||||||
|
|
||||||
|
@dataclass
class LSPIDResponseNotification(object):
    """
    JSON-RPC notification message that additionally carries a message id.

    :param str jsonrpc: JSON-RPC protocol version string.
    :param int id: The id associated with this notification.
    :param str method: The type of lsp notification being made.
    :param dict params: The arguments of the given method.
    """
    jsonrpc: str
    id: int
    method: str
    params: dict
|
||||||
|
|
||||||
|
|
||||||
|
class LSPResponseTypes(LSPResponseRequest, LSPResponseNotification, LSPIDResponseNotification):
    # Aggregate "union" type covering all LSP response message shapes above;
    # defines no fields of its own. Not decorated with @dataclass, so the
    # conflicting parent field orders never trigger dataclass field resolution.
    # NOTE(review): presumably used only as a type-hint / isinstance target,
    # never instantiated — confirm against callers.
    ...
|
||||||
16
plugins/lsp_client/manifest.json
Normal file
16
plugins/lsp_client/manifest.json
Normal file
@@ -0,0 +1,16 @@
|
|||||||
|
{
|
||||||
|
"name": "LSP Client",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"credit": "",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true,
|
||||||
|
"pass_ui_objects": [
|
||||||
|
"separator_right"
|
||||||
|
],
|
||||||
|
"bind_keys": [
|
||||||
|
"LSP Client Toggle||tggl_lsp_window:<Control>l"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
270
plugins/lsp_client/plugin.py
Normal file
270
plugins/lsp_client/plugin.py
Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
# Python imports
|
||||||
|
import signal
|
||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
from .client_ipc import ClientIPC
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(PluginBase):
|
||||||
|
    def __init__(self):
        """Set up plugin state; UI construction and config loading happen in run()."""
        super().__init__()
        self.name = "LSP Client" # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
        # where self.name should not be needed for message comms
        self.config_file = "config.json" # NOTE(review): relative path, resolved against CWD — confirm intended
        self.config: dict = {}           # parsed config_file contents (filled in run())
        self.lsp_client_proc = None      # Popen handle of the LSP manager process; None when not running
        self.lsp_window = None           # Gtk.Window built in run(), toggled via _tggl_lsp_window
|
||||||
|
|
||||||
|
|
||||||
|
    def generate_reference_ui_element(self):
        # Intentionally empty: this plugin contributes no reference UI element.
        # NOTE(review): assumed to be a PluginBase hook — confirm against plugin_base.
        ...
|
||||||
|
|
||||||
|
    def run(self):
        """Load the plugin config, build the LSP manager control window, and subscribe to events.

        Raises:
            Exception: if config.json cannot be read or parsed.
        """
        try:
            with open(self.config_file) as f:
                self.config = json.load(f)
        except Exception as e:
            raise Exception(f"Couldn't load config.json...\n{repr(e)}")

        # Control window: a vertical box holding the PID label and a row of
        # start/stop buttons.
        self.lsp_window = Gtk.Window()
        box1 = Gtk.Box()
        box2 = Gtk.Box()
        start_btn = Gtk.Button(label = "Start LSP Client")
        stop_btn = Gtk.Button(label = "Stop LSP Client")
        pid_label = Gtk.Label(label = "LSP PID: ")

        box1.set_orientation( Gtk.Orientation.VERTICAL )

        # Keep the window out of the pager/taskbar and non-closable; it is
        # shown/hidden via the keybinding handler (_tggl_lsp_window) instead.
        self.lsp_window.set_deletable(False)
        self.lsp_window.set_skip_pager_hint(True)
        self.lsp_window.set_skip_taskbar_hint(True)
        self.lsp_window.set_title("LSP Manager")
        self.lsp_window.set_size_request(480, 320)

        start_btn.connect("clicked", self.start_lsp_manager)
        stop_btn.connect("clicked", self.stop_lsp_manager)

        box1.add(pid_label)
        box2.add(start_btn)
        box2.add(stop_btn)
        box1.add(box2)
        self.lsp_window.add(box1)

        # Show children only; the window itself stays hidden until toggled.
        box1.show_all()

        self.inner_subscribe_to_events()
|
||||||
|
|
||||||
|
    def _shutting_down(self):
        # "shutting_down" event handler: make sure the managed LSP process is stopped.
        self.stop_lsp_manager()
|
||||||
|
|
||||||
|
    def _tear_down(self, widget, eve):
        # Returning True marks the event handled, preventing the default
        # destroy behavior (the window is meant to be hidden, not destroyed).
        # NOTE(review): not connected anywhere in this file — confirm the host wires it up.
        return True
|
||||||
|
|
||||||
|
def _tggl_lsp_window(self, widget = None):
|
||||||
|
if not self.lsp_window.is_visible():
|
||||||
|
self.lsp_window.show()
|
||||||
|
else:
|
||||||
|
self.lsp_window.hide()
|
||||||
|
|
||||||
|
|
||||||
|
    def subscribe_to_events(self):
        # Host-facing subscription hook: "tggl_lsp_window" is the keybinding
        # action name declared in manifest.json (<Control>l).
        self._event_system.subscribe("tggl_lsp_window", self._tggl_lsp_window)
|
||||||
|
|
||||||
|
    def inner_subscribe_to_events(self):
        """Wire up lifecycle and text-document events (called at the end of run())."""
        self._event_system.subscribe("shutting_down", self._shutting_down)

        # Editor events named after the LSP methods they forward to the manager.
        self._event_system.subscribe("textDocument/didOpen", self._lsp_did_open)
        self._event_system.subscribe("textDocument/didSave", self._lsp_did_save)
        self._event_system.subscribe("textDocument/didClose", self._lsp_did_close)
        self._event_system.subscribe("textDocument/didChange", self._lsp_did_change)
        self._event_system.subscribe("textDocument/definition", self._lsp_goto)
        self._event_system.subscribe("textDocument/completion", self._lsp_completion)
|
||||||
|
|
||||||
|
    def start_lsp_manager(self, button):
        """Launch the external LSP manager process and bring up the client IPC server.

        No-op if a process handle already exists; a stale handle from a dead
        process is cleared by stop_lsp_manager.
        """
        if self.lsp_client_proc: return
        self.lsp_client_proc = subprocess.Popen(self.config["lsp_manager_start_command"])
        self._load_client_ipc_server()
|
||||||
|
|
||||||
|
def _load_client_ipc_server(self):
|
||||||
|
self.client_ipc = ClientIPC()
|
||||||
|
self.client_ipc.set_event_system(self._event_system)
|
||||||
|
self._ipc_realization_check(self.client_ipc)
|
||||||
|
|
||||||
|
if not self.client_ipc.is_ipc_alive:
|
||||||
|
raise AppLaunchException(f"LSP IPC Server Already Exists...")
|
||||||
|
|
||||||
|
def _ipc_realization_check(self, ipc_server):
|
||||||
|
try:
|
||||||
|
ipc_server.create_ipc_listener()
|
||||||
|
except Exception:
|
||||||
|
ipc_server.send_test_ipc_message()
|
||||||
|
|
||||||
|
try:
|
||||||
|
ipc_server.create_ipc_listener()
|
||||||
|
except Exception as e:
|
||||||
|
...
|
||||||
|
|
||||||
|
def stop_lsp_manager(self, button = None):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
if not self.lsp_client_proc.poll() is None:
|
||||||
|
self.lsp_client_proc = None
|
||||||
|
return
|
||||||
|
|
||||||
|
self.lsp_client_proc.terminate()
|
||||||
|
self.client_ipc.is_ipc_alive = False
|
||||||
|
self.lsp_client_proc = None
|
||||||
|
|
||||||
|
def _lsp_did_open(self, language_id: str, uri: str, text: str):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/didOpen",
|
||||||
|
"language_id": language_id,
|
||||||
|
"uri": uri,
|
||||||
|
"version": -1,
|
||||||
|
"text": text,
|
||||||
|
"line": -1,
|
||||||
|
"column": -1,
|
||||||
|
"char": ""
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
def _lsp_did_save(self, uri: str, text: str):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/didSave",
|
||||||
|
"language_id": "",
|
||||||
|
"uri": uri,
|
||||||
|
"version": -1,
|
||||||
|
"text": text,
|
||||||
|
"line": -1,
|
||||||
|
"column": -1,
|
||||||
|
"char": ""
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
def _lsp_did_close(self, uri: str):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/didClose",
|
||||||
|
"language_id": "",
|
||||||
|
"uri": uri,
|
||||||
|
"version": -1,
|
||||||
|
"text": "",
|
||||||
|
"line": -1,
|
||||||
|
"column": -1,
|
||||||
|
"char": ""
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
def _lsp_did_change(self, language_id: str, uri: str, buffer):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
iter = buffer.get_iter_at_mark( buffer.get_insert() )
|
||||||
|
line = iter.get_line()
|
||||||
|
column = iter.get_line_offset()
|
||||||
|
|
||||||
|
start, end = buffer.get_bounds()
|
||||||
|
|
||||||
|
text = buffer.get_text(start, end, include_hidden_chars = True)
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/didChange",
|
||||||
|
"language_id": language_id,
|
||||||
|
"uri": uri,
|
||||||
|
"version": buffer.version_id,
|
||||||
|
"text": text,
|
||||||
|
"line": line,
|
||||||
|
"column": column,
|
||||||
|
"char": ""
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
|
||||||
|
# def _lsp_did_change(self, language_id: str, uri: str, buffer):
|
||||||
|
# if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
# iter = buffer.get_iter_at_mark( buffer.get_insert() )
|
||||||
|
# line = iter.get_line()
|
||||||
|
# column = iter.get_line_offset()
|
||||||
|
# start = iter.copy()
|
||||||
|
# end = iter.copy()
|
||||||
|
|
||||||
|
# start.backward_line()
|
||||||
|
# start.forward_line()
|
||||||
|
# end.forward_line()
|
||||||
|
|
||||||
|
# text = buffer.get_text(start, end, include_hidden_chars = True)
|
||||||
|
# data = {
|
||||||
|
# "method": "textDocument/didChange",
|
||||||
|
# "language_id": language_id,
|
||||||
|
# "uri": uri,
|
||||||
|
# "version": buffer.version_id,
|
||||||
|
# "text": text,
|
||||||
|
# "line": line,
|
||||||
|
# "column": column,
|
||||||
|
# "char": ""
|
||||||
|
# }
|
||||||
|
|
||||||
|
# self.send_message(data)
|
||||||
|
|
||||||
|
|
||||||
|
def _lsp_goto(self, language_id: str, uri: str, line: int, column: int):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/definition",
|
||||||
|
"language_id": language_id,
|
||||||
|
"uri": uri,
|
||||||
|
"version": -1,
|
||||||
|
"text": "",
|
||||||
|
"line": line,
|
||||||
|
"column": column,
|
||||||
|
"char": ""
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
def _lsp_completion(self, source_view):
|
||||||
|
if not self.lsp_client_proc: return
|
||||||
|
|
||||||
|
filepath = source_view.get_current_file()
|
||||||
|
if not filepath: return
|
||||||
|
|
||||||
|
uri = filepath.get_uri()
|
||||||
|
buffer = source_view.get_buffer()
|
||||||
|
iter = buffer.get_iter_at_mark( buffer.get_insert() )
|
||||||
|
line = iter.get_line()
|
||||||
|
column = iter.get_line_offset()
|
||||||
|
char = iter.get_char()
|
||||||
|
|
||||||
|
data = {
|
||||||
|
"method": "textDocument/completion",
|
||||||
|
"language_id": source_view.get_filetype(),
|
||||||
|
"uri": uri,
|
||||||
|
"version": source_view.get_version_id(),
|
||||||
|
"text": "",
|
||||||
|
"line": line,
|
||||||
|
"column": column,
|
||||||
|
"char": char
|
||||||
|
}
|
||||||
|
|
||||||
|
self.send_message(data)
|
||||||
|
|
||||||
|
    def send_message(self, data: dict):
        # Serialize the event payload and hand it to the LSP manager over IPC.
        # NOTE(review): assumes start_lsp_manager has run (self.client_ipc exists);
        # all current callers guard on self.lsp_client_proc first — confirm.
        self.client_ipc.send_manager_ipc_message( json.dumps(data) )
|
||||||
3
plugins/markdown_preview/__init__.py
Normal file
3
plugins/markdown_preview/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Module
|
||||||
|
"""
|
||||||
3
plugins/markdown_preview/__main__.py
Normal file
3
plugins/markdown_preview/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Plugin Package
|
||||||
|
"""
|
||||||
11
plugins/markdown_preview/manifest.json
Normal file
11
plugins/markdown_preview/manifest.json
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"name": "Markdown Preview",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true,
|
||||||
|
"pass_ui_objects": ["separator_right"],
|
||||||
|
"bind_keys": ["Markdown Preview||tggle_markdown_preview:<Shift><Control>m"]
|
||||||
|
}
|
||||||
|
}
|
||||||
48
plugins/markdown_preview/markdown/__init__.py
Normal file
48
plugins/markdown_preview/markdown/__init__.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# - Documentation: https://python-markdown.github.io/
|
||||||
|
# - GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# - PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# - Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# - Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# - Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
Python-Markdown provides two public functions ([`markdown.markdown`][] and [`markdown.markdownFromFile`][])
|
||||||
|
both of which wrap the public class [`markdown.Markdown`][]. All submodules support these public functions
|
||||||
|
and class and/or provide extension support.
|
||||||
|
|
||||||
|
Modules:
|
||||||
|
core: Core functionality.
|
||||||
|
preprocessors: Pre-processors.
|
||||||
|
blockparser: Core Markdown block parser.
|
||||||
|
blockprocessors: Block processors.
|
||||||
|
treeprocessors: Tree processors.
|
||||||
|
inlinepatterns: Inline patterns.
|
||||||
|
postprocessors: Post-processors.
|
||||||
|
serializers: Serializers.
|
||||||
|
util: Utility functions.
|
||||||
|
htmlparser: HTML parser.
|
||||||
|
test_tools: Testing utilities.
|
||||||
|
extensions: Markdown extensions.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from .core import Markdown, markdown, markdownFromFile
|
||||||
|
from .__meta__ import __version__, __version_info__ # noqa
|
||||||
|
|
||||||
|
# For backward compatibility as some extensions expect it...
|
||||||
|
from .extensions import Extension # noqa
|
||||||
|
|
||||||
|
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
|
||||||
151
plugins/markdown_preview/markdown/__main__.py
Normal file
151
plugins/markdown_preview/markdown/__main__.py
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import optparse
|
||||||
|
import codecs
|
||||||
|
import warnings
|
||||||
|
import markdown
|
||||||
|
try:
|
||||||
|
# We use `unsafe_load` because users may need to pass in actual Python
|
||||||
|
# objects. As this is only available from the CLI, the user has much
|
||||||
|
# worse problems if an attacker can use this as an attach vector.
|
||||||
|
from yaml import unsafe_load as yaml_load
|
||||||
|
except ImportError: # pragma: no cover
|
||||||
|
try:
|
||||||
|
# Fall back to PyYAML <5.1
|
||||||
|
from yaml import load as yaml_load
|
||||||
|
except ImportError:
|
||||||
|
# Fall back to JSON
|
||||||
|
from json import load as yaml_load
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from logging import DEBUG, WARNING, CRITICAL
|
||||||
|
|
||||||
|
logger = logging.getLogger('MARKDOWN')
|
||||||
|
|
||||||
|
|
||||||
|
def parse_options(args=None, values=None):
    """
    Define and parse `optparse` options for command-line usage.

    Returns a 2-tuple of (opts, verbosity) where `opts` is a dict of keyword
    arguments for `markdown.markdownFromFile` and `verbosity` is a logging
    level for the 'MARKDOWN' logger.
    """
    # NOTE: optparse is deprecated in favor of argparse, but is kept here for
    # backward compatibility of the CLI.
    usage = """%prog [options] [INPUTFILE]
       (STDIN is assumed if no INPUTFILE is given)"""
    desc = "A Python implementation of John Gruber's Markdown. " \
           "https://Python-Markdown.github.io/"
    ver = "%%prog %s" % markdown.__version__

    parser = optparse.OptionParser(usage=usage, description=desc, version=ver)
    parser.add_option("-f", "--file", dest="filename", default=None,
                      help="Write output to OUTPUT_FILE. Defaults to STDOUT.",
                      metavar="OUTPUT_FILE")
    parser.add_option("-e", "--encoding", dest="encoding",
                      help="Encoding for input and output files.",)
    parser.add_option("-o", "--output_format", dest="output_format",
                      default='xhtml', metavar="OUTPUT_FORMAT",
                      help="Use output format 'xhtml' (default) or 'html'.")
    parser.add_option("-n", "--no_lazy_ol", dest="lazy_ol",
                      action='store_false', default=True,
                      help="Observe number of first item of ordered lists.")
    parser.add_option("-x", "--extension", action="append", dest="extensions",
                      help="Load extension EXTENSION.", metavar="EXTENSION")
    parser.add_option("-c", "--extension_configs",
                      dest="configfile", default=None,
                      help="Read extension configurations from CONFIG_FILE. "
                      "CONFIG_FILE must be of JSON or YAML format. YAML "
                      "format requires that a python YAML library be "
                      "installed. The parsed JSON or YAML must result in a "
                      "python dictionary which would be accepted by the "
                      "'extension_configs' keyword on the markdown.Markdown "
                      "class. The extensions must also be loaded with the "
                      "`--extension` option.",
                      metavar="CONFIG_FILE")
    # -q/-v/--noisy all store into `verbose`; the most recently given flag wins.
    parser.add_option("-q", "--quiet", default=CRITICAL,
                      action="store_const", const=CRITICAL+10, dest="verbose",
                      help="Suppress all warnings.")
    parser.add_option("-v", "--verbose",
                      action="store_const", const=WARNING, dest="verbose",
                      help="Print all warnings.")
    parser.add_option("--noisy",
                      action="store_const", const=DEBUG, dest="verbose",
                      help="Print debug messages.")

    (options, args) = parser.parse_args(args, values)

    if len(args) == 0:
        input_file = None
    else:
        input_file = args[0]

    if not options.extensions:
        options.extensions = []

    extension_configs = {}
    if options.configfile:
        with codecs.open(
            options.configfile, mode="r", encoding=options.encoding
        ) as fp:
            try:
                extension_configs = yaml_load(fp)
            except Exception as e:
                # Prepend the file name to the parser's error so the user can
                # tell which input failed, then re-raise the original error.
                message = "Failed parsing extension config file: %s" % \
                          options.configfile
                e.args = (message,) + e.args[1:]
                raise

    opts = {
        'input': input_file,
        'output': options.filename,
        'extensions': options.extensions,
        'extension_configs': extension_configs,
        'encoding': options.encoding,
        'output_format': options.output_format,
        'lazy_ol': options.lazy_ol
    }

    return opts, options.verbose
|
||||||
|
|
||||||
|
|
||||||
|
def run():  # pragma: no cover
    """Run Markdown from the command line."""

    # Parse options and adjust logging level if necessary
    options, logging_level = parse_options()
    if not options:
        sys.exit(2)
    logger.setLevel(logging_level)
    console_handler = logging.StreamHandler()
    logger.addHandler(console_handler)
    if logging_level <= WARNING:
        # Ensure deprecation warnings get displayed
        warnings.filterwarnings('default')
        # Route Python warnings through logging so they share the console handler.
        logging.captureWarnings(True)
        warn_logger = logging.getLogger('py.warnings')
        warn_logger.addHandler(console_handler)

    # Run
    markdown.markdownFromFile(**options)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__': # pragma: no cover
|
||||||
|
# Support running module as a command line command.
|
||||||
|
# python -m markdown [options] [args]
|
||||||
|
run()
|
||||||
51
plugins/markdown_preview/markdown/__meta__.py
Normal file
51
plugins/markdown_preview/markdown/__meta__.py
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
# __version_info__ format:
|
||||||
|
# (major, minor, patch, dev/alpha/beta/rc/final, #)
|
||||||
|
# (1, 1, 2, 'dev', 0) => "1.1.2.dev0"
|
||||||
|
# (1, 1, 2, 'alpha', 1) => "1.1.2a1"
|
||||||
|
# (1, 2, 0, 'beta', 2) => "1.2b2"
|
||||||
|
# (1, 2, 0, 'rc', 4) => "1.2rc4"
|
||||||
|
# (1, 2, 0, 'final', 0) => "1.2"
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
|
||||||
|
__version_info__ = (3, 5, 1, 'final', 0)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_version(version_info):
|
||||||
|
" Returns a PEP 440-compliant version number from `version_info`. "
|
||||||
|
assert len(version_info) == 5
|
||||||
|
assert version_info[3] in ('dev', 'alpha', 'beta', 'rc', 'final')
|
||||||
|
|
||||||
|
parts = 2 if version_info[2] == 0 else 3
|
||||||
|
v = '.'.join(map(str, version_info[:parts]))
|
||||||
|
|
||||||
|
if version_info[3] == 'dev':
|
||||||
|
v += '.dev' + str(version_info[4])
|
||||||
|
elif version_info[3] != 'final':
|
||||||
|
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
|
||||||
|
v += mapping[version_info[3]] + str(version_info[4])
|
||||||
|
|
||||||
|
return v
|
||||||
|
|
||||||
|
|
||||||
|
__version__ = _get_version(__version_info__)
|
||||||
160
plugins/markdown_preview/markdown/blockparser.py
Normal file
160
plugins/markdown_preview/markdown/blockparser.py
Normal file
@@ -0,0 +1,160 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
The block parser handles basic parsing of Markdown blocks. It doesn't concern
|
||||||
|
itself with inline elements such as `**bold**` or `*italics*`, but rather just
|
||||||
|
catches blocks, lists, quotes, etc.
|
||||||
|
|
||||||
|
The `BlockParser` is made up of a bunch of `BlockProcessors`, each handling a
|
||||||
|
different type of block. Extensions may add/replace/remove `BlockProcessors`
|
||||||
|
as they need to alter how Markdown blocks are parsed.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
from typing import TYPE_CHECKING, Iterable, Any
|
||||||
|
from . import util
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
from .blockprocessors import BlockProcessor
|
||||||
|
|
||||||
|
|
||||||
|
class State(list):
    """ Track the current and nested state of the parser.

    A thin stack API over a plain list: setting a state pushes it, resetting
    pops it, and `isstate` compares against the top of the stack. Every state
    that is set for a nested block must be reset when leaving that nesting
    level, or the recorded state becomes corrupted.

    All list methods remain available, but only the three defined below are
    intended for use.

    """

    def set(self, state: Any):
        """ Set a new state. """
        self.append(state)

    def reset(self) -> None:
        """ Step back one step in nested state. """
        self.pop()

    def isstate(self, state: Any) -> bool:
        """ Test that top (current) level is of given state. """
        if not len(self):
            return False
        return self[-1] == state
|
||||||
|
|
||||||
|
|
||||||
|
class BlockParser:
    """ Parse Markdown blocks into an `ElementTree` object.

    A wrapper class that stitches the various `BlockProcessors` together,
    looping through them and creating an `ElementTree` object.

    """

    def __init__(self, md: Markdown):
        """ Initialize the block parser.

        Arguments:
            md: A Markdown instance.

        Attributes:
            BlockParser.md (Markdown): A Markdown instance.
            BlockParser.state (State): Tracks the nesting level of current location in document being parsed.
            BlockParser.blockprocessors (util.Registry): A collection of
                [`blockprocessors`][markdown.blockprocessors].

        """
        self.blockprocessors: util.Registry[BlockProcessor] = util.Registry()
        self.state = State()
        self.md = md

    def parseDocument(self, lines: Iterable[str]) -> etree.ElementTree:
        """ Parse a Markdown document into an `ElementTree`.

        Given a list of lines, an `ElementTree` object (not just a parent
        `Element`) is created and the root element is passed to the parser
        as the parent. The `ElementTree` object is returned.

        This should only be called on an entire document, not pieces.

        Arguments:
            lines: A list of lines (strings).

        Returns:
            An element tree.
        """
        # Create an `ElementTree` from the lines
        self.root = etree.Element(self.md.doc_tag)
        self.parseChunk(self.root, '\n'.join(lines))
        return etree.ElementTree(self.root)

    def parseChunk(self, parent: etree.Element, text: str) -> None:
        """ Parse a chunk of Markdown text and attach to given `etree` node.

        While the `text` argument is generally assumed to contain multiple
        blocks which will be split on blank lines, it could contain only one
        block. Generally, this method would be called by extensions when
        block parsing is required.

        The `parent` `etree` Element passed in is altered in place.
        Nothing is returned.

        Arguments:
            parent: The parent element.
            text: The text to parse.

        """
        # Blocks are delimited by blank lines.
        self.parseBlocks(parent, text.split('\n\n'))

    def parseBlocks(self, parent: etree.Element, blocks: list[str]) -> None:
        """ Process blocks of Markdown text and attach to given `etree` node.

        Given a list of `blocks`, each `blockprocessor` is stepped through
        until there are no blocks left. While an extension could potentially
        call this method directly, it's generally expected to be used
        internally.

        This is a public method as an extension may need to add/alter
        additional `BlockProcessors` which call this method to recursively
        parse a nested block.

        Arguments:
            parent: The parent element.
            blocks: The blocks of text to parse.

        """
        # Each pass, the first processor whose test() accepts blocks[0] gets
        # to run(); processors are expected to consume at least one block,
        # otherwise this loop would never terminate. A run() returning False
        # means "declined after all" and the next processor is tried.
        while blocks:
            for processor in self.blockprocessors:
                if processor.test(parent, blocks[0]):
                    if processor.run(parent, blocks) is not False:
                        # run returns True or None
                        break
|
||||||
636
plugins/markdown_preview/markdown/blockprocessors.py
Normal file
636
plugins/markdown_preview/markdown/blockprocessors.py
Normal file
@@ -0,0 +1,636 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
A block processor parses blocks of text and adds new elements to the ElementTree. Blocks of text,
|
||||||
|
separated from other text by blank lines, may have a different syntax and produce a differently
|
||||||
|
structured tree than other Markdown. Block processors excel at handling code formatting, equation
|
||||||
|
layouts, tables, etc.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import re
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
from . import util
|
||||||
|
from .blockparser import BlockParser
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
logger = logging.getLogger('MARKDOWN')
|
||||||
|
|
||||||
|
|
||||||
|
def build_block_parser(md: Markdown, **kwargs: Any) -> BlockParser:
    """ Build the default block parser used by Markdown. """
    parser = BlockParser(md)
    # Default processors, registered highest priority first.  Priority
    # controls the order in which each processor's `test` is tried.
    default_processors = (
        ('empty', EmptyBlockProcessor, 100),
        ('indent', ListIndentProcessor, 90),
        ('code', CodeBlockProcessor, 80),
        ('hashheader', HashHeaderProcessor, 70),
        ('setextheader', SetextHeaderProcessor, 60),
        ('hr', HRProcessor, 50),
        ('olist', OListProcessor, 40),
        ('ulist', UListProcessor, 30),
        ('quote', BlockQuoteProcessor, 20),
        ('reference', ReferenceProcessor, 15),
        ('paragraph', ParagraphProcessor, 10),
    )
    for name, processor_cls, priority in default_processors:
        parser.blockprocessors.register(processor_cls(parser), name, priority)
    return parser
|
||||||
|
|
||||||
|
|
||||||
|
class BlockProcessor:
    """ Base class for block processors.

    Each subclass must provide `test` and `run` implementations.  `test`
    reports whether a given block should be handled by this processor; when
    it returns `True`, the parser calls the processor's `run` method.

    Attributes:
        BlockProcessor.parser (BlockParser): The `BlockParser` instance this is attached to.
        BlockProcessor.tab_length (int): The tab length set on the `Markdown` instance.

    """

    def __init__(self, parser: BlockParser):
        self.parser = parser
        self.tab_length = parser.md.tab_length

    def lastChild(self, parent: etree.Element) -> etree.Element | None:
        """ Return the last child of an `etree` element, or `None` if it has none. """
        return parent[-1] if len(parent) else None

    def detab(self, text: str, length: int | None = None) -> tuple[str, str]:
        """ Remove one indentation level from the leading lines of `text`.

        Returns a tuple of the dedented leading lines and the untouched
        remainder (everything from the first non-blank, unindented line on).
        """
        if length is None:
            length = self.tab_length
        indent = ' ' * length
        lines = text.split('\n')
        dedented = []
        for line in lines:
            if line.startswith(indent):
                dedented.append(line[length:])
            elif not line.strip():
                # Blank lines remain part of the dedented prefix.
                dedented.append('')
            else:
                # First unindented content line ends the dedented prefix.
                break
        return '\n'.join(dedented), '\n'.join(lines[len(dedented):])

    def looseDetab(self, text: str, level: int = 1) -> str:
        """ Remove `level` levels of indentation, leaving dedented lines alone. """
        prefix = ' ' * (self.tab_length * level)
        width = len(prefix)
        return '\n'.join(
            line[width:] if line.startswith(prefix) else line
            for line in text.split('\n')
        )

    def test(self, parent: etree.Element, block: str) -> bool:
        """ Test for block type. Must be overridden by subclasses.

        Called by the parser on each registered processor in priority order
        to decide whether the given block of text is of this type.  Must
        return a boolean.  The `parent` element is provided because the
        block type may depend on context (e.g. inside a list).

        Keyword arguments:
            parent: An `etree` element which will be the parent of the block.
            block: A block of text from the source which has been split at blank lines.
        """
        pass  # pragma: no cover

    def run(self, parent: etree.Element, blocks: list[str]) -> bool | None:
        """ Run processor. Must be overridden by subclasses.

        Parses the individual lines of the block and appends them to the
        `etree`.  Both `parent` and `blocks` are edited in place: add
        `SubElements`/text to `parent`, and `pop`/`insert` items on `blocks`.
        There is no mechanism to return replacement objects.  Returning
        `False` has the same effect as the `test` having returned `False`.

        Keyword arguments:
            parent: An `etree` element which is the parent of the current block.
            blocks: A list of all remaining blocks of the document.
        """
        pass  # pragma: no cover
|
||||||
|
|
||||||
|
|
||||||
|
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.

    Example:

        * a list item

            process this part

            or this part

    """

    ITEM_TYPES = ['li']
    """ List of tags used for list items. """
    LIST_TYPES = ['ul', 'ol']
    """ Types of lists this processor can operate on. """

    def __init__(self, *args):
        super().__init__(*args)
        # Matches leading indentation in whole tab-stop increments; `group(1)`
        # is the full run of indent, so its length / tab_length is the depth.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)' % self.tab_length)

    def test(self, parent, block):
        """ Match an indented block whose parent is a list item or whose
        parent's last child is a list, unless we are already detabbing
        (which would cause infinite recursion). """
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or
                (len(parent) and parent[-1] is not None and
                    (parent[-1].tag in self.LIST_TYPES)))

    def run(self, parent, blocks):
        """ Dedent the block by its list depth and re-parse it under the
        appropriate list-item element. """
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        block = self.looseDetab(block, level)

        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a `ul` or `ol` child list
            # with a member. If that is the case, then that should be the
            # parent. This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see `OListProcessor`
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a `li`. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a `li`. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (`ol` or `ul`) which has children.
            # Assume the last child `li` is the parent of this block.
            if sibling[-1].text:
                # If the parent `li` has text, that text needs to be moved to a `p`
                # The `p` must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sub-list.
                p = etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()

    def create_item(self, parent: etree.Element, block: str) -> None:
        """ Create a new `li` and parse the block with it as the parent. """
        li = etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])

    def get_level(self, parent: etree.Element, block: str) -> tuple[int, etree.Element]:
        """ Get level of indentation based on list level.

        Returns the resolved nesting level and the element that should serve
        as the block's parent at that level.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tight-list - so we already are at correct parent.
            level = 1
        else:
            # We're in a loose-list - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if (child is not None and
               (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES)):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of `indent_level`,
                # we have a code block. So we stop here.
                break
        return level, parent
|
||||||
|
|
||||||
|
|
||||||
|
class CodeBlockProcessor(BlockProcessor):
    """ Process indented code blocks. """

    def test(self, parent, block):
        """ Match any block indented by at least one tab stop. """
        return block.startswith(' '*self.tab_length)

    def run(self, parent, blocks):
        """ Append the dedented, escaped text to a `<pre><code>` element. """
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        dedented, remainder = self.detab(block)
        escaped = util.code_escape(dedented.rstrip())

        continues_code = (
            sibling is not None and sibling.tag == "pre"
            and len(sibling) and sibling[0].tag == "code"
        )
        if continues_code:
            # The previous block was a code block.  Blank lines do not start
            # new code blocks, so append to it, restoring the line breaks
            # that were removed when the source was split into blocks.
            code = sibling[0]
            code.text = util.AtomicString(
                '{}\n{}\n'.format(code.text, escaped)
            )
        else:
            # This is a new code block. Create the elements and insert text.
            pre = etree.SubElement(parent, 'pre')
            code = etree.SubElement(pre, 'code')
            code.text = util.AtomicString('%s\n' % escaped)
        if remainder:
            # Unindented line(s) followed the indented prefix; requeue them
            # as the next block for future processing.
            blocks.insert(0, remainder)
|
||||||
|
|
||||||
|
|
||||||
|
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes. """

    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')

    def test(self, parent, block):
        """ Match a `>`-prefixed line, unless recursion is nearly exhausted. """
        return bool(self.RE.search(block)) and not util.nearing_recursion_limit()

    def run(self, parent, blocks):
        """ Strip quote markers and re-parse the content inside a `<blockquote>`. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            # Content ahead of the first quote marker is ordinary text;
            # recursively parse it first so document order is preserved.
            self.parser.parseBlocks(parent, [block[:m.start()]])
            # Remove the `> ` prefix from every quoted line.
            quoted = '\n'.join(
                self.clean(line) for line in block[m.start():].split('\n')
            )
            sibling = self.lastChild(parent)
            if sibling is not None and sibling.tag == "blockquote":
                # Extend the blockquote produced by the previous block.
                quote = sibling
            else:
                # This is a new blockquote. Create a new parent element.
                quote = etree.SubElement(parent, 'blockquote')
            # Recursively parse with the blockquote as parent; set state so
            # blockquotes embedded in lists use `p` tags.
            self.parser.state.set('blockquote')
            self.parser.parseChunk(quote, quoted)
            self.parser.state.reset()

    def clean(self, line: str) -> str:
        """ Remove the `>` marker from the beginning of a line. """
        if line.strip() == ">":
            return ""
        m = self.RE.match(line)
        return m.group(2) if m else line
|
||||||
|
|
||||||
|
|
||||||
|
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks. """

    TAG: str = 'ol'
    """ The tag used for the the wrapping element. """
    STARTSWITH: str = '1'
    """
    The integer (as a string ) with which the list starts. For example, if a list is initialized as
    `3. Item`, then the `ol` tag will be assigned an HTML attribute of `starts="3"`. Default: `"1"`.
    """
    LAZY_OL: bool = True
    """ Ignore `STARTSWITH` if `True`. """
    SIBLING_TAGS: list[str] = ['ol', 'ul']
    """
    Markdown does not require the type of a new list item match the previous list item type.
    This is the list of types which can be mixed.
    """

    def __init__(self, parser: BlockParser):
        super().__init__(parser)
        # Detect an item (`1. item`). `group(1)` contains contents of item.
        self.RE = re.compile(r'^[ ]{0,%d}\d+\.[ ]+(.*)' % (self.tab_length - 1))
        # Detect items on secondary lines. they can be of either list type.
        self.CHILD_RE = re.compile(r'^[ ]{0,%d}((\d+\.)|[*+-])[ ]+(.*)' %
                                   (self.tab_length - 1))
        # Detect indented (nested) items of either type
        self.INDENT_RE = re.compile(r'^[ ]{%d,%d}((\d+\.)|[*+-])[ ]+.*' %
                                    (self.tab_length, self.tab_length * 2 - 1))

    def test(self, parent, block):
        """ Match a block whose first line is a list item of this type. """
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        """ Split the block into items and parse each under the list element,
        continuing a sibling list where one exists. """
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)

        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent
            lst = sibling
            # make sure previous item is in a `p` - if the item has text,
            # then it isn't in a `p`
            if lst[-1].text:
                # since it's possible there are other children for this
                # sibling, we can't just `SubElement` the `p`, we need to
                # insert it as the first item.
                p = etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # if the last item has a tail, then the tail needs to be put in a `p`
            # likely only when a header is not followed by a blank line
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''

            # parse first block differently as it gets wrapped in a `p`.
            li = etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # this catches the edge case of a multi-item indented list whose
            # first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also `ListIndentProcessor`
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set
            if not self.LAZY_OL and self.STARTSWITH != '1':
                lst.attrib['start'] = self.STARTSWITH

        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*self.tab_length):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create `li` and parse with it as parent
                li = etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()

    def get_items(self, block: str) -> list[str]:
        """ Break a block into list items.

        Each returned string is one item's content; indented (nested)
        material stays attached to the item it belongs to.
        """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item
                # Check first item for the start index
                if not items and self.TAG == 'ol':
                    # Detect the integer value of first list item
                    INTEGER_RE = re.compile(r'(\d+)')
                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
                # Append to the list
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '{}\n{}'.format(items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '{}\n{}'.format(items[-1], line)
        return items
|
||||||
|
|
||||||
|
|
||||||
|
class UListProcessor(OListProcessor):
    """ Process unordered list blocks. """

    TAG: str = 'ul'
    """ The tag used for the wrapping element. """

    def __init__(self, parser: BlockParser):
        super().__init__(parser)
        # Match a bullet item (`* item`, `+ item` or `- item`) indented by
        # less than one tab stop; `group(1)` captures the item's content.
        max_indent = self.tab_length - 1
        self.RE = re.compile(r'^[ ]{0,%d}[*+-][ ]+(.*)' % max_indent)
|
||||||
|
|
||||||
|
|
||||||
|
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers (`# Header` through `###### Header`). """

    # Detect a header at start of any line in block.  `level` captures the
    # run of 1-6 hashes and `header` the text (allowing backslash escapes);
    # optional trailing hashes are consumed but not captured.
    RE = re.compile(r'(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)')

    def test(self, parent, block):
        """ Return `True` if any line of the block is a hash header. """
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        """ Split the block around the header and emit an `<h1>`-`<h6>` element. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse this lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            h = etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:  # pragma: no cover
            # This should never happen, but just in case...
            # Fix: `Logger.warn` is deprecated; `warning` is the supported name.
            logger.warning("We've got a problem header: %r" % block)
|
||||||
|
|
||||||
|
|
||||||
|
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style headers (text underlined with `=` or `-`). """

    # A Setext header occupies the first two lines of a block: any text
    # followed by an underline of `=` or `-` characters.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)

    def test(self, parent, block):
        """ Return `True` if the block opens with an underlined header. """
        return bool(self.RE.match(block))

    def run(self, parent, blocks):
        """ Emit an `<h1>` or `<h2>` and requeue any trailing lines. """
        lines = blocks.pop(0).split('\n')
        # An `=` underline means level 1; a `-` underline means level 2.
        level = 1 if lines[1].startswith('=') else 2
        heading = etree.SubElement(parent, 'h%d' % level)
        heading.text = lines[0].strip()
        if len(lines) > 2:
            # The block continues past the underline; requeue the remainder
            # as the next block for later parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
|
||||||
|
|
||||||
|
|
||||||
|
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules. """

    # Python's `re` module doesn't officially support atomic grouping, but it
    # can be faked with a lookahead plus a backreference.
    # See https://stackoverflow.com/a/13577411/866026
    RE = r'^[ ]{0,3}(?=(?P<atomicgroup>(-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,}))(?P=atomicgroup)[ ]*$'
    # Detect an `hr` on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)

    def test(self, parent, block):
        """ Match a rule anywhere in the block, caching the match for `run`. """
        m = self.SEARCH_RE.search(block)
        if m is None:
            return False
        # Save the match object on the instance so `run` can reuse it.
        self.match = m
        return True

    def run(self, parent, blocks):
        """ Emit an `<hr>` and re-parse the content on either side of it. """
        block = blocks.pop(0)
        match = self.match
        # Lines preceding the rule must be parsed first, recursively.
        prelines = block[:match.start()].rstrip('\n')
        if prelines:
            self.parser.parseBlocks(parent, [prelines])
        # Create the rule itself.
        etree.SubElement(parent, 'hr')
        # Lines following the rule are requeued for later parsing.
        postlines = block[match.end():].lstrip('\n')
        if postlines:
            blocks.insert(0, postlines)
|
||||||
|
|
||||||
|
|
||||||
|
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that are empty or start with an empty line. """

    def test(self, parent, block):
        """ Match an empty block or one whose first line is blank. """
        return not block or block.startswith('\n')

    def run(self, parent, blocks):
        """ Drop leading blank content, preserving blank lines inside code blocks. """
        block = blocks.pop(0)
        if block:
            # Starts with an empty line: consume only that single line and
            # requeue whatever follows for later processing.
            filler = '\n'
            remainder = block[1:]
            if remainder:
                blocks.insert(0, remainder)
        else:
            # A completely empty block represents a blank-line separator.
            filler = '\n\n'
        sibling = self.lastChild(parent)
        if (sibling is not None and sibling.tag == 'pre' and
                len(sibling) and sibling[0].tag == 'code'):
            # Last block is a code block. Append the blank line(s) so the
            # code block's whitespace is preserved.
            sibling[0].text = util.AtomicString(
                '{}{}'.format(sibling[0].text, filler)
            )
|
||||||
|
|
||||||
|
|
||||||
|
class ReferenceProcessor(BlockProcessor):
    """ Process link reference definitions (`[id]: url "title"`). """

    RE = re.compile(
        r'^[ ]{0,3}\[([^\[\]]*)\]:[ ]*\n?[ ]*([^\s]+)[ ]*(?:\n[ ]*)?((["\'])(.*)\4[ ]*|\((.*)\)[ ]*)?$', re.MULTILINE
    )

    def test(self, parent, block):
        """ Always `True`; the real matching happens in `run`, which can decline. """
        return True

    def run(self, parent, blocks):
        """ Record a reference on the `Markdown` instance, or decline the block. """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m is None:
            # No reference definition found: restore the block unchanged and
            # signal the parser to try the next processor.
            blocks.insert(0, block)
            return False
        # Reference ids are matched case-insensitively.
        ref_id = m.group(1).strip().lower()
        url = m.group(2).lstrip('<').rstrip('>')
        title = m.group(5) or m.group(6)
        self.parser.md.references[ref_id] = (url, title)
        trailing = block[m.end():]
        if trailing.strip():
            # Content after the definition becomes its own block.
            blocks.insert(0, trailing.lstrip('\n'))
        leading = block[:m.start()]
        if leading.strip():
            # Content before the definition is queued ahead of it.
            blocks.insert(0, leading.rstrip('\n'))
        return True
|
||||||
|
|
||||||
|
|
||||||
|
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks.

    The catch-all processor: any block no other processor claimed becomes a
    paragraph (or, inside a tight list, bare text on the parent element).
    """

    def test(self, parent, block):
        """ Always `True`; this is the lowest-priority fallback. """
        return True

    def run(self, parent, blocks):
        """ Attach the block as a `<p>`, or as raw text/tail inside a tight list. """
        block = blocks.pop(0)
        if block.strip():
            # Not a blank block. Add to parent, otherwise throw it away.
            if self.parser.state.isstate('list'):
                # The parent is a tight-list.
                #
                # Check for any children. This will likely only happen in a
                # tight-list when a header isn't followed by a blank line.
                # For example:
                #
                #     * # Header
                #       Line 2 of list item - not part of header.
                sibling = self.lastChild(parent)
                if sibling is not None:
                    # Insert after sibling.
                    if sibling.tail:
                        sibling.tail = '{}\n{}'.format(sibling.tail, block)
                    else:
                        sibling.tail = '\n%s' % block
                else:
                    # Append to parent.text
                    if parent.text:
                        parent.text = '{}\n{}'.format(parent.text, block)
                    else:
                        parent.text = block.lstrip()
            else:
                # Create a regular paragraph
                p = etree.SubElement(parent, 'p')
                p.text = block.lstrip()
|
||||||
510
plugins/markdown_preview/markdown/core.py
Normal file
510
plugins/markdown_preview/markdown/core.py
Normal file
@@ -0,0 +1,510 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import codecs
|
||||||
|
import sys
|
||||||
|
import logging
|
||||||
|
import importlib
|
||||||
|
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Mapping, Sequence, TextIO
|
||||||
|
from . import util
|
||||||
|
from .preprocessors import build_preprocessors
|
||||||
|
from .blockprocessors import build_block_parser
|
||||||
|
from .treeprocessors import build_treeprocessors
|
||||||
|
from .inlinepatterns import build_inlinepatterns
|
||||||
|
from .postprocessors import build_postprocessors
|
||||||
|
from .extensions import Extension
|
||||||
|
from .serializers import to_html_string, to_xhtml_string
|
||||||
|
from .util import BLOCK_LEVEL_ELEMENTS
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from xml.etree.ElementTree import Element
|
||||||
|
|
||||||
|
__all__ = ['Markdown', 'markdown', 'markdownFromFile']
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger('MARKDOWN')
|
||||||
|
|
||||||
|
|
||||||
|
class Markdown:
|
||||||
|
"""
|
||||||
|
A parser which converts Markdown to HTML.
|
||||||
|
|
||||||
|
Attributes:
|
||||||
|
Markdown.tab_length (int): The number of spaces which correspond to a single tab. Default: `4`.
|
||||||
|
Markdown.ESCAPED_CHARS (list[str]): List of characters which get the backslash escape treatment.
|
||||||
|
Markdown.block_level_elements (list[str]): List of HTML tags which get treated as block-level elements.
|
||||||
|
See [`markdown.util.BLOCK_LEVEL_ELEMENTS`][] for the full list of elements.
|
||||||
|
Markdown.registeredExtensions (list[Extension]): List of extensions which have called
|
||||||
|
[`registerExtension`][markdown.Markdown.registerExtension] during setup.
|
||||||
|
Markdown.doc_tag (str): Element used to wrap document. Default: `div`.
|
||||||
|
Markdown.stripTopLevelTags (bool): Indicates whether the `doc_tag` should be removed. Default: 'True'.
|
||||||
|
Markdown.references (dict[str, tuple[str, str]]): A mapping of link references found in a parsed document
|
||||||
|
where the key is the reference name and the value is a tuple of the URL and title.
|
||||||
|
Markdown.htmlStash (util.HtmlStash): The instance of the `HtmlStash` used by an instance of this class.
|
||||||
|
Markdown.output_formats (dict[str, Callable[xml.etree.ElementTree.Element]]): A mapping of known output
|
||||||
|
formats by name and their respective serializers. Each serializer must be a callable which accepts an
|
||||||
|
[`Element`][xml.etree.ElementTree.Element] and returns a `str`.
|
||||||
|
Markdown.output_format (str): The output format set by
|
||||||
|
[`set_output_format`][markdown.Markdown.set_output_format].
|
||||||
|
Markdown.serializer (Callable[xml.etree.ElementTree.Element]): The serializer set by
|
||||||
|
[`set_output_format`][markdown.Markdown.set_output_format].
|
||||||
|
Markdown.preprocessors (util.Registry): A collection of [`preprocessors`][markdown.preprocessors].
|
||||||
|
Markdown.parser (blockparser.BlockParser): A collection of [`blockprocessors`][markdown.blockprocessors].
|
||||||
|
Markdown.inlinePatterns (util.Registry): A collection of [`inlinepatterns`][markdown.inlinepatterns].
|
||||||
|
Markdown.treeprocessors (util.Registry): A collection of [`treeprocessors`][markdown.treeprocessors].
|
||||||
|
Markdown.postprocessors (util.Registry): A collection of [`postprocessors`][markdown.postprocessors].
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
doc_tag = "div" # Element used to wrap document - later removed
|
||||||
|
|
||||||
|
output_formats: ClassVar[dict[str, Callable[[Element], str]]] = {
|
||||||
|
'html': to_html_string,
|
||||||
|
'xhtml': to_xhtml_string,
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
A mapping of known output formats by name and their respective serializers. Each serializer must be a
|
||||||
|
callable which accepts an [`Element`][xml.etree.ElementTree.Element] and returns a `str`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
    """
    Create a new `Markdown` instance.

    Keyword Arguments:
        extensions (list[Extension | str]): A list of extensions.

            An instance of a subclass of [`markdown.extensions.Extension`][] is used
            as-is. A `str` item is passed to
            [`build_extension`][markdown.Markdown.build_extension] with its
            corresponding `extension_configs` entry and the returned
            [`markdown.extensions.Extension`][] instance is used.
        extension_configs (dict[str, dict[str, Any]]): Configuration settings for extensions.
        output_format (str): Format of output. Supported formats are:

            * `xhtml`: Outputs XHTML style tags. Default.
            * `html`: Outputs HTML style tags.
        tab_length (int): Length of tabs in the source. Default: `4`

    """
    self.tab_length: int = kwargs.get('tab_length', 4)

    # Characters which receive the backslash escape treatment.
    self.ESCAPED_CHARS: list[str] = [
        '\\', '`', '*', '_', '{', '}', '[', ']', '(', ')', '>', '#', '+', '-', '.', '!'
    ]

    # Per-instance copy so extensions can add block-level tags without
    # affecting other instances.
    self.block_level_elements: list[str] = BLOCK_LEVEL_ELEMENTS.copy()

    self.registeredExtensions: list[Extension] = []
    self.docType = ""  # TODO: Maybe delete this. It does not appear to be used anymore.
    self.stripTopLevelTags: bool = True

    # Assemble the processor/pattern registries before loading extensions,
    # since extensions register themselves against these collections.
    self.build_parser()

    self.references: dict[str, tuple[str, str]] = {}
    self.htmlStash: util.HtmlStash = util.HtmlStash()
    self.registerExtensions(
        extensions=kwargs.get('extensions', []),
        configs=kwargs.get('extension_configs', {}),
    )
    self.set_output_format(kwargs.get('output_format', 'xhtml'))
    self.reset()
|
||||||
|
|
||||||
|
def build_parser(self) -> Markdown:
    """
    Build the parser from the various parts.

    Assigns a value to each of the following attributes on the class instance:

    * **`Markdown.preprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
      [`preprocessors`][markdown.preprocessors].
    * **`Markdown.parser`** ([`BlockParser`][markdown.blockparser.BlockParser]) -- A collection of
      [`blockprocessors`][markdown.blockprocessors].
    * **`Markdown.inlinePatterns`** ([`Registry`][markdown.util.Registry]) -- A collection of
      [`inlinepatterns`][markdown.inlinepatterns].
    * **`Markdown.treeprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
      [`treeprocessors`][markdown.treeprocessors].
    * **`Markdown.postprocessors`** ([`Registry`][markdown.util.Registry]) -- A collection of
      [`postprocessors`][markdown.postprocessors].

    This method could be redefined in a subclass to build a custom parser which is
    made up of a different combination of processors and patterns.

    Returns:
        This instance, to allow chaining.
    """
    # Each factory receives this instance so the built processors can reach
    # shared state on it.
    for attr, factory in (
        ('preprocessors', build_preprocessors),
        ('parser', build_block_parser),
        ('inlinePatterns', build_inlinepatterns),
        ('treeprocessors', build_treeprocessors),
        ('postprocessors', build_postprocessors),
    ):
        setattr(self, attr, factory(self))
    return self
|
||||||
|
|
||||||
|
def registerExtensions(
    self,
    extensions: Sequence[Extension | str],
    configs: Mapping[str, Mapping[str, Any]]
) -> Markdown:
    """
    Load a list of extensions into an instance of the `Markdown` class.

    Arguments:
        extensions: A list of extensions.

            An instance of a subclass of [`markdown.extensions.Extension`][] is used
            as-is. A `str` item is passed to
            [`build_extension`][markdown.Markdown.build_extension] with its
            corresponding `configs` entry and the returned
            [`markdown.extensions.Extension`][] instance is used.
        configs: Configuration settings for extensions.

    Returns:
        This instance, to allow chaining.
    """
    for ext in extensions:
        # Resolve string names into Extension instances first.
        if isinstance(ext, str):
            ext = self.build_extension(ext, configs.get(ext, {}))
        if ext is None:
            # A `None` entry in `extensions` is silently ignored.
            continue
        if not isinstance(ext, Extension):
            raise TypeError(
                'Extension "{}.{}" must be of type: "{}.{}"'.format(
                    ext.__class__.__module__, ext.__class__.__name__,
                    Extension.__module__, Extension.__name__
                )
            )
        ext.extendMarkdown(self)
        logger.debug(
            'Successfully loaded extension "%s.%s".'
            % (ext.__class__.__module__, ext.__class__.__name__)
        )
    return self
|
||||||
|
|
||||||
|
def build_extension(self, ext_name: str, configs: Mapping[str, Any]) -> Extension:
    """
    Build extension from a string name, then return an instance using the given `configs`.

    Arguments:
        ext_name: Name of extension as a string.
        configs: Configuration settings for extension.

    Returns:
        An instance of the extension with the given configuration settings.

    First attempt to load an entry point. The string name must be registered as an entry
    point in the `markdown.extensions` group which points to a subclass of the
    [`markdown.extensions.Extension`][] class. If multiple distributions have registered
    the same name, the first one found is returned.

    If no entry point is found, assume dot notation (`path.to.module:ClassName`). Load the
    specified class and return an instance. If no class is specified, import the module,
    call its `makeExtension` function and return the [`markdown.extensions.Extension`][]
    instance returned by that function.
    """
    # Work on a private copy so the caller's mapping is never mutated.
    configs = dict(configs)

    # 1. Entry points: the first registration with a matching name wins.
    for entry_point in util.get_installed_extensions():
        if entry_point.name == ext_name:
            return entry_point.load()(**configs)

    # 2. Dot notation, with an optional `:ClassName` suffix.
    ext_name, _, class_name = ext_name.partition(':')

    try:
        module = importlib.import_module(ext_name)
        logger.debug(
            'Successfully imported extension module "%s".' % ext_name
        )
    except ImportError as e:
        # Prepend a friendlier message while keeping the original traceback.
        message = 'Failed loading extension "%s".' % ext_name
        e.args = (message,) + e.args[1:]
        raise

    if class_name:
        # Load given class name from module.
        return getattr(module, class_name)(**configs)

    # No class given: expect a `makeExtension()` factory in the module.
    try:
        return module.makeExtension(**configs)
    except AttributeError as e:
        message = e.args[0]
        message = "Failed to initiate extension " \
                  "'%s': %s" % (ext_name, message)
        e.args = (message,) + e.args[1:]
        raise
|
||||||
|
|
||||||
|
def registerExtension(self, extension: Extension) -> Markdown:
    """
    Register an extension as having a resettable state.

    Arguments:
        extension: An instance of the extension to register.

    Typically called once by an extension during setup. The `reset` method of every
    "registered" extension is called by [`Markdown.reset()`][markdown.Markdown.reset].
    Not all extensions have or need a resettable state, so it should not be assumed
    that all extensions are "registered."

    Returns:
        This instance, to allow chaining.
    """
    # `+=` extends the existing list in place.
    self.registeredExtensions += [extension]
    return self
|
||||||
|
|
||||||
|
def reset(self) -> Markdown:
    """
    Resets all state variables to prepare the parser instance for new input.

    Called once upon creation of a class instance. Should be called manually
    between calls to [`Markdown.convert`][markdown.Markdown.convert].

    Returns:
        This instance, to allow chaining.
    """
    self.htmlStash.reset()
    self.references.clear()

    # Only extensions registered via `registerExtension` participate; an
    # extension without a `reset` method is simply skipped.
    for registered in self.registeredExtensions:
        if hasattr(registered, 'reset'):
            registered.reset()

    return self
|
||||||
|
|
||||||
|
def set_output_format(self, format: str) -> Markdown:
    """
    Set the output format for the class instance.

    Arguments:
        format: Must be a known value in `Markdown.output_formats`.
            Case is ignored and a trailing `1`, `4` or `5` is dropped, so
            e.g. `HTML5` and `xhtml1` map to `html` and `xhtml`.

    Raises:
        KeyError: If the normalized format is not in `Markdown.output_formats`.

    Returns:
        This instance, to allow chaining.
    """
    self.output_format = format.lower().rstrip('145')  # ignore number
    try:
        self.serializer = self.output_formats[self.output_format]
    except KeyError as e:
        # Re-raise with a message listing the acceptable values.
        valid_formats = sorted(self.output_formats)
        message = 'Invalid Output Format: "%s". Use one of %s.' \
                  % (self.output_format,
                     '"' + '", "'.join(valid_formats) + '"')
        e.args = (message,) + e.args[1:]
        raise
    return self
|
||||||
|
|
||||||
|
# Note: the `tag` argument is type annotated `Any` as ElementTree uses many various objects as tags.
|
||||||
|
# As there is no standardization in ElementTree, the type of a given tag is unpredictable.
|
||||||
|
def is_block_level(self, tag: Any) -> bool:
    """
    Check if the given `tag` is a block level HTML tag.

    Returns `True` for any string listed in `Markdown.block_level_elements`.
    A `tag` which is not a string always returns `False`.
    """
    if not isinstance(tag, str):
        # ElementTree tags are not always strings (e.g. Comment callables);
        # those are never block level.
        return False
    # Normalize case and drop a trailing slash from self-closing forms.
    return tag.lower().rstrip('/') in self.block_level_elements
|
||||||
|
|
||||||
|
def convert(self, source: str) -> str:
    """
    Convert a Markdown string to a string in the specified output format.

    Arguments:
        source: Markdown formatted text as Unicode or ASCII string.

    Returns:
        A string in the specified output format.

    Markdown parsing takes place in five steps:

    1. A bunch of [`preprocessors`][markdown.preprocessors] munge the input text.
    2. A [`BlockParser`][markdown.blockparser.BlockParser] parses the high-level structural
       elements of the pre-processed text into an
       [`ElementTree`][xml.etree.ElementTree.ElementTree] object.
    3. A bunch of [`treeprocessors`][markdown.treeprocessors] are run against the
       [`ElementTree`][xml.etree.ElementTree.ElementTree] object. One such `treeprocessor`
       ([`markdown.treeprocessors.InlineProcessor`][]) runs
       [`inlinepatterns`][markdown.inlinepatterns] against the
       [`ElementTree`][xml.etree.ElementTree.ElementTree] object, parsing inline markup.
    4. Some [`postprocessors`][markdown.postprocessors] are run against the text after the
       [`ElementTree`][xml.etree.ElementTree.ElementTree] object has been serialized into text.
    5. The output is returned as a string.
    """
    # Whitespace-only input produces no output at all.
    if not source.strip():
        return ''  # a blank Unicode string

    try:
        source = str(source)
    except UnicodeDecodeError as e:  # pragma: no cover
        # Customize error message while maintaining original traceback
        e.reason += '. -- Note: Markdown only accepts Unicode input!'
        raise

    # Split into lines and run the line preprocessors.
    self.lines = source.split("\n")
    for preprocessor in self.preprocessors:
        self.lines = preprocessor.run(self.lines)

    # Parse the high-level elements.
    root = self.parser.parseDocument(self.lines).getroot()

    # Run the tree-processors; a processor may return a replacement root.
    for treeprocessor in self.treeprocessors:
        new_root = treeprocessor.run(root)
        if new_root is not None:
            root = new_root

    # Serialize _properly_. Strip top-level tags.
    output = self.serializer(root)
    if self.stripTopLevelTags:
        try:
            # `+ 2` accounts for the `<` and `>` of the opening wrapper tag.
            start = output.index(
                '<%s>' % self.doc_tag) + len(self.doc_tag) + 2
            end = output.rindex('</%s>' % self.doc_tag)
            output = output[start:end].strip()
        except ValueError as e:  # pragma: no cover
            if output.strip().endswith('<%s />' % self.doc_tag):
                # We have an empty document
                output = ''
            else:
                # We have a serious problem
                raise ValueError('Markdown failed to strip top-level '
                                 'tags. Document=%r' % output.strip()) from e

    # Run the text post-processors
    for postprocessor in self.postprocessors:
        output = postprocessor.run(output)

    return output.strip()
|
||||||
|
|
||||||
|
def convertFile(
    self,
    input: str | TextIO | None = None,
    output: str | TextIO | None = None,
    encoding: str | None = None,
) -> Markdown:
    """
    Converts a Markdown file and returns the HTML as a Unicode string.

    Decodes the file using the provided encoding (defaults to `utf-8`),
    passes the file content to markdown, and outputs the HTML to either
    the provided stream or the file with provided name, using the same
    encoding as the source file. The
    [`xmlcharrefreplace`](https://docs.python.org/3/library/codecs.html#error-handlers)
    error handler is used when encoding the output.

    **Note:** This is the only place that decoding and encoding of Unicode
    takes place in Python-Markdown. (All other code is Unicode-in /
    Unicode-out.)

    Arguments:
        input: File object or path. Reads from `stdin` if `None`.
        output: File object or path. Writes to `stdout` if `None`.
        encoding: Encoding of input and output files. Defaults to `utf-8`.

    Returns:
        This instance, to allow chaining.
    """
    encoding = encoding or "utf-8"

    # Read the source.
    if input:
        if isinstance(input, str):
            src = codecs.open(input, mode="r", encoding=encoding)
        else:
            # Wrap an already-open stream in a decoding reader.
            src = codecs.getreader(encoding)(input)
        text = src.read()
        src.close()
    else:
        text = sys.stdin.read()
        if not isinstance(text, str):  # pragma: no cover
            text = text.decode(encoding)

    text = text.lstrip('\ufeff')  # remove the byte-order mark

    # Convert.
    html = self.convert(text)

    # Write to file or stdout.
    if output:
        if isinstance(output, str):
            dest = codecs.open(output, "w",
                               encoding=encoding,
                               errors="xmlcharrefreplace")
            dest.write(html)
            dest.close()
        else:
            dest = codecs.getwriter(encoding)(output, errors="xmlcharrefreplace")
            dest.write(html)
            # Don't close here. User may want to write more.
    else:
        # Encode manually and write bytes to stdout.
        html = html.encode(encoding, "xmlcharrefreplace")
        try:
            # Write bytes directly to buffer (Python 3).
            sys.stdout.buffer.write(html)
        except AttributeError:  # pragma: no cover
            # Probably Python 2, which works with bytes by default.
            sys.stdout.write(html)

    return self
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
EXPORTED FUNCTIONS
|
||||||
|
=============================================================================
|
||||||
|
|
||||||
|
Those are the two functions we really mean to export: `markdown()` and
|
||||||
|
`markdownFromFile()`.
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def markdown(text: str, **kwargs: Any) -> str:
    """
    Convert a markdown string to HTML and return HTML as a Unicode string.

    This is a shortcut function for [`Markdown`][markdown.Markdown] class to cover the most
    basic use case. It initializes an instance of [`Markdown`][markdown.Markdown], loads the
    necessary extensions and runs the parser on the given text.

    Arguments:
        text: Markdown formatted text as Unicode or ASCII string.

    Keyword arguments:
        **kwargs: Any arguments accepted by the Markdown class.

    Returns:
        A string in the specified output format.

    """
    # A throwaway instance: no state is shared between calls.
    return Markdown(**kwargs).convert(text)
|
||||||
|
|
||||||
|
|
||||||
|
def markdownFromFile(**kwargs: Any):
    """
    Read Markdown text from a file and write output to a file or a stream.

    This is a shortcut function which initializes an instance of
    [`Markdown`][markdown.Markdown], and calls the
    [`convertFile`][markdown.Markdown.convertFile] method rather than
    [`convert`][markdown.Markdown.convert].

    Keyword arguments:
        input (str | TextIO): A file name or readable object.
        output (str | TextIO): A file name or writable object.
        encoding (str): Encoding of input and output.
        **kwargs: Any arguments accepted by the `Markdown` class.

    """
    md = Markdown(**kwargs)
    # `input`, `output` and `encoding` are consumed here; the `Markdown`
    # constructor reads only the keywords it knows about.
    md.convertFile(
        kwargs.get('input', None),
        kwargs.get('output', None),
        kwargs.get('encoding', None),
    )
|
||||||
145
plugins/markdown_preview/markdown/extensions/__init__.py
Normal file
145
plugins/markdown_preview/markdown/extensions/__init__.py
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
Markdown accepts an [`Extension`][markdown.extensions.Extension] instance for each extension. Therefore, each extension
|
||||||
|
must define a class that extends [`Extension`][markdown.extensions.Extension] and overrides the
|
||||||
|
[`extendMarkdown`][markdown.extensions.Extension.extendMarkdown] method. Within this class one can manage configuration
|
||||||
|
options for their extension and attach the various processors and patterns which make up an extension to the
|
||||||
|
[`Markdown`][markdown.Markdown] instance.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import TYPE_CHECKING, Any, Mapping, Sequence
|
||||||
|
from ..util import parseBoolValue
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
|
||||||
|
class Extension:
    """ Base class for extensions to subclass. """

    config: Mapping[str, list] = {}
    """
    Default configuration for an extension.

    This attribute is to be defined in a subclass and must be of the following format:

    ``` python
    config = {
        'key': ['value', 'description']
    }
    ```

    Note that [`setConfig`][markdown.extensions.Extension.setConfig] will raise a [`KeyError`][]
    if a default is not set for each option.
    """

    def __init__(self, **kwargs):
        """ Initiate Extension and set up configs. """
        self.setConfigs(kwargs)

    def getConfig(self, key: str, default: Any = '') -> Any:
        """
        Return a single configuration option value.

        Arguments:
            key: The configuration option name.
            default: Default value to return if key is not set.

        Returns:
            Value of stored configuration option.
        """
        if key in self.config:
            return self.config[key][0]
        else:
            return default

    def getConfigs(self) -> dict[str, Any]:
        """
        Return all configuration options.

        Returns:
            All configuration options.
        """
        return {key: self.getConfig(key) for key in self.config.keys()}

    def getConfigInfo(self) -> list[tuple[str, str]]:
        """
        Return descriptions of all configuration options.

        Returns:
            All descriptions of configuration options.
        """
        return [(key, self.config[key][1]) for key in self.config.keys()]

    def setConfig(self, key: str, value: Any) -> None:
        """
        Set a configuration option.

        If the corresponding default value set in [`config`][markdown.extensions.Extension.config]
        is a `bool` value or `None`, then `value` is passed through
        [`parseBoolValue`][markdown.util.parseBoolValue] before being stored.

        Arguments:
            key: Name of configuration option to set.
            value: Value to assign to option.

        Raises:
            KeyError: If `key` is not known.
        """
        if isinstance(self.config[key][0], bool):
            value = parseBoolValue(value)
        if self.config[key][0] is None:
            value = parseBoolValue(value, preserve_none=True)
        # NOTE: this mutates the `config` lists in place; when `config` is a
        # class attribute the stored value is shared by design.
        self.config[key][0] = value

    def setConfigs(self, items: Mapping[str, Any] | Sequence[tuple[str, Any]]):
        """
        Loop through a collection of configuration options, passing each to
        [`setConfig`][markdown.extensions.Extension.setConfig].

        Arguments:
            items: Collection of configuration options.

        Raises:
            KeyError: for any unknown key.
        """
        if hasattr(items, 'items'):
            # it's a dict
            items = items.items()
        for key, value in items:
            self.setConfig(key, value)

    def extendMarkdown(self, md: Markdown) -> None:
        """
        Add the various processors and patterns to the Markdown Instance.

        This method must be overridden by every extension.

        Arguments:
            md: The Markdown instance.

        Raises:
            NotImplementedError: Always, unless overridden by a subclass.
        """
        # Fixed: the two adjacent literals previously concatenated without a
        # space, yielding the garbled message '..."extendMarkdown"method.'.
        raise NotImplementedError(
            'Extension "%s.%s" must define an "extendMarkdown" '
            'method.' % (self.__class__.__module__, self.__class__.__name__)
        )
|
||||||
105
plugins/markdown_preview/markdown/extensions/abbr.py
Normal file
105
plugins/markdown_preview/markdown/extensions/abbr.py
Normal file
@@ -0,0 +1,105 @@
|
|||||||
|
# Abbreviation Extension for Python-Markdown
|
||||||
|
# ==========================================
|
||||||
|
|
||||||
|
# This extension adds abbreviation handling to Python-Markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/abbreviations
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/)
|
||||||
|
# and [Seemant Kulleen](http://www.kulleen.org/)
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
This extension adds abbreviation handling to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/abbreviations)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor
|
||||||
|
from ..inlinepatterns import InlineProcessor
|
||||||
|
from ..util import AtomicString
|
||||||
|
import re
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
|
|
||||||
|
class AbbrExtension(Extension):
    """ Abbreviation Extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Insert `AbbrPreprocessor` before `ReferencePreprocessor`. """
        # Priority 16 places it ahead of the reference block processor.
        processor = AbbrPreprocessor(md.parser)
        md.parser.blockprocessors.register(processor, 'abbr', 16)
|
||||||
|
|
||||||
|
|
||||||
|
class AbbrPreprocessor(BlockProcessor):
    """ Abbreviation Preprocessor - parse text for abbr references. """

    # Matches a definition line of the form `*[ABBR]: expansion`.
    RE = re.compile(r'^[*]\[(?P<abbr>[^\]]*)\][ ]?:[ ]*\n?[ ]*(?P<title>.*)$', re.MULTILINE)

    def test(self, parent, block):
        # Always claim the block; `run` restores it when nothing matches.
        return True

    def run(self, parent, blocks):
        """
        Find and remove all Abbreviation references from the text.
        Each reference is set as a new `AbbrPattern` in the markdown instance.

        """
        block = blocks.pop(0)
        m = self.RE.search(block)
        if not m:
            # No match. Restore block.
            blocks.insert(0, block)
            return False

        abbr = m.group('abbr').strip()
        title = m.group('title').strip()
        # Register an inline pattern keyed to this abbreviation.
        self.parser.md.inlinePatterns.register(
            AbbrInlineProcessor(self._generate_pattern(abbr), title), 'abbr-%s' % abbr, 2
        )
        trailing = block[m.end():]
        if trailing.strip():
            # Add any content after match back to blocks as separate block
            blocks.insert(0, trailing.lstrip('\n'))
        leading = block[:m.start()]
        if leading.strip():
            # Add any content before match back to blocks as separate block
            blocks.insert(0, leading.rstrip('\n'))
        return True

    def _generate_pattern(self, text):
        """
        Given a string, returns an regex pattern to match that string.

        'HTML' -> r'(?P<abbr>[H][T][M][L])'

        Note: we force each char as a literal match (in brackets) as we don't
        know what they will be beforehand.

        """
        return r'(?P<abbr>\b%s\b)' % ''.join(r'[%s]' % char for char in text)
|
||||||
|
|
||||||
|
|
||||||
|
class AbbrInlineProcessor(InlineProcessor):
    """ Abbreviation inline pattern. """

    def __init__(self, pattern, title):
        super().__init__(pattern)
        # Expansion text; emitted as the `title` attribute of the element.
        self.title = title

    def handleMatch(self, m, data):
        # `AtomicString` marks the text so other inline patterns leave it alone.
        el = etree.Element('abbr')
        el.text = AtomicString(m.group('abbr'))
        el.set('title', self.title)
        return el, m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an `AbbrExtension` instance (extension API entry point). """
    return AbbrExtension(**kwargs)
|
||||||
179
plugins/markdown_preview/markdown/extensions/admonition.py
Normal file
179
plugins/markdown_preview/markdown/extensions/admonition.py
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
# Admonition extension for Python-Markdown
|
||||||
|
# ========================================
|
||||||
|
|
||||||
|
# Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
|
||||||
|
|
||||||
|
# [rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/admonition
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright [Tiago Serafim](https://www.tiagoserafim.com/).
|
||||||
|
|
||||||
|
# All changes Copyright The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds rST-style admonitions. Inspired by [rST][] feature with the same name.
|
||||||
|
|
||||||
|
[rST]: http://docutils.sourceforge.net/docs/ref/rst/directives.html#specific-admonitions
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/admonition)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class AdmonitionExtension(Extension):
    """ Admonition extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Add Admonition to Markdown instance. """
        # Register so `Markdown.reset()` can reset this extension.
        md.registerExtension(self)

        processor = AdmonitionProcessor(md.parser)
        md.parser.blockprocessors.register(processor, 'admonition', 105)
|
||||||
|
|
||||||
|
|
||||||
|
class AdmonitionProcessor(BlockProcessor):
|
||||||
|
|
||||||
|
# CSS class applied to the generated `<div>` wrapper.
CLASSNAME = 'admonition'
# CSS class applied to the generated title paragraph.
CLASSNAME_TITLE = 'admonition-title'
# Matches the opening line `!!! type [type ...] ["optional title"]`.
RE = re.compile(r'(?:^|\n)!!! ?([\w\-]+(?: +[\w\-]+)*)(?: +"(.*?)")? *(?:\n|$)')
# Matches runs of one or more spaces.
RE_SPACES = re.compile(' +')
|
||||||
|
|
||||||
|
def __init__(self, parser):
    """Initialization."""

    super().__init__(parser)

    # Sibling admonition element carried between calls; consumed and reset
    # by `parse_content`. NOTE(review): appears to be assigned elsewhere
    # (presumably by `test`) before `parse_content` reads it -- confirm.
    self.current_sibling = None
    # Fixed: was misspelled `self.content_indention`, so the
    # `content_indent` attribute actually read (and reset) by
    # `parse_content` was never initialized here.
    self.content_indent = 0
|
||||||
|
|
||||||
|
def parse_content(self, parent, block):
|
||||||
|
"""Get sibling admonition.
|
||||||
|
|
||||||
|
Retrieve the appropriate sibling element. This can get tricky when
|
||||||
|
dealing with lists.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
old_block = block
|
||||||
|
the_rest = ''
|
||||||
|
|
||||||
|
# We already acquired the block via test
|
||||||
|
if self.current_sibling is not None:
|
||||||
|
sibling = self.current_sibling
|
||||||
|
block, the_rest = self.detab(block, self.content_indent)
|
||||||
|
self.current_sibling = None
|
||||||
|
self.content_indent = 0
|
||||||
|
return sibling, block, the_rest
|
||||||
|
|
||||||
|
sibling = self.lastChild(parent)
|
||||||
|
|
||||||
|
if sibling is None or sibling.tag != 'div' or sibling.get('class', '').find(self.CLASSNAME) == -1:
|
||||||
|
sibling = None
|
||||||
|
else:
|
||||||
|
# If the last child is a list and the content is sufficiently indented
|
||||||
|
# to be under it, then the content's sibling is in the list.
|
||||||
|
last_child = self.lastChild(sibling)
|
||||||
|
indent = 0
|
||||||
|
while last_child is not None:
|
||||||
|
if (
|
||||||
|
sibling is not None and block.startswith(' ' * self.tab_length * 2) and
|
||||||
|
last_child is not None and last_child.tag in ('ul', 'ol', 'dl')
|
||||||
|
):
|
||||||
|
|
||||||
|
# The expectation is that we'll find an `<li>` or `<dt>`.
|
||||||
|
# We should get its last child as well.
|
||||||
|
sibling = self.lastChild(last_child)
|
||||||
|
last_child = self.lastChild(sibling) if sibling is not None else None
|
||||||
|
|
||||||
|
# Context has been lost at this point, so we must adjust the
|
||||||
|
# text's indentation level so it will be evaluated correctly
|
||||||
|
# under the list.
|
||||||
|
block = block[self.tab_length:]
|
||||||
|
indent += self.tab_length
|
||||||
|
else:
|
||||||
|
last_child = None
|
||||||
|
|
||||||
|
if not block.startswith(' ' * self.tab_length):
|
||||||
|
sibling = None
|
||||||
|
|
||||||
|
if sibling is not None:
|
||||||
|
indent += self.tab_length
|
||||||
|
block, the_rest = self.detab(old_block, indent)
|
||||||
|
self.current_sibling = sibling
|
||||||
|
self.content_indent = indent
|
||||||
|
|
||||||
|
return sibling, block, the_rest
|
||||||
|
|
||||||
|
def test(self, parent, block):
|
||||||
|
|
||||||
|
if self.RE.search(block):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return self.parse_content(parent, block)[0] is not None
|
||||||
|
|
||||||
|
def run(self, parent, blocks):
|
||||||
|
block = blocks.pop(0)
|
||||||
|
m = self.RE.search(block)
|
||||||
|
|
||||||
|
if m:
|
||||||
|
if m.start() > 0:
|
||||||
|
self.parser.parseBlocks(parent, [block[:m.start()]])
|
||||||
|
block = block[m.end():] # removes the first line
|
||||||
|
block, theRest = self.detab(block)
|
||||||
|
else:
|
||||||
|
sibling, block, theRest = self.parse_content(parent, block)
|
||||||
|
|
||||||
|
if m:
|
||||||
|
klass, title = self.get_class_and_title(m)
|
||||||
|
div = etree.SubElement(parent, 'div')
|
||||||
|
div.set('class', '{} {}'.format(self.CLASSNAME, klass))
|
||||||
|
if title:
|
||||||
|
p = etree.SubElement(div, 'p')
|
||||||
|
p.text = title
|
||||||
|
p.set('class', self.CLASSNAME_TITLE)
|
||||||
|
else:
|
||||||
|
# Sibling is a list item, but we need to wrap it's content should be wrapped in <p>
|
||||||
|
if sibling.tag in ('li', 'dd') and sibling.text:
|
||||||
|
text = sibling.text
|
||||||
|
sibling.text = ''
|
||||||
|
p = etree.SubElement(sibling, 'p')
|
||||||
|
p.text = text
|
||||||
|
|
||||||
|
div = sibling
|
||||||
|
|
||||||
|
self.parser.parseChunk(div, block)
|
||||||
|
|
||||||
|
if theRest:
|
||||||
|
# This block contained unindented line(s) after the first indented
|
||||||
|
# line. Insert these lines as the first block of the master blocks
|
||||||
|
# list for future processing.
|
||||||
|
blocks.insert(0, theRest)
|
||||||
|
|
||||||
|
def get_class_and_title(self, match):
|
||||||
|
klass, title = match.group(1).lower(), match.group(2)
|
||||||
|
klass = self.RE_SPACES.sub(' ', klass)
|
||||||
|
if title is None:
|
||||||
|
# no title was provided, use the capitalized class name as title
|
||||||
|
# e.g.: `!!! note` will render
|
||||||
|
# `<p class="admonition-title">Note</p>`
|
||||||
|
title = klass.split(' ', 1)[0].capitalize()
|
||||||
|
elif title == '':
|
||||||
|
# an explicit blank title should not be rendered
|
||||||
|
# e.g.: `!!! warning ""` will *not* render `p` with a title
|
||||||
|
title = None
|
||||||
|
return klass, title
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Standard extension entry point: return an `AdmonitionExtension`. """
    return AdmonitionExtension(**kwargs)
|
||||||
179
plugins/markdown_preview/markdown/extensions/attr_list.py
Normal file
179
plugins/markdown_preview/markdown/extensions/attr_list.py
Normal file
@@ -0,0 +1,179 @@
|
|||||||
|
# Attribute List Extension for Python-Markdown
|
||||||
|
# ============================================
|
||||||
|
|
||||||
|
# Adds attribute list syntax. Inspired by
|
||||||
|
# [Maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
|
||||||
|
# feature of the same name.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/attr_list
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2011 [Waylan Limberg](http://achinghead.com/).
|
||||||
|
|
||||||
|
# All changes Copyright 2011-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds attribute list syntax. Inspired by
|
||||||
|
[Maruku](http://maruku.rubyforge.org/proposal.html#attribute_lists)'s
|
||||||
|
feature of the same name.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/attr_list)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
from typing import TYPE_CHECKING
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..treeprocessors import Treeprocessor
|
||||||
|
import re
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from xml.etree.ElementTree import Element
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_double_quote(s, t):
|
||||||
|
k, v = t.split('=', 1)
|
||||||
|
return k, v.strip('"')
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_single_quote(s, t):
|
||||||
|
k, v = t.split('=', 1)
|
||||||
|
return k, v.strip("'")
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_key_value(s, t):
|
||||||
|
return t.split('=', 1)
|
||||||
|
|
||||||
|
|
||||||
|
def _handle_word(s, t):
|
||||||
|
if t.startswith('.'):
|
||||||
|
return '.', t[1:]
|
||||||
|
if t.startswith('#'):
|
||||||
|
return 'id', t[1:]
|
||||||
|
return t, t
|
||||||
|
|
||||||
|
|
||||||
|
# Token scanner for attribute lists. Patterns are tried in order; each match
# is handed to its handler, which returns a `(key, value)` pair.
_scanner = re.Scanner([
    (r'[^ =]+=".*?"', _handle_double_quote),   # key="double quoted value"
    (r"[^ =]+='.*?'", _handle_single_quote),   # key='single quoted value'
    (r'[^ =]+=[^ =]+', _handle_key_value),     # bare key=value
    (r'[^ =]+', _handle_word),                 # lone word, `.class`, or `#id`
    (r' ', None)                               # spaces are skipped
])
|
||||||
|
|
||||||
|
|
||||||
|
def get_attrs(str: str) -> list[tuple[str, str]]:
    """ Parse attribute list and return a list of attribute tuples.

    NOTE(review): the parameter shadows the builtin `str`; name kept
    unchanged for backward compatibility with existing callers.
    """
    tokens, _remainder = _scanner.scan(str)
    return tokens
|
||||||
|
|
||||||
|
|
||||||
|
def isheader(elem: Element) -> bool:
    """ Return `True` if `elem` is an HTML heading element (`h1`-`h6`). """
    return elem.tag in {'h1', 'h2', 'h3', 'h4', 'h5', 'h6'}
|
||||||
|
|
||||||
|
|
||||||
|
class AttrListTreeprocessor(Treeprocessor):
    """ Scan the tree for `{: ... }` attribute lists and assign them to elements. """

    # `{: payload }` — optional colon, payload captured in group 1.
    BASE_RE = r'\{\:?[ ]*([^\}\n ][^\}\n]*)[ ]*\}'
    HEADER_RE = re.compile(r'[ ]+{}[ ]*$'.format(BASE_RE))
    BLOCK_RE = re.compile(r'\n[ ]*{}[ ]*$'.format(BASE_RE))
    INLINE_RE = re.compile(r'^{}'.format(BASE_RE))
    # Matches runs of characters NOT valid in an XML NCName; used by
    # `sanitize_name` to replace them with underscores.
    NAME_RE = re.compile(r'[^A-Z_a-z\u00c0-\u00d6\u00d8-\u00f6\u00f8-\u02ff'
                         r'\u0370-\u037d\u037f-\u1fff\u200c-\u200d'
                         r'\u2070-\u218f\u2c00-\u2fef\u3001-\ud7ff'
                         r'\uf900-\ufdcf\ufdf0-\ufffd'
                         r'\:\-\.0-9\u00b7\u0300-\u036f\u203f-\u2040]+')

    def run(self, doc: Element):
        """ Walk the whole tree; find and strip attribute lists, applying them. """
        for elem in doc.iter():
            if self.md.is_block_level(elem.tag):
                # Block level: check for `attrs` on last line of text
                RE = self.BLOCK_RE
                if isheader(elem) or elem.tag in ['dt', 'td', 'th']:
                    # header, def-term, or table cell: check for attributes at end of element
                    RE = self.HEADER_RE
                if len(elem) and elem.tag == 'li':
                    # special case list items. children may include a `ul` or `ol`.
                    pos = None
                    # find the `ul` or `ol` position
                    for i, child in enumerate(elem):
                        if child.tag in ['ul', 'ol']:
                            pos = i
                            break
                    if pos is None and elem[-1].tail:
                        # use tail of last child. no `ul` or `ol`.
                        m = RE.search(elem[-1].tail)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            elem[-1].tail = elem[-1].tail[:m.start()]
                    elif pos is not None and pos > 0 and elem[pos-1].tail:
                        # use tail of last child before `ul` or `ol`
                        m = RE.search(elem[pos-1].tail)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            elem[pos-1].tail = elem[pos-1].tail[:m.start()]
                    elif elem.text:
                        # use text. `ul` is first child.
                        m = RE.search(elem.text)
                        if m:
                            self.assign_attrs(elem, m.group(1))
                            elem.text = elem.text[:m.start()]
                elif len(elem) and elem[-1].tail:
                    # has children. Get from tail of last child
                    m = RE.search(elem[-1].tail)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem[-1].tail = elem[-1].tail[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem[-1].tail = elem[-1].tail.rstrip('#').rstrip()
                elif elem.text:
                    # no children. Get from text.
                    m = RE.search(elem.text)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem.text = elem.text[:m.start()]
                        if isheader(elem):
                            # clean up trailing #s
                            elem.text = elem.text.rstrip('#').rstrip()
            else:
                # inline: check for `attrs` at start of tail
                if elem.tail:
                    m = self.INLINE_RE.match(elem.tail)
                    if m:
                        self.assign_attrs(elem, m.group(1))
                        elem.tail = elem.tail[m.end():]

    def assign_attrs(self, elem: Element, attrs: str) -> None:
        """ Assign `attrs` to element. """
        for k, v in get_attrs(attrs):
            if k == '.':
                # add to class
                cls = elem.get('class')
                if cls:
                    elem.set('class', '{} {}'.format(cls, v))
                else:
                    elem.set('class', v)
            else:
                # assign attribute `k` with `v`
                elem.set(self.sanitize_name(k), v)

    def sanitize_name(self, name: str) -> str:
        """
        Sanitize name as 'an XML Name, minus the ":"'.
        See https://www.w3.org/TR/REC-xml-names/#NT-NCName
        """
        return self.NAME_RE.sub('_', name)
|
||||||
|
|
||||||
|
|
||||||
|
class AttrListExtension(Extension):
    """ Attribute List extension for Python-Markdown """

    def extendMarkdown(self, md):
        """ Register the tree processor and this extension instance. """
        tree_processor = AttrListTreeprocessor(md)
        md.treeprocessors.register(tree_processor, 'attr_list', 8)
        md.registerExtension(self)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Standard extension entry point: return an `AttrListExtension`. """
    return AttrListExtension(**kwargs)
|
||||||
338
plugins/markdown_preview/markdown/extensions/codehilite.py
Normal file
338
plugins/markdown_preview/markdown/extensions/codehilite.py
Normal file
@@ -0,0 +1,338 @@
|
|||||||
|
# CodeHilite Extension for Python-Markdown
|
||||||
|
# ========================================
|
||||||
|
|
||||||
|
# Adds code/syntax highlighting to standard Python-Markdown code blocks.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/code_hilite
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2006-2008 [Waylan Limberg](http://achinghead.com/).
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds code/syntax highlighting to standard Python-Markdown code blocks.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/code_hilite)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..treeprocessors import Treeprocessor
|
||||||
|
from ..util import parseBoolValue
|
||||||
|
|
||||||
|
try: # pragma: no cover
|
||||||
|
from pygments import highlight
|
||||||
|
from pygments.lexers import get_lexer_by_name, guess_lexer
|
||||||
|
from pygments.formatters import get_formatter_by_name
|
||||||
|
from pygments.util import ClassNotFound
|
||||||
|
pygments = True
|
||||||
|
except ImportError: # pragma: no cover
|
||||||
|
pygments = False
|
||||||
|
|
||||||
|
|
||||||
|
def parse_hl_lines(expr: str) -> list[int]:
    """Support our syntax for emphasizing certain lines of code.

    `expr` should be like '1 2' to emphasize lines 1 and 2 of a code block.
    Returns a list of integers, the line numbers to emphasize.
    An empty/None `expr` or any non-numeric token yields an empty list.
    """
    if not expr:
        return []

    try:
        return [int(num) for num in expr.split()]
    except ValueError:  # pragma: no cover
        return []
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------ The Main CodeHilite Class ----------------------
|
||||||
|
class CodeHilite:
    """
    Determine language of source code, and pass it on to the Pygments highlighter.

    Usage:

    ```python
    code = CodeHilite(src=some_code, lang='python')
    html = code.hilite()
    ```

    Arguments:
    src: Source string or any object with a `.readline` attribute.

    Keyword arguments:
    lang (str): String name of Pygments lexer to use for highlighting. Default: `None`.
    guess_lang (bool): Auto-detect which lexer to use.
        Ignored if `lang` is set to a valid value. Default: `True`.
    use_pygments (bool): Pass code to Pygments for code highlighting. If `False`, the code is
        instead wrapped for highlighting by a JavaScript library. Default: `True`.
    pygments_formatter (str): The name of a Pygments formatter or a formatter class used for
        highlighting the code blocks. Default: `html`.
    linenums (bool): An alias to Pygments `linenos` formatter option. Default: `None`.
    css_class (str): An alias to Pygments `cssclass` formatter option. Default: 'codehilite'.
    lang_prefix (str): Prefix prepended to the language. Default: "language-".

    Other Options:

    Any other options are accepted and passed on to the lexer and formatter. Therefore,
    valid options include any options which are accepted by the `html` formatter or
    whichever lexer the code's language uses. Note that most lexers do not have any
    options. However, a few have very useful options, such as PHP's `startinline` option.
    Any invalid options are ignored without error.

    * **Formatter options**: <https://pygments.org/docs/formatters/#HtmlFormatter>
    * **Lexer Options**: <https://pygments.org/docs/lexers/>

    Additionally, when Pygments is enabled, the code's language is passed to the
    formatter as an extra option `lang_str`, whose value being `{lang_prefix}{lang}`.
    This option has no effect to the Pygments' builtin formatters.

    Advanced Usage:

    ```python
    code = CodeHilite(
        src = some_code,
        lang = 'php',
        startinline = True,      # Lexer option. Snippet does not start with `<?php`.
        linenostart = 42,        # Formatter option. Snippet starts on line 42.
        hl_lines = [45, 49, 50], # Formatter option. Highlight lines 45, 49, and 50.
        linenos = 'inline'       # Formatter option. Avoid alignment problems.
    )
    html = code.hilite()
    ```

    """

    def __init__(self, src: str, **options):
        # Pop our own keyword options off; everything left in `options` is
        # forwarded to the Pygments lexer/formatter.
        self.src = src
        self.lang = options.pop('lang', None)
        self.guess_lang = options.pop('guess_lang', True)
        self.use_pygments = options.pop('use_pygments', True)
        self.lang_prefix = options.pop('lang_prefix', 'language-')
        self.pygments_formatter = options.pop('pygments_formatter', 'html')

        # Translate our aliases to the Pygments option names.
        if 'linenos' not in options:
            options['linenos'] = options.pop('linenums', None)
        if 'cssclass' not in options:
            options['cssclass'] = options.pop('css_class', 'codehilite')
        if 'wrapcode' not in options:
            # Override Pygments default
            options['wrapcode'] = True
        # Disallow use of `full` option
        options['full'] = False

        self.options = options

    def hilite(self, shebang=True) -> str:
        """
        Pass code to the [Pygments](https://pygments.org/) highlighter with
        optional line numbers. The output should then be styled with CSS to
        your liking. No styles are applied by default - only styling hooks
        (i.e.: `<span class="k">`).

        returns : A string of html.

        """

        self.src = self.src.strip('\n')

        if self.lang is None and shebang:
            self._parseHeader()

        if pygments and self.use_pygments:
            try:
                lexer = get_lexer_by_name(self.lang, **self.options)
            except ValueError:
                try:
                    if self.guess_lang:
                        lexer = guess_lexer(self.src, **self.options)
                    else:
                        lexer = get_lexer_by_name('text', **self.options)
                except ValueError:  # pragma: no cover
                    lexer = get_lexer_by_name('text', **self.options)
            if not self.lang:
                # Use the guessed lexer's language instead
                self.lang = lexer.aliases[0]
            lang_str = f'{self.lang_prefix}{self.lang}'
            if isinstance(self.pygments_formatter, str):
                try:
                    formatter = get_formatter_by_name(self.pygments_formatter, **self.options)
                except ClassNotFound:
                    # Unknown formatter name: fall back to the HTML formatter.
                    formatter = get_formatter_by_name('html', **self.options)
            else:
                # A formatter class was supplied; it receives `lang_str` too.
                formatter = self.pygments_formatter(lang_str=lang_str, **self.options)
            return highlight(self.src, lexer, formatter)
        else:
            # just escape and build markup usable by JavaScript highlighting libraries
            txt = self.src.replace('&', '&amp;')
            txt = txt.replace('<', '&lt;')
            txt = txt.replace('>', '&gt;')
            txt = txt.replace('"', '&quot;')
            classes = []
            if self.lang:
                classes.append('{}{}'.format(self.lang_prefix, self.lang))
            if self.options['linenos']:
                classes.append('linenums')
            class_str = ''
            if classes:
                class_str = ' class="{}"'.format(' '.join(classes))
            return '<pre class="{}"><code{}>{}\n</code></pre>\n'.format(
                self.options['cssclass'],
                class_str,
                txt
            )

    def _parseHeader(self):
        """
        Determines language of a code block from shebang line and whether the
        said line should be removed or left in place. If the shebang line
        contains a path (even a single /) then it is assumed to be a real
        shebang line and left alone. However, if no path is given
        (e.i.: `#!python` or `:::python`) then it is assumed to be a mock shebang
        for language identification of a code fragment and removed from the
        code block prior to processing for code highlighting. When a mock
        shebang (e.i: `#!python`) is found, line numbering is turned on. When
        colons are found in place of a shebang (e.i.: `:::python`), line
        numbering is left in the current state - off by default.

        Also parses optional list of highlight lines, like:

            :::python hl_lines="1 3"
        """

        import re

        # split text into lines
        lines = self.src.split("\n")
        # pull first line to examine
        fl = lines.pop(0)

        c = re.compile(r'''
            (?:(?:^::+)|(?P<shebang>^[#]!))  # Shebang or 2 or more colons
            (?P<path>(?:/\w+)*[/ ])?        # Zero or 1 path
            (?P<lang>[\w#.+-]*)             # The language
            \s*                             # Arbitrary whitespace
            # Optional highlight lines, single- or double-quote-delimited
            (hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot))?
            ''', re.VERBOSE)
        # search first line for shebang
        m = c.search(fl)
        if m:
            # we have a match
            try:
                self.lang = m.group('lang').lower()
            except IndexError:  # pragma: no cover
                self.lang = None
            if m.group('path'):
                # path exists - restore first line
                lines.insert(0, fl)
            if self.options['linenos'] is None and m.group('shebang'):
                # Overridable and Shebang exists - use line numbers
                self.options['linenos'] = True

            self.options['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
        else:
            # No match
            lines.insert(0, fl)

        self.src = "\n".join(lines).strip("\n")
|
||||||
|
|
||||||
|
|
||||||
|
# ------------------ The Markdown Extension -------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class HiliteTreeprocessor(Treeprocessor):
    """ Highlight source code in code blocks. """

    def code_unescape(self, text):
        """Unescape code."""
        # `&amp;` must be handled last so freshly produced `<` / `>`
        # characters are not re-interpreted as entities.
        for entity, char in (("&lt;", "<"), ("&gt;", ">"), ("&amp;", "&")):
            text = text.replace(entity, char)
        return text

    def run(self, root):
        """ Find code blocks and store in `htmlStash`. """
        for pre in root.iter('pre'):
            if len(pre) != 1 or pre[0].tag != 'code':
                continue
            local_config = self.config.copy()
            highlighter = CodeHilite(
                self.code_unescape(pre[0].text),
                tab_length=self.md.tab_length,
                style=local_config.pop('pygments_style', 'default'),
                **local_config
            )
            placeholder = self.md.htmlStash.store(highlighter.hilite())
            # Replace the whole `<pre>` with a `<p>` holding only the stash
            # placeholder; the `<p>` is removed later when raw HTML is
            # re-inserted.
            pre.clear()
            pre.tag = 'p'
            pre.text = placeholder
|
||||||
|
|
||||||
|
|
||||||
|
class CodeHiliteExtension(Extension):
    """ Add source code highlighting to markdown code blocks. """

    def __init__(self, **kwargs):
        # define default configs
        self.config = {
            'linenums': [
                None, "Use lines numbers. True|table|inline=yes, False=no, None=auto. Default: `None`."
            ],
            'guess_lang': [
                True, "Automatic language detection - Default: `True`."
            ],
            'css_class': [
                "codehilite", "Set class name for wrapper <div> - Default: `codehilite`."
            ],
            'pygments_style': [
                'default', 'Pygments HTML Formatter Style (Colorscheme). Default: `default`.'
            ],
            'noclasses': [
                False, 'Use inline styles instead of CSS classes - Default `False`.'
            ],
            'use_pygments': [
                True, 'Highlight code blocks with pygments. Disable if using a JavaScript library. Default: `True`.'
            ],
            'lang_prefix': [
                'language-', 'Prefix prepended to the language when `use_pygments` is false. Default: `language-`.'
            ],
            'pygments_formatter': [
                'html', 'Use a specific formatter for Pygments highlighting. Default: `html`.'
            ],
        }
        """ Default configuration options. """

        for key, value in kwargs.items():
            if key in self.config:
                self.setConfig(key, value)
            else:
                # manually set unknown keywords.
                if isinstance(value, str):
                    try:
                        # Attempt to parse `str` as a boolean value
                        value = parseBoolValue(value, preserve_none=True)
                    except ValueError:
                        pass  # Assume it's not a boolean value. Use as-is.
                # Unknown options are stored with an empty description so
                # `getConfigs` can still pass them through to `CodeHilite`.
                self.config[key] = [value, '']

    def extendMarkdown(self, md):
        """ Add `HilitePostprocessor` to Markdown instance. """
        hiliter = HiliteTreeprocessor(md)
        # Attach the resolved config to the tree processor instance.
        hiliter.config = self.getConfigs()
        md.treeprocessors.register(hiliter, 'hilite', 30)

        md.registerExtension(self)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Standard extension entry point: return a `CodeHiliteExtension`. """
    return CodeHiliteExtension(**kwargs)
|
||||||
119
plugins/markdown_preview/markdown/extensions/def_list.py
Normal file
119
plugins/markdown_preview/markdown/extensions/def_list.py
Normal file
@@ -0,0 +1,119 @@
|
|||||||
|
# Definition List Extension for Python-Markdown
|
||||||
|
# =============================================
|
||||||
|
|
||||||
|
# Adds parsing of Definition Lists to Python-Markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/definition_lists
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2008 [Waylan Limberg](http://achinghead.com)
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds parsing of Definition Lists to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/definition_lists)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor, ListIndentProcessor
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class DefListProcessor(BlockProcessor):
    """ Process Definition Lists. """

    # A definition line: up to 3 leading spaces, a colon, 1-3 spaces, the text.
    RE = re.compile(r'(^|\n)[ ]{0,3}:[ ]{1,3}(.*?)(\n|$)')
    # A following line NOT indented enough to continue the definition.
    NO_INDENT_RE = re.compile(r'^[ ]{0,3}[^ :]')

    def test(self, parent, block):
        """ Return `True` if the block contains a definition line. """
        return bool(self.RE.search(block))

    def run(self, parent, blocks):
        """ Parse one definition-list block into `<dl>`/`<dt>`/`<dd>` elements. """

        raw_block = blocks.pop(0)
        m = self.RE.search(raw_block)
        # Everything before the first `:` line is the term list (one per line).
        terms = [term.strip() for term in
                 raw_block[:m.start()].split('\n') if term.strip()]
        block = raw_block[m.end():]
        no_indent = self.NO_INDENT_RE.match(block)
        if no_indent:
            d, theRest = (block, None)
        else:
            d, theRest = self.detab(block)
        if d:
            d = '{}\n{}'.format(m.group(2), d)
        else:
            d = m.group(2)
        sibling = self.lastChild(parent)
        if not terms and sibling is None:
            # This is not a definition item. Most likely a paragraph that
            # starts with a colon at the beginning of a document or list.
            blocks.insert(0, raw_block)
            return False
        if not terms and sibling.tag == 'p':
            # The previous paragraph contains the terms
            state = 'looselist'
            terms = sibling.text.split('\n')
            parent.remove(sibling)
            # Acquire new sibling
            sibling = self.lastChild(parent)
        else:
            state = 'list'

        if sibling is not None and sibling.tag == 'dl':
            # This is another item on an existing list
            dl = sibling
            if not terms and len(dl) and dl[-1].tag == 'dd' and len(dl[-1]):
                state = 'looselist'
        else:
            # This is a new list
            dl = etree.SubElement(parent, 'dl')
        # Add terms
        for term in terms:
            dt = etree.SubElement(dl, 'dt')
            dt.text = term
        # Add definition
        self.parser.state.set(state)
        dd = etree.SubElement(dl, 'dd')
        self.parser.parseBlocks(dd, [d])
        self.parser.state.reset()

        if theRest:
            # Unindented trailing lines are re-queued for normal processing.
            blocks.insert(0, theRest)
|
||||||
|
|
||||||
|
|
||||||
|
class DefListIndentProcessor(ListIndentProcessor):
    """ Process indented children of definition list items. """

    # Definition lists need to be aware of all list types
    ITEM_TYPES = ['dd', 'li']
    """ Include `dd` in list item types. """
    LIST_TYPES = ['dl', 'ol', 'ul']
    """ Include `dl` in list types. """

    def create_item(self, parent, block):
        """ Create a new `dd` or `li` (depending on parent) and parse the block with it as the parent. """
        item = etree.SubElement(parent, 'dd')
        self.parser.parseBlocks(item, [block])
|
||||||
|
|
||||||
|
|
||||||
|
class DefListExtension(Extension):
    """ Add definition lists to Markdown. """

    def extendMarkdown(self, md):
        """ Add an instance of `DefListProcessor` to `BlockParser`. """
        parser = md.parser
        parser.blockprocessors.register(DefListIndentProcessor(parser), 'defindent', 85)
        parser.blockprocessors.register(DefListProcessor(parser), 'deflist', 25)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Standard extension entry point: return a `DefListExtension`. """
    return DefListExtension(**kwargs)
|
||||||
66
plugins/markdown_preview/markdown/extensions/extra.py
Normal file
66
plugins/markdown_preview/markdown/extensions/extra.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Python-Markdown Extra Extension
|
||||||
|
# ===============================
|
||||||
|
|
||||||
|
# A compilation of various Python-Markdown extensions that imitates
|
||||||
|
# [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/extra
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Copyright The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
A compilation of various Python-Markdown extensions that imitates
|
||||||
|
[PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/).
|
||||||
|
|
||||||
|
Note that each of the individual extensions still need to be available
|
||||||
|
on your `PYTHONPATH`. This extension simply wraps them all up as a
|
||||||
|
convenience so that only one extension needs to be listed when
|
||||||
|
initiating Markdown. See the documentation for each individual
|
||||||
|
extension for specifics about that extension.
|
||||||
|
|
||||||
|
There may be additional extensions that are distributed with
|
||||||
|
Python-Markdown that are not included here in Extra. Those extensions
|
||||||
|
are not part of PHP Markdown Extra, and therefore, not part of
|
||||||
|
Python-Markdown Extra. If you really would like Extra to include
|
||||||
|
additional extensions, we suggest creating your own clone of Extra
|
||||||
|
under a different name. You could also edit the `extensions` global
|
||||||
|
variable defined below, but be aware that such changes may be lost
|
||||||
|
when you upgrade to any future version of Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/extra)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
|
||||||
|
extensions = [
|
||||||
|
'fenced_code',
|
||||||
|
'footnotes',
|
||||||
|
'attr_list',
|
||||||
|
'def_list',
|
||||||
|
'tables',
|
||||||
|
'abbr',
|
||||||
|
'md_in_html'
|
||||||
|
]
|
||||||
|
""" The list of included extensions. """
|
||||||
|
|
||||||
|
|
||||||
|
class ExtraExtension(Extension):
|
||||||
|
""" Add various extensions to Markdown class."""
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
""" `config` is a dumb holder which gets passed to the actual extension later. """
|
||||||
|
self.config = kwargs
|
||||||
|
|
||||||
|
def extendMarkdown(self, md):
|
||||||
|
""" Register extension instances. """
|
||||||
|
md.registerExtensions(extensions, self.config)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs): # pragma: no cover
|
||||||
|
return ExtraExtension(**kwargs)
|
||||||
182
plugins/markdown_preview/markdown/extensions/fenced_code.py
Normal file
182
plugins/markdown_preview/markdown/extensions/fenced_code.py
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
# Fenced Code Extension for Python Markdown
|
||||||
|
# =========================================
|
||||||
|
|
||||||
|
# This extension adds Fenced Code Blocks to Python-Markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/fenced_code_blocks
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com/).
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
This extension adds Fenced Code Blocks to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/fenced_code_blocks)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from textwrap import dedent
|
||||||
|
from . import Extension
|
||||||
|
from ..preprocessors import Preprocessor
|
||||||
|
from .codehilite import CodeHilite, CodeHiliteExtension, parse_hl_lines
|
||||||
|
from .attr_list import get_attrs, AttrListExtension
|
||||||
|
from ..util import parseBoolValue
|
||||||
|
from ..serializers import _escape_attrib_html
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class FencedCodeExtension(Extension):
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
self.config = {
|
||||||
|
'lang_prefix': ['language-', 'Prefix prepended to the language. Default: "language-"']
|
||||||
|
}
|
||||||
|
""" Default configuration options. """
|
||||||
|
super().__init__(**kwargs)
|
||||||
|
|
||||||
|
def extendMarkdown(self, md):
|
||||||
|
""" Add `FencedBlockPreprocessor` to the Markdown instance. """
|
||||||
|
md.registerExtension(self)
|
||||||
|
|
||||||
|
md.preprocessors.register(FencedBlockPreprocessor(md, self.getConfigs()), 'fenced_code_block', 25)
|
||||||
|
|
||||||
|
|
||||||
|
class FencedBlockPreprocessor(Preprocessor):
|
||||||
|
""" Find and extract fenced code blocks. """
|
||||||
|
|
||||||
|
FENCED_BLOCK_RE = re.compile(
|
||||||
|
dedent(r'''
|
||||||
|
(?P<fence>^(?:~{3,}|`{3,}))[ ]* # opening fence
|
||||||
|
((\{(?P<attrs>[^\}\n]*)\})| # (optional {attrs} or
|
||||||
|
(\.?(?P<lang>[\w#.+-]*)[ ]*)? # optional (.)lang
|
||||||
|
(hl_lines=(?P<quot>"|')(?P<hl_lines>.*?)(?P=quot)[ ]*)?) # optional hl_lines)
|
||||||
|
\n # newline (end of opening fence)
|
||||||
|
(?P<code>.*?)(?<=\n) # the code block
|
||||||
|
(?P=fence)[ ]*$ # closing fence
|
||||||
|
'''),
|
||||||
|
re.MULTILINE | re.DOTALL | re.VERBOSE
|
||||||
|
)
|
||||||
|
|
||||||
|
def __init__(self, md, config):
|
||||||
|
super().__init__(md)
|
||||||
|
self.config = config
|
||||||
|
self.checked_for_deps = False
|
||||||
|
self.codehilite_conf = {}
|
||||||
|
self.use_attr_list = False
|
||||||
|
# List of options to convert to boolean values
|
||||||
|
self.bool_options = [
|
||||||
|
'linenums',
|
||||||
|
'guess_lang',
|
||||||
|
'noclasses',
|
||||||
|
'use_pygments'
|
||||||
|
]
|
||||||
|
|
||||||
|
def run(self, lines):
|
||||||
|
""" Match and store Fenced Code Blocks in the `HtmlStash`. """
|
||||||
|
|
||||||
|
# Check for dependent extensions
|
||||||
|
if not self.checked_for_deps:
|
||||||
|
for ext in self.md.registeredExtensions:
|
||||||
|
if isinstance(ext, CodeHiliteExtension):
|
||||||
|
self.codehilite_conf = ext.getConfigs()
|
||||||
|
if isinstance(ext, AttrListExtension):
|
||||||
|
self.use_attr_list = True
|
||||||
|
|
||||||
|
self.checked_for_deps = True
|
||||||
|
|
||||||
|
text = "\n".join(lines)
|
||||||
|
while 1:
|
||||||
|
m = self.FENCED_BLOCK_RE.search(text)
|
||||||
|
if m:
|
||||||
|
lang, id, classes, config = None, '', [], {}
|
||||||
|
if m.group('attrs'):
|
||||||
|
id, classes, config = self.handle_attrs(get_attrs(m.group('attrs')))
|
||||||
|
if len(classes):
|
||||||
|
lang = classes.pop(0)
|
||||||
|
else:
|
||||||
|
if m.group('lang'):
|
||||||
|
lang = m.group('lang')
|
||||||
|
if m.group('hl_lines'):
|
||||||
|
# Support `hl_lines` outside of `attrs` for backward-compatibility
|
||||||
|
config['hl_lines'] = parse_hl_lines(m.group('hl_lines'))
|
||||||
|
|
||||||
|
# If `config` is not empty, then the `codehighlite` extension
|
||||||
|
# is enabled, so we call it to highlight the code
|
||||||
|
if self.codehilite_conf and self.codehilite_conf['use_pygments'] and config.get('use_pygments', True):
|
||||||
|
local_config = self.codehilite_conf.copy()
|
||||||
|
local_config.update(config)
|
||||||
|
# Combine classes with `cssclass`. Ensure `cssclass` is at end
|
||||||
|
# as Pygments appends a suffix under certain circumstances.
|
||||||
|
# Ignore ID as Pygments does not offer an option to set it.
|
||||||
|
if classes:
|
||||||
|
local_config['css_class'] = '{} {}'.format(
|
||||||
|
' '.join(classes),
|
||||||
|
local_config['css_class']
|
||||||
|
)
|
||||||
|
highliter = CodeHilite(
|
||||||
|
m.group('code'),
|
||||||
|
lang=lang,
|
||||||
|
style=local_config.pop('pygments_style', 'default'),
|
||||||
|
**local_config
|
||||||
|
)
|
||||||
|
|
||||||
|
code = highliter.hilite(shebang=False)
|
||||||
|
else:
|
||||||
|
id_attr = lang_attr = class_attr = kv_pairs = ''
|
||||||
|
if lang:
|
||||||
|
prefix = self.config.get('lang_prefix', 'language-')
|
||||||
|
lang_attr = f' class="{prefix}{_escape_attrib_html(lang)}"'
|
||||||
|
if classes:
|
||||||
|
class_attr = f' class="{_escape_attrib_html(" ".join(classes))}"'
|
||||||
|
if id:
|
||||||
|
id_attr = f' id="{_escape_attrib_html(id)}"'
|
||||||
|
if self.use_attr_list and config and not config.get('use_pygments', False):
|
||||||
|
# Only assign key/value pairs to code element if `attr_list` extension is enabled, key/value
|
||||||
|
# pairs were defined on the code block, and the `use_pygments` key was not set to `True`. The
|
||||||
|
# `use_pygments` key could be either set to `False` or not defined. It is omitted from output.
|
||||||
|
kv_pairs = ''.join(
|
||||||
|
f' {k}="{_escape_attrib_html(v)}"' for k, v in config.items() if k != 'use_pygments'
|
||||||
|
)
|
||||||
|
code = self._escape(m.group('code'))
|
||||||
|
code = f'<pre{id_attr}{class_attr}><code{lang_attr}{kv_pairs}>{code}</code></pre>'
|
||||||
|
|
||||||
|
placeholder = self.md.htmlStash.store(code)
|
||||||
|
text = f'{text[:m.start()]}\n{placeholder}\n{text[m.end():]}'
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
return text.split("\n")
|
||||||
|
|
||||||
|
def handle_attrs(self, attrs):
|
||||||
|
""" Return tuple: `(id, [list, of, classes], {configs})` """
|
||||||
|
id = ''
|
||||||
|
classes = []
|
||||||
|
configs = {}
|
||||||
|
for k, v in attrs:
|
||||||
|
if k == 'id':
|
||||||
|
id = v
|
||||||
|
elif k == '.':
|
||||||
|
classes.append(v)
|
||||||
|
elif k == 'hl_lines':
|
||||||
|
configs[k] = parse_hl_lines(v)
|
||||||
|
elif k in self.bool_options:
|
||||||
|
configs[k] = parseBoolValue(v, fail_on_errors=False, preserve_none=True)
|
||||||
|
else:
|
||||||
|
configs[k] = v
|
||||||
|
return id, classes, configs
|
||||||
|
|
||||||
|
def _escape(self, txt):
|
||||||
|
""" basic html escaping """
|
||||||
|
txt = txt.replace('&', '&')
|
||||||
|
txt = txt.replace('<', '<')
|
||||||
|
txt = txt.replace('>', '>')
|
||||||
|
txt = txt.replace('"', '"')
|
||||||
|
return txt
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs): # pragma: no cover
|
||||||
|
return FencedCodeExtension(**kwargs)
|
||||||
416
plugins/markdown_preview/markdown/extensions/footnotes.py
Normal file
416
plugins/markdown_preview/markdown/extensions/footnotes.py
Normal file
@@ -0,0 +1,416 @@
|
|||||||
|
# Footnotes Extension for Python-Markdown
|
||||||
|
# =======================================
|
||||||
|
|
||||||
|
# Adds footnote handling to Python-Markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/footnotes
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Copyright The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds footnote handling to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/footnotes)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor
|
||||||
|
from ..inlinepatterns import InlineProcessor
|
||||||
|
from ..treeprocessors import Treeprocessor
|
||||||
|
from ..postprocessors import Postprocessor
|
||||||
|
from .. import util
|
||||||
|
from collections import OrderedDict
|
||||||
|
import re
|
||||||
|
import copy
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
|
FN_BACKLINK_TEXT = util.STX + "zz1337820767766393qq" + util.ETX
|
||||||
|
NBSP_PLACEHOLDER = util.STX + "qq3936677670287331zz" + util.ETX
|
||||||
|
RE_REF_ID = re.compile(r'(fnref)(\d+)')
|
||||||
|
|
||||||
|
|
||||||
|
class FootnoteExtension(Extension):
|
||||||
|
""" Footnote Extension. """
|
||||||
|
|
||||||
|
def __init__(self, **kwargs):
|
||||||
|
""" Setup configs. """
|
||||||
|
|
||||||
|
self.config = {
|
||||||
|
'PLACE_MARKER': [
|
||||||
|
'///Footnotes Go Here///', 'The text string that marks where the footnotes go'
|
||||||
|
],
|
||||||
|
'UNIQUE_IDS': [
|
||||||
|
False, 'Avoid name collisions across multiple calls to `reset()`.'
|
||||||
|
],
|
||||||
|
'BACKLINK_TEXT': [
|
||||||
|
'↩', "The text string that links from the footnote to the reader's place."
|
||||||
|
],
|
||||||
|
'SUPERSCRIPT_TEXT': [
|
||||||
|
'{}', "The text string that links from the reader's place to the footnote."
|
||||||
|
],
|
||||||
|
'BACKLINK_TITLE': [
|
||||||
|
'Jump back to footnote %d in the text',
|
||||||
|
'The text string used for the title HTML attribute of the backlink. '
|
||||||
|
'%d will be replaced by the footnote number.'
|
||||||
|
],
|
||||||
|
'SEPARATOR': [
|
||||||
|
':', 'Footnote separator.'
|
||||||
|
]
|
||||||
|
}
|
||||||
|
""" Default configuration options. """
|
||||||
|
super().__init__(**kwargs)
|
||||||
|
|
||||||
|
# In multiple invocations, emit links that don't get tangled.
|
||||||
|
self.unique_prefix = 0
|
||||||
|
self.found_refs = {}
|
||||||
|
self.used_refs = set()
|
||||||
|
|
||||||
|
self.reset()
|
||||||
|
|
||||||
|
def extendMarkdown(self, md):
|
||||||
|
""" Add pieces to Markdown. """
|
||||||
|
md.registerExtension(self)
|
||||||
|
self.parser = md.parser
|
||||||
|
self.md = md
|
||||||
|
# Insert a `blockprocessor` before `ReferencePreprocessor`
|
||||||
|
md.parser.blockprocessors.register(FootnoteBlockProcessor(self), 'footnote', 17)
|
||||||
|
|
||||||
|
# Insert an inline pattern before `ImageReferencePattern`
|
||||||
|
FOOTNOTE_RE = r'\[\^([^\]]*)\]' # blah blah [^1] blah
|
||||||
|
md.inlinePatterns.register(FootnoteInlineProcessor(FOOTNOTE_RE, self), 'footnote', 175)
|
||||||
|
# Insert a tree-processor that would actually add the footnote div
|
||||||
|
# This must be before all other tree-processors (i.e., `inline` and
|
||||||
|
# `codehilite`) so they can run on the the contents of the div.
|
||||||
|
md.treeprocessors.register(FootnoteTreeprocessor(self), 'footnote', 50)
|
||||||
|
|
||||||
|
# Insert a tree-processor that will run after inline is done.
|
||||||
|
# In this tree-processor we want to check our duplicate footnote tracker
|
||||||
|
# And add additional `backrefs` to the footnote pointing back to the
|
||||||
|
# duplicated references.
|
||||||
|
md.treeprocessors.register(FootnotePostTreeprocessor(self), 'footnote-duplicate', 15)
|
||||||
|
|
||||||
|
# Insert a postprocessor after amp_substitute processor
|
||||||
|
md.postprocessors.register(FootnotePostprocessor(self), 'footnote', 25)
|
||||||
|
|
||||||
|
def reset(self) -> None:
|
||||||
|
""" Clear footnotes on reset, and prepare for distinct document. """
|
||||||
|
self.footnotes: OrderedDict[str, str] = OrderedDict()
|
||||||
|
self.unique_prefix += 1
|
||||||
|
self.found_refs = {}
|
||||||
|
self.used_refs = set()
|
||||||
|
|
||||||
|
def unique_ref(self, reference, found: bool = False):
|
||||||
|
""" Get a unique reference if there are duplicates. """
|
||||||
|
if not found:
|
||||||
|
return reference
|
||||||
|
|
||||||
|
original_ref = reference
|
||||||
|
while reference in self.used_refs:
|
||||||
|
ref, rest = reference.split(self.get_separator(), 1)
|
||||||
|
m = RE_REF_ID.match(ref)
|
||||||
|
if m:
|
||||||
|
reference = '%s%d%s%s' % (m.group(1), int(m.group(2))+1, self.get_separator(), rest)
|
||||||
|
else:
|
||||||
|
reference = '%s%d%s%s' % (ref, 2, self.get_separator(), rest)
|
||||||
|
|
||||||
|
self.used_refs.add(reference)
|
||||||
|
if original_ref in self.found_refs:
|
||||||
|
self.found_refs[original_ref] += 1
|
||||||
|
else:
|
||||||
|
self.found_refs[original_ref] = 1
|
||||||
|
return reference
|
||||||
|
|
||||||
|
def findFootnotesPlaceholder(self, root):
|
||||||
|
""" Return ElementTree Element that contains Footnote placeholder. """
|
||||||
|
def finder(element):
|
||||||
|
for child in element:
|
||||||
|
if child.text:
|
||||||
|
if child.text.find(self.getConfig("PLACE_MARKER")) > -1:
|
||||||
|
return child, element, True
|
||||||
|
if child.tail:
|
||||||
|
if child.tail.find(self.getConfig("PLACE_MARKER")) > -1:
|
||||||
|
return child, element, False
|
||||||
|
child_res = finder(child)
|
||||||
|
if child_res is not None:
|
||||||
|
return child_res
|
||||||
|
return None
|
||||||
|
|
||||||
|
res = finder(root)
|
||||||
|
return res
|
||||||
|
|
||||||
|
def setFootnote(self, id, text) -> None:
|
||||||
|
""" Store a footnote for later retrieval. """
|
||||||
|
self.footnotes[id] = text
|
||||||
|
|
||||||
|
def get_separator(self):
|
||||||
|
""" Get the footnote separator. """
|
||||||
|
return self.getConfig("SEPARATOR")
|
||||||
|
|
||||||
|
def makeFootnoteId(self, id):
|
||||||
|
""" Return footnote link id. """
|
||||||
|
if self.getConfig("UNIQUE_IDS"):
|
||||||
|
return 'fn%s%d-%s' % (self.get_separator(), self.unique_prefix, id)
|
||||||
|
else:
|
||||||
|
return 'fn{}{}'.format(self.get_separator(), id)
|
||||||
|
|
||||||
|
def makeFootnoteRefId(self, id, found: bool = False):
|
||||||
|
""" Return footnote back-link id. """
|
||||||
|
if self.getConfig("UNIQUE_IDS"):
|
||||||
|
return self.unique_ref('fnref%s%d-%s' % (self.get_separator(), self.unique_prefix, id), found)
|
||||||
|
else:
|
||||||
|
return self.unique_ref('fnref{}{}'.format(self.get_separator(), id), found)
|
||||||
|
|
||||||
|
def makeFootnotesDiv(self, root):
|
||||||
|
""" Return `div` of footnotes as `etree` Element. """
|
||||||
|
|
||||||
|
if not list(self.footnotes.keys()):
|
||||||
|
return None
|
||||||
|
|
||||||
|
div = etree.Element("div")
|
||||||
|
div.set('class', 'footnote')
|
||||||
|
etree.SubElement(div, "hr")
|
||||||
|
ol = etree.SubElement(div, "ol")
|
||||||
|
surrogate_parent = etree.Element("div")
|
||||||
|
|
||||||
|
# Backward compatibility with old '%d' placeholder
|
||||||
|
backlink_title = self.getConfig("BACKLINK_TITLE").replace("%d", "{}")
|
||||||
|
|
||||||
|
for index, id in enumerate(self.footnotes.keys(), start=1):
|
||||||
|
li = etree.SubElement(ol, "li")
|
||||||
|
li.set("id", self.makeFootnoteId(id))
|
||||||
|
# Parse footnote with surrogate parent as `li` cannot be used.
|
||||||
|
# List block handlers have special logic to deal with `li`.
|
||||||
|
# When we are done parsing, we will copy everything over to `li`.
|
||||||
|
self.parser.parseChunk(surrogate_parent, self.footnotes[id])
|
||||||
|
for el in list(surrogate_parent):
|
||||||
|
li.append(el)
|
||||||
|
surrogate_parent.remove(el)
|
||||||
|
backlink = etree.Element("a")
|
||||||
|
backlink.set("href", "#" + self.makeFootnoteRefId(id))
|
||||||
|
backlink.set("class", "footnote-backref")
|
||||||
|
backlink.set(
|
||||||
|
"title",
|
||||||
|
backlink_title.format(index)
|
||||||
|
)
|
||||||
|
backlink.text = FN_BACKLINK_TEXT
|
||||||
|
|
||||||
|
if len(li):
|
||||||
|
node = li[-1]
|
||||||
|
if node.tag == "p":
|
||||||
|
node.text = node.text + NBSP_PLACEHOLDER
|
||||||
|
node.append(backlink)
|
||||||
|
else:
|
||||||
|
p = etree.SubElement(li, "p")
|
||||||
|
p.append(backlink)
|
||||||
|
return div
|
||||||
|
|
||||||
|
|
||||||
|
class FootnoteBlockProcessor(BlockProcessor):
|
||||||
|
""" Find all footnote references and store for later use. """
|
||||||
|
|
||||||
|
RE = re.compile(r'^[ ]{0,3}\[\^([^\]]*)\]:[ ]*(.*)$', re.MULTILINE)
|
||||||
|
|
||||||
|
def __init__(self, footnotes):
|
||||||
|
super().__init__(footnotes.parser)
|
||||||
|
self.footnotes = footnotes
|
||||||
|
|
||||||
|
def test(self, parent, block):
|
||||||
|
return True
|
||||||
|
|
||||||
|
def run(self, parent, blocks):
|
||||||
|
""" Find, set, and remove footnote definitions. """
|
||||||
|
block = blocks.pop(0)
|
||||||
|
m = self.RE.search(block)
|
||||||
|
if m:
|
||||||
|
id = m.group(1)
|
||||||
|
fn_blocks = [m.group(2)]
|
||||||
|
|
||||||
|
# Handle rest of block
|
||||||
|
therest = block[m.end():].lstrip('\n')
|
||||||
|
m2 = self.RE.search(therest)
|
||||||
|
if m2:
|
||||||
|
# Another footnote exists in the rest of this block.
|
||||||
|
# Any content before match is continuation of this footnote, which may be lazily indented.
|
||||||
|
before = therest[:m2.start()].rstrip('\n')
|
||||||
|
fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(before)]).lstrip('\n')
|
||||||
|
# Add back to blocks everything from beginning of match forward for next iteration.
|
||||||
|
blocks.insert(0, therest[m2.start():])
|
||||||
|
else:
|
||||||
|
# All remaining lines of block are continuation of this footnote, which may be lazily indented.
|
||||||
|
fn_blocks[0] = '\n'.join([fn_blocks[0], self.detab(therest)]).strip('\n')
|
||||||
|
|
||||||
|
# Check for child elements in remaining blocks.
|
||||||
|
fn_blocks.extend(self.detectTabbed(blocks))
|
||||||
|
|
||||||
|
footnote = "\n\n".join(fn_blocks)
|
||||||
|
self.footnotes.setFootnote(id, footnote.rstrip())
|
||||||
|
|
||||||
|
if block[:m.start()].strip():
|
||||||
|
# Add any content before match back to blocks as separate block
|
||||||
|
blocks.insert(0, block[:m.start()].rstrip('\n'))
|
||||||
|
return True
|
||||||
|
# No match. Restore block.
|
||||||
|
blocks.insert(0, block)
|
||||||
|
return False
|
||||||
|
|
||||||
|
def detectTabbed(self, blocks) -> list[str]:
|
||||||
|
""" Find indented text and remove indent before further processing.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
A list of blocks with indentation removed.
|
||||||
|
"""
|
||||||
|
fn_blocks = []
|
||||||
|
while blocks:
|
||||||
|
if blocks[0].startswith(' '*4):
|
||||||
|
block = blocks.pop(0)
|
||||||
|
# Check for new footnotes within this block and split at new footnote.
|
||||||
|
m = self.RE.search(block)
|
||||||
|
if m:
|
||||||
|
# Another footnote exists in this block.
|
||||||
|
# Any content before match is continuation of this footnote, which may be lazily indented.
|
||||||
|
before = block[:m.start()].rstrip('\n')
|
||||||
|
fn_blocks.append(self.detab(before))
|
||||||
|
# Add back to blocks everything from beginning of match forward for next iteration.
|
||||||
|
blocks.insert(0, block[m.start():])
|
||||||
|
# End of this footnote.
|
||||||
|
break
|
||||||
|
else:
|
||||||
|
# Entire block is part of this footnote.
|
||||||
|
fn_blocks.append(self.detab(block))
|
||||||
|
else:
|
||||||
|
# End of this footnote.
|
||||||
|
break
|
||||||
|
return fn_blocks
|
||||||
|
|
||||||
|
def detab(self, block):
|
||||||
|
""" Remove one level of indent from a block.
|
||||||
|
|
||||||
|
Preserve lazily indented blocks by only removing indent from indented lines.
|
||||||
|
"""
|
||||||
|
lines = block.split('\n')
|
||||||
|
for i, line in enumerate(lines):
|
||||||
|
if line.startswith(' '*4):
|
||||||
|
lines[i] = line[4:]
|
||||||
|
return '\n'.join(lines)
|
||||||
|
|
||||||
|
|
||||||
|
class FootnoteInlineProcessor(InlineProcessor):
|
||||||
|
""" `InlineProcessor` for footnote markers in a document's body text. """
|
||||||
|
|
||||||
|
def __init__(self, pattern, footnotes):
|
||||||
|
super().__init__(pattern)
|
||||||
|
self.footnotes = footnotes
|
||||||
|
|
||||||
|
def handleMatch(self, m, data):
|
||||||
|
id = m.group(1)
|
||||||
|
if id in self.footnotes.footnotes.keys():
|
||||||
|
sup = etree.Element("sup")
|
||||||
|
a = etree.SubElement(sup, "a")
|
||||||
|
sup.set('id', self.footnotes.makeFootnoteRefId(id, found=True))
|
||||||
|
a.set('href', '#' + self.footnotes.makeFootnoteId(id))
|
||||||
|
a.set('class', 'footnote-ref')
|
||||||
|
a.text = self.footnotes.getConfig("SUPERSCRIPT_TEXT").format(
|
||||||
|
list(self.footnotes.footnotes.keys()).index(id) + 1
|
||||||
|
)
|
||||||
|
return sup, m.start(0), m.end(0)
|
||||||
|
else:
|
||||||
|
return None, None, None
|
||||||
|
|
||||||
|
|
||||||
|
class FootnotePostTreeprocessor(Treeprocessor):
|
||||||
|
""" Amend footnote div with duplicates. """
|
||||||
|
|
||||||
|
def __init__(self, footnotes):
|
||||||
|
self.footnotes = footnotes
|
||||||
|
|
||||||
|
def add_duplicates(self, li, duplicates) -> None:
|
||||||
|
""" Adjust current `li` and add the duplicates: `fnref2`, `fnref3`, etc. """
|
||||||
|
for link in li.iter('a'):
|
||||||
|
# Find the link that needs to be duplicated.
|
||||||
|
if link.attrib.get('class', '') == 'footnote-backref':
|
||||||
|
ref, rest = link.attrib['href'].split(self.footnotes.get_separator(), 1)
|
||||||
|
# Duplicate link the number of times we need to
|
||||||
|
# and point the to the appropriate references.
|
||||||
|
links = []
|
||||||
|
for index in range(2, duplicates + 1):
|
||||||
|
sib_link = copy.deepcopy(link)
|
||||||
|
sib_link.attrib['href'] = '%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest)
|
||||||
|
links.append(sib_link)
|
||||||
|
self.offset += 1
|
||||||
|
# Add all the new duplicate links.
|
||||||
|
el = list(li)[-1]
|
||||||
|
for link in links:
|
||||||
|
el.append(link)
|
||||||
|
break
|
||||||
|
|
||||||
|
def get_num_duplicates(self, li):
|
||||||
|
""" Get the number of duplicate refs of the footnote. """
|
||||||
|
fn, rest = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
|
||||||
|
link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
|
||||||
|
return self.footnotes.found_refs.get(link_id, 0)
|
||||||
|
|
||||||
|
def handle_duplicates(self, parent) -> None:
|
||||||
|
""" Find duplicate footnotes and format and add the duplicates. """
|
||||||
|
for li in list(parent):
|
||||||
|
# Check number of duplicates footnotes and insert
|
||||||
|
# additional links if needed.
|
||||||
|
count = self.get_num_duplicates(li)
|
||||||
|
if count > 1:
|
||||||
|
self.add_duplicates(li, count)
|
||||||
|
|
||||||
|
def run(self, root):
|
||||||
|
""" Crawl the footnote div and add missing duplicate footnotes. """
|
||||||
|
self.offset = 0
|
||||||
|
for div in root.iter('div'):
|
||||||
|
if div.attrib.get('class', '') == 'footnote':
|
||||||
|
# Footnotes should be under the first ordered list under
|
||||||
|
# the footnote div. So once we find it, quit.
|
||||||
|
for ol in div.iter('ol'):
|
||||||
|
self.handle_duplicates(ol)
|
||||||
|
break
|
||||||
|
|
||||||
|
|
||||||
|
class FootnoteTreeprocessor(Treeprocessor):
|
||||||
|
""" Build and append footnote div to end of document. """
|
||||||
|
|
||||||
|
def __init__(self, footnotes):
|
||||||
|
self.footnotes = footnotes
|
||||||
|
|
||||||
|
def run(self, root):
|
||||||
|
footnotesDiv = self.footnotes.makeFootnotesDiv(root)
|
||||||
|
if footnotesDiv is not None:
|
||||||
|
result = self.footnotes.findFootnotesPlaceholder(root)
|
||||||
|
if result:
|
||||||
|
child, parent, isText = result
|
||||||
|
ind = list(parent).index(child)
|
||||||
|
if isText:
|
||||||
|
parent.remove(child)
|
||||||
|
parent.insert(ind, footnotesDiv)
|
||||||
|
else:
|
||||||
|
parent.insert(ind + 1, footnotesDiv)
|
||||||
|
child.tail = None
|
||||||
|
else:
|
||||||
|
root.append(footnotesDiv)
|
||||||
|
|
||||||
|
|
||||||
|
class FootnotePostprocessor(Postprocessor):
|
||||||
|
""" Replace placeholders with html entities. """
|
||||||
|
def __init__(self, footnotes):
|
||||||
|
self.footnotes = footnotes
|
||||||
|
|
||||||
|
def run(self, text):
|
||||||
|
text = text.replace(
|
||||||
|
FN_BACKLINK_TEXT, self.footnotes.getConfig("BACKLINK_TEXT")
|
||||||
|
)
|
||||||
|
return text.replace(NBSP_PLACEHOLDER, " ")
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs): # pragma: no cover
|
||||||
|
""" Return an instance of the `FootnoteExtension` """
|
||||||
|
return FootnoteExtension(**kwargs)
|
||||||
67
plugins/markdown_preview/markdown/extensions/legacy_attrs.py
Normal file
67
plugins/markdown_preview/markdown/extensions/legacy_attrs.py
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
An extension to Python Markdown which implements legacy attributes.
|
||||||
|
|
||||||
|
Prior to Python-Markdown version 3.0, the Markdown class had an `enable_attributes`
|
||||||
|
keyword which was on by default and provided for attributes to be defined for elements
|
||||||
|
using the format `{@key=value}`. This extension is provided as a replacement for
|
||||||
|
backward compatibility. New documents should be authored using `attr_lists`. However,
|
||||||
|
numerous documents exist which have been using the old attribute format for many
|
||||||
|
years. This extension can be used to continue to render those documents correctly.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
from markdown.treeprocessors import Treeprocessor, isString
|
||||||
|
from markdown.extensions import Extension
|
||||||
|
|
||||||
|
|
||||||
|
ATTR_RE = re.compile(r'\{@([^\}]*)=([^\}]*)}') # {@id=123}
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyAttrs(Treeprocessor):
|
||||||
|
def run(self, doc):
|
||||||
|
"""Find and set values of attributes ({@key=value}). """
|
||||||
|
for el in doc.iter():
|
||||||
|
alt = el.get('alt', None)
|
||||||
|
if alt is not None:
|
||||||
|
el.set('alt', self.handleAttributes(el, alt))
|
||||||
|
if el.text and isString(el.text):
|
||||||
|
el.text = self.handleAttributes(el, el.text)
|
||||||
|
if el.tail and isString(el.tail):
|
||||||
|
el.tail = self.handleAttributes(el, el.tail)
|
||||||
|
|
||||||
|
def handleAttributes(self, el, txt):
|
||||||
|
""" Set attributes and return text without definitions. """
|
||||||
|
def attributeCallback(match):
|
||||||
|
el.set(match.group(1), match.group(2).replace('\n', ' '))
|
||||||
|
return ATTR_RE.sub(attributeCallback, txt)
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyAttrExtension(Extension):
|
||||||
|
def extendMarkdown(self, md):
|
||||||
|
""" Add `LegacyAttrs` to Markdown instance. """
|
||||||
|
md.treeprocessors.register(LegacyAttrs(md), 'legacyattrs', 15)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs): # pragma: no cover
|
||||||
|
return LegacyAttrExtension(**kwargs)
|
||||||
52
plugins/markdown_preview/markdown/extensions/legacy_em.py
Normal file
52
plugins/markdown_preview/markdown/extensions/legacy_em.py
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
# Legacy Em Extension for Python-Markdown
|
||||||
|
# =======================================
|
||||||
|
|
||||||
|
# This extension provides legacy behavior for _connected_words_.
|
||||||
|
|
||||||
|
# Copyright 2015-2018 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
This extension provides legacy behavior for _connected_words_.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..inlinepatterns import UnderscoreProcessor, EmStrongItem, EM_STRONG2_RE, STRONG_EM2_RE
|
||||||
|
import re
|
||||||
|
|
||||||
|
# _emphasis_
|
||||||
|
EMPHASIS_RE = r'(_)([^_]+)\1'
|
||||||
|
|
||||||
|
# __strong__
|
||||||
|
STRONG_RE = r'(_{2})(.+?)\1'
|
||||||
|
|
||||||
|
# __strong_em___
|
||||||
|
STRONG_EM_RE = r'(_)\1(?!\1)([^_]+?)\1(?!\1)(.+?)\1{3}'
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyUnderscoreProcessor(UnderscoreProcessor):
|
||||||
|
"""Emphasis processor for handling strong and em matches inside underscores."""
|
||||||
|
|
||||||
|
PATTERNS = [
|
||||||
|
EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
|
||||||
|
EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
|
||||||
|
EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
|
||||||
|
EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
|
||||||
|
EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyEmExtension(Extension):
|
||||||
|
""" Add legacy_em extension to Markdown class."""
|
||||||
|
|
||||||
|
def extendMarkdown(self, md):
|
||||||
|
""" Modify inline patterns. """
|
||||||
|
md.inlinePatterns.register(LegacyUnderscoreProcessor(r'_'), 'em_strong2', 50)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs): # pragma: no cover
|
||||||
|
""" Return an instance of the `LegacyEmExtension` """
|
||||||
|
return LegacyEmExtension(**kwargs)
|
||||||
372
plugins/markdown_preview/markdown/extensions/md_in_html.py
Normal file
372
plugins/markdown_preview/markdown/extensions/md_in_html.py
Normal file
@@ -0,0 +1,372 @@
|
|||||||
|
# Python-Markdown Markdown in HTML Extension
|
||||||
|
# ===============================
|
||||||
|
|
||||||
|
# An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s
|
||||||
|
# parsing of Markdown syntax in raw HTML.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/raw_html
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Copyright The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
An implementation of [PHP Markdown Extra](http://michelf.com/projects/php-markdown/extra/)'s
|
||||||
|
parsing of Markdown syntax in raw HTML.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/raw_html)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor
|
||||||
|
from ..preprocessors import Preprocessor
|
||||||
|
from ..postprocessors import RawHtmlPostprocessor
|
||||||
|
from .. import util
|
||||||
|
from ..htmlparser import HTMLExtractor, blank_line_re
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
|
|
||||||
|
class HTMLExtractorExtra(HTMLExtractor):
    """
    Override `HTMLExtractor` and create `etree` `Elements` for any elements which should have content parsed as
    Markdown.
    """

    def __init__(self, md, *args, **kwargs):
        # All block-level tags.
        self.block_level_tags = set(md.block_level_elements.copy())
        # Block-level tags in which the content only gets span level parsing
        self.span_tags = set(
            ['address', 'dd', 'dt', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'legend', 'li', 'p', 'summary', 'td', 'th']
        )
        # Block-level tags which never get their content parsed.
        self.raw_tags = set(['canvas', 'math', 'option', 'pre', 'script', 'style', 'textarea'])

        super().__init__(md, *args, **kwargs)

        # Block-level tags in which the content gets parsed as blocks.
        # Computed after `super().__init__()` so `self.empty_tags` is available.
        self.block_tags = set(self.block_level_tags) - (self.span_tags | self.raw_tags | self.empty_tags)
        self.span_and_blocks_tags = self.block_tags | self.span_tags

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.mdstack = []  # When markdown=1, stack contains a list of tags
        self.treebuilder = etree.TreeBuilder()
        self.mdstate = []  # one of 'block', 'span', 'off', or None
        super().reset()

    def close(self):
        """Handle any buffered data."""
        super().close()
        # Handle any unclosed tags.
        if self.mdstack:
            # Close the outermost parent. `handle_endtag` will close all unclosed children.
            self.handle_endtag(self.mdstack[0])

    def get_element(self):
        """ Return element from `treebuilder` and reset `treebuilder` for later use. """
        element = self.treebuilder.close()
        self.treebuilder = etree.TreeBuilder()
        return element

    def get_state(self, tag, attrs):
        """ Return state from tag and `markdown` attribute. One of 'block', 'span', or 'off'. """
        md_attr = attrs.get('markdown', '0')
        if md_attr == 'markdown':
            # `<tag markdown>` is the same as `<tag markdown='1'>`.
            md_attr = '1'
        parent_state = self.mdstate[-1] if self.mdstate else None
        if parent_state == 'off' or (parent_state == 'span' and md_attr != '0'):
            # Only use the parent state if it is more restrictive than the markdown attribute.
            md_attr = parent_state
        if ((md_attr == '1' and tag in self.block_tags) or
                (md_attr == 'block' and tag in self.span_and_blocks_tags)):
            return 'block'
        elif ((md_attr == '1' and tag in self.span_tags) or
                (md_attr == 'span' and tag in self.span_and_blocks_tags)):
            return 'span'
        elif tag in self.block_level_tags:
            return 'off'
        else:  # pragma: no cover
            return None

    def handle_starttag(self, tag, attrs):
        """Route a start tag to the `treebuilder`, the stash, or the default handler."""
        # Handle tags that should always be empty and do not specify a closing tag
        if tag in self.empty_tags and (self.at_line_start() or self.intail):
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
            self.handle_empty_tag(data, True)
            return

        if tag in self.block_level_tags and (self.at_line_start() or self.intail):
            # Valueless attribute (ex: `<tag checked>`) results in `[('checked', None)]`.
            # Convert to `{'checked': 'checked'}`.
            attrs = {key: value if value is not None else key for key, value in attrs}
            state = self.get_state(tag, attrs)
            if self.inraw or (state in [None, 'off'] and not self.mdstack):
                # fall back to default behavior
                attrs.pop('markdown', None)
                super().handle_starttag(tag, attrs)
            else:
                if 'p' in self.mdstack and tag in self.block_level_tags:
                    # Close unclosed 'p' tag
                    self.handle_endtag('p')
                self.mdstate.append(state)
                self.mdstack.append(tag)
                # The `markdown` attribute is kept here so the block processor
                # later knows how to parse this element's content.
                attrs['markdown'] = state
                self.treebuilder.start(tag, attrs)
        else:
            # Span level tag
            if self.inraw:
                super().handle_starttag(tag, attrs)
            else:
                text = self.get_starttag_text()
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
                if tag in self.CDATA_CONTENT_ELEMENTS:
                    # This is presumably a standalone tag in a code span (see #1036).
                    self.clear_cdata_mode()

    def handle_endtag(self, tag):
        """Close a tag, stashing the finished `etree` tree when the stack empties."""
        if tag in self.block_level_tags:
            if self.inraw:
                super().handle_endtag(tag)
            elif tag in self.mdstack:
                # Close element and any unclosed children
                while self.mdstack:
                    item = self.mdstack.pop()
                    self.mdstate.pop()
                    self.treebuilder.end(item)
                    if item == tag:
                        break
                if not self.mdstack:
                    # Last item in stack is closed. Stash it
                    element = self.get_element()
                    # Get last entry to see if it ends in newlines
                    # If it is an element, assume there is no newlines
                    item = self.cleandoc[-1] if self.cleandoc else ''
                    # If we only have one newline before block element, add another
                    if not item.endswith('\n\n') and item.endswith('\n'):
                        self.cleandoc.append('\n')
                    self.cleandoc.append(self.md.htmlStash.store(element))
                    self.cleandoc.append('\n\n')
                    self.state = []
                    # Check if element has a tail
                    if not blank_line_re.match(
                            self.rawdata[self.line_offset + self.offset + len(self.get_endtag_text(tag)):]):
                        # More content exists after `endtag`.
                        self.intail = True
            else:
                # Treat orphan closing tag as a span level tag.
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)
        else:
            # Span level tag
            if self.inraw:
                super().handle_endtag(tag)
            else:
                text = self.get_endtag_text(tag)
                if self.mdstate and self.mdstate[-1] == "off":
                    self.handle_data(self.md.htmlStash.store(text))
                else:
                    self.handle_data(text)

    def handle_startendtag(self, tag, attrs):
        """Handle a self-closing tag (`<tag .../>`), stripping any `markdown` attribute."""
        if tag in self.empty_tags:
            attrs = {key: value if value is not None else key for key, value in attrs}
            if "markdown" in attrs:
                attrs.pop('markdown')
                element = etree.Element(tag, attrs)
                data = etree.tostring(element, encoding='unicode', method='html')
            else:
                data = self.get_starttag_text()
        else:
            data = self.get_starttag_text()
        self.handle_empty_tag(data, is_block=self.md.is_block_level(tag))

    def handle_data(self, data):
        """Feed text to the `treebuilder` while inside a markdown element, else default."""
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw or not self.mdstack:
            super().handle_data(data)
        else:
            self.treebuilder.data(data)

    def handle_empty_tag(self, data, is_block):
        """Stash an empty tag; inside a markdown element it becomes placeholder text."""
        if self.inraw or not self.mdstack:
            super().handle_empty_tag(data, is_block)
        else:
            if self.at_line_start() and is_block:
                self.handle_data('\n' + self.md.htmlStash.store(data) + '\n\n')
            else:
                self.handle_data(self.md.htmlStash.store(data))

    def parse_pi(self, i):
        """Parse a processing instruction only where raw HTML is recognized."""
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in `HTMLExtractor` without the check
            # for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
            return super(HTMLExtractor, self).parse_pi(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<?')
        return i + 2

    def parse_html_declaration(self, i):
        """Parse an HTML declaration only where raw HTML is recognized."""
        if self.at_line_start() or self.intail or self.mdstack:
            # The same override exists in `HTMLExtractor` without the check
            # for `mdstack`. Therefore, use parent of `HTMLExtractor` instead.
            return super(HTMLExtractor, self).parse_html_declaration(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<!')
        return i + 2
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlBlockPreprocessor(Preprocessor):
    """Strip raw HTML blocks from the text and stash them for later retrieval."""

    def run(self, lines):
        """ Feed the whole document through the extractor and return the cleaned lines. """
        extractor = HTMLExtractorExtra(self.md)
        extractor.feed('\n'.join(lines))
        extractor.close()
        cleaned = ''.join(extractor.cleandoc)
        return cleaned.split('\n')
|
||||||
|
|
||||||
|
|
||||||
|
class MarkdownInHtmlProcessor(BlockProcessor):
    """Process Markdown Inside HTML Blocks which have been stored in the `HtmlStash`."""

    def test(self, parent, block):
        # Always return True. `run` will return `False` if not a valid match.
        return True

    def parse_element_content(self, element):
        """
        Recursively parse the text content of an `etree` Element as Markdown.

        Any block level elements generated from the Markdown will be inserted as children of the element in place
        of the text content. All `markdown` attributes are removed. For any elements in which Markdown parsing has
        been disabled, the text content of it and its children are wrapped in an `AtomicString`.
        """

        # The `markdown` attribute was attached by `HTMLExtractorExtra`;
        # anything without one defaults to raw ('off') handling.
        md_attr = element.attrib.pop('markdown', 'off')

        if md_attr == 'block':
            # Parse content as block level
            # The order in which the different parts are parsed (text, children, tails) is important here as the
            # order of elements needs to be preserved. We can't be inserting items at a later point in the current
            # iteration as we don't want to do raw processing on elements created from parsing Markdown text (for
            # example). Therefore, the order of operations is children, tails, text.

            # Recursively parse existing children from raw HTML
            for child in list(element):
                self.parse_element_content(child)

            # Parse Markdown text in tail of children. Do this separate to avoid raw HTML parsing.
            # Save the position of each item to be inserted later in reverse.
            tails = []
            for pos, child in enumerate(element):
                if child.tail:
                    block = child.tail.rstrip('\n')
                    child.tail = ''
                    # Use a dummy placeholder element.
                    dummy = etree.Element('div')
                    self.parser.parseBlocks(dummy, block.split('\n\n'))
                    children = list(dummy)
                    children.reverse()
                    tails.append((pos + 1, children))

            # Insert the elements created from the tails in reverse.
            # Reverse order keeps earlier saved positions valid as we insert.
            tails.reverse()
            for pos, tail in tails:
                for item in tail:
                    element.insert(pos, item)

            # Parse Markdown text content. Do this last to avoid raw HTML parsing.
            if element.text:
                block = element.text.rstrip('\n')
                element.text = ''
                # Use a dummy placeholder element as the content needs to get inserted before existing children.
                dummy = etree.Element('div')
                self.parser.parseBlocks(dummy, block.split('\n\n'))
                children = list(dummy)
                children.reverse()
                for child in children:
                    element.insert(0, child)

        elif md_attr == 'span':
            # Span level parsing will be handled by inline processors.
            # Walk children here to remove any `markdown` attributes.
            for child in list(element):
                self.parse_element_content(child)

        else:
            # Disable inline parsing for everything else
            if element.text is None:
                element.text = ''
            element.text = util.AtomicString(element.text)
            for child in list(element):
                self.parse_element_content(child)
                if child.tail:
                    child.tail = util.AtomicString(child.tail)

    def run(self, parent, blocks):
        """ Consume a stash placeholder block and append the parsed element to `parent`. """
        m = util.HTML_PLACEHOLDER_RE.match(blocks[0])
        if m:
            index = int(m.group(1))
            element = self.parser.md.htmlStash.rawHtmlBlocks[index]
            if isinstance(element, etree.Element):
                # We have a matched element. Process it.
                blocks.pop(0)
                self.parse_element_content(element)
                parent.append(element)
                # Cleanup stash. Replace element with empty string to avoid confusing postprocessor.
                self.parser.md.htmlStash.rawHtmlBlocks.pop(index)
                self.parser.md.htmlStash.rawHtmlBlocks.insert(index, '')
                # Confirm the match to the `blockparser`.
                return True
        # No match found.
        return False
|
||||||
|
|
||||||
|
|
||||||
|
class MarkdownInHTMLPostprocessor(RawHtmlPostprocessor):
    def stash_to_string(self, text):
        """ Override default to handle any `etree` elements still in the stash. """
        if isinstance(text, etree.Element):
            # An element still in the stash was never consumed by the block
            # processor; serialize it directly.
            return self.md.serializer(text)
        return str(text)
|
||||||
|
|
||||||
|
|
||||||
|
class MarkdownInHtmlExtension(Extension):
    """Add Markdown parsing in HTML to Markdown class."""

    def extendMarkdown(self, md):
        """ Register extension instances. """
        # Swap out the stock raw-HTML preprocessor for the extractor-based one.
        md.preprocessors.register(HtmlBlockPreprocessor(md), 'html_block', 20)
        # Handle the placeholders that point at stashed `etree` elements.
        block_handler = MarkdownInHtmlProcessor(md.parser)
        md.parser.blockprocessors.register(block_handler, 'markdown_block', 105)
        # Swap out the stock raw-HTML postprocessor as well.
        md.postprocessors.register(MarkdownInHTMLPostprocessor(md), 'raw_html', 30)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `MarkdownInHtmlExtension`. """
    extension = MarkdownInHtmlExtension(**kwargs)
    return extension
|
||||||
85
plugins/markdown_preview/markdown/extensions/meta.py
Normal file
85
plugins/markdown_preview/markdown/extensions/meta.py
Normal file
@@ -0,0 +1,85 @@
|
|||||||
|
# Meta Data Extension for Python-Markdown
|
||||||
|
# =======================================
|
||||||
|
|
||||||
|
# This extension adds Meta Data handling to markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/meta_data
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2007-2008 [Waylan Limberg](http://achinghead.com).
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
This extension adds Meta Data handling to markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/meta_data)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..preprocessors import Preprocessor
|
||||||
|
import re
|
||||||
|
import logging
|
||||||
|
|
||||||
|
# Module-level logger for the extension.
log = logging.getLogger('MARKDOWN')

# Global Vars
# A `key: value` line; keys are restricted to alphanumerics, `_` and `-`.
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
# A continuation line: indented at least four spaces.
META_MORE_RE = re.compile(r'^[ ]{4,}(?P<value>.*)')
# Optional opening `---` delimiter of a YAML-style header.
BEGIN_RE = re.compile(r'^-{3}(\s.*)?')
# Closing `---` or `...` delimiter.
END_RE = re.compile(r'^(-{3}|\.{3})(\s.*)?')
|
||||||
|
|
||||||
|
|
||||||
|
class MetaExtension(Extension):
    """ Meta-Data extension for Python-Markdown. """

    def extendMarkdown(self, md):
        """ Register the `MetaPreprocessor` and remember the Markdown instance. """
        md.registerExtension(self)
        self.md = md
        preprocessor = MetaPreprocessor(md)
        md.preprocessors.register(preprocessor, 'meta', 27)

    def reset(self) -> None:
        # Clear any meta-data left over from a previous conversion.
        self.md.Meta = {}
|
||||||
|
|
||||||
|
|
||||||
|
class MetaPreprocessor(Preprocessor):
    """ Get Meta-Data. """

    def run(self, lines):
        """ Parse Meta-Data and store in Markdown.Meta. """
        meta = {}
        key = None
        # Skip an optional opening YAML-style `---` delimiter.
        if lines and BEGIN_RE.match(lines[0]):
            lines.pop(0)
        while lines:
            line = lines.pop(0)
            if not line.strip() or END_RE.match(line):
                # A blank line or closing delimiter ends the header.
                break
            match = META_RE.match(line)
            if match:
                key = match.group('key').lower().strip()
                meta.setdefault(key, []).append(match.group('value').strip())
                continue
            continuation = META_MORE_RE.match(line)
            if continuation and key:
                # An indented line extends the previous key's value list.
                meta[key].append(continuation.group('value').strip())
            else:
                # Not meta-data: put the line back and stop scanning.
                lines.insert(0, line)
                break
        self.md.Meta = meta
        return lines
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `MetaExtension`. """
    extension = MetaExtension(**kwargs)
    return extension
|
||||||
41
plugins/markdown_preview/markdown/extensions/nl2br.py
Normal file
41
plugins/markdown_preview/markdown/extensions/nl2br.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# `NL2BR` Extension
|
||||||
|
# ===============
|
||||||
|
|
||||||
|
# A Python-Markdown extension to treat newlines as hard breaks; like
|
||||||
|
# GitHub-flavored Markdown does.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/nl2br
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2011 [Brian Neal](https://deathofagremmie.com/)
|
||||||
|
|
||||||
|
# All changes Copyright 2011-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
A Python-Markdown extension to treat newlines as hard breaks; like
|
||||||
|
GitHub-flavored Markdown does.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/nl2br)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..inlinepatterns import SubstituteTagInlineProcessor
|
||||||
|
|
||||||
|
BR_RE = r'\n'  # every bare newline is turned into a `br` tag
|
||||||
|
|
||||||
|
|
||||||
|
class Nl2BrExtension(Extension):
    """ Treat newlines as hard breaks, as GitHub-flavored Markdown does. """

    def extendMarkdown(self, md):
        """ Add a `SubstituteTagInlineProcessor` to Markdown. """
        processor = SubstituteTagInlineProcessor(BR_RE, 'br')
        md.inlinePatterns.register(processor, 'nl', 5)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `Nl2BrExtension`. """
    extension = Nl2BrExtension(**kwargs)
    return extension
|
||||||
65
plugins/markdown_preview/markdown/extensions/sane_lists.py
Normal file
65
plugins/markdown_preview/markdown/extensions/sane_lists.py
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
# Sane List Extension for Python-Markdown
|
||||||
|
# =======================================
|
||||||
|
|
||||||
|
# Modify the behavior of Lists in Python-Markdown to act in a sane manor.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/sane_lists
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2011 [Waylan Limberg](http://achinghead.com)
|
||||||
|
|
||||||
|
# All changes Copyright 2011-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Modify the behavior of Lists in Python-Markdown to act in a sane manor.
|
||||||
|
|
||||||
|
See [documentation](https://Python-Markdown.github.io/extensions/sane_lists)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import OListProcessor, UListProcessor
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
class SaneOListProcessor(OListProcessor):
    """ Override `SIBLING_TAGS` to not include `ul` and set `LAZY_OL` to `False`. """

    SIBLING_TAGS = ['ol']
    """ Exclude `ul` from list of siblings. """
    LAZY_OL = False
    """ Disable lazy list behavior. """

    def __init__(self, parser):
        super().__init__(parser)
        # Rebuild the child pattern so only ordered-list items (`1.`) match.
        child_pattern = r'^[ ]{0,%d}((\d+\.))[ ]+(.*)' % (self.tab_length - 1)
        self.CHILD_RE = re.compile(child_pattern)
|
||||||
|
|
||||||
|
|
||||||
|
class SaneUListProcessor(UListProcessor):
    """ Override `SIBLING_TAGS` to not include `ol`. """

    SIBLING_TAGS = ['ul']
    """ Exclude `ol` from list of siblings. """

    def __init__(self, parser):
        super().__init__(parser)
        # Rebuild the child pattern so only bullet items (`*`, `+`, `-`) match.
        child_pattern = r'^[ ]{0,%d}(([*+-]))[ ]+(.*)' % (self.tab_length - 1)
        self.CHILD_RE = re.compile(child_pattern)
|
||||||
|
|
||||||
|
|
||||||
|
class SaneListExtension(Extension):
    """ Add sane lists to Markdown. """

    def extendMarkdown(self, md):
        """ Replace the stock list processors with the sane variants. """
        blockprocessors = md.parser.blockprocessors
        blockprocessors.register(SaneOListProcessor(md.parser), 'olist', 40)
        blockprocessors.register(SaneUListProcessor(md.parser), 'ulist', 30)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `SaneListExtension`. """
    extension = SaneListExtension(**kwargs)
    return extension
|
||||||
265
plugins/markdown_preview/markdown/extensions/smarty.py
Normal file
265
plugins/markdown_preview/markdown/extensions/smarty.py
Normal file
@@ -0,0 +1,265 @@
|
|||||||
|
# Smarty extension for Python-Markdown
|
||||||
|
# ====================================
|
||||||
|
|
||||||
|
# Adds conversion of ASCII dashes, quotes and ellipses to their HTML
|
||||||
|
# entity equivalents.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/smarty
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Author: 2013, Dmitry Shachnev <mitya57@gmail.com>
|
||||||
|
|
||||||
|
# All changes Copyright 2013-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
# SmartyPants license:
|
||||||
|
|
||||||
|
# Copyright (c) 2003 John Gruber <https://daringfireball.net/>
|
||||||
|
# All rights reserved.
|
||||||
|
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in
|
||||||
|
# the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
|
||||||
|
# * Neither the name "SmartyPants" nor the names of its contributors
|
||||||
|
# may be used to endorse or promote products derived from this
|
||||||
|
# software without specific prior written permission.
|
||||||
|
|
||||||
|
# This software is provided by the copyright holders and contributors "as
|
||||||
|
# is" and any express or implied warranties, including, but not limited
|
||||||
|
# to, the implied warranties of merchantability and fitness for a
|
||||||
|
# particular purpose are disclaimed. In no event shall the copyright
|
||||||
|
# owner or contributors be liable for any direct, indirect, incidental,
|
||||||
|
# special, exemplary, or consequential damages (including, but not
|
||||||
|
# limited to, procurement of substitute goods or services; loss of use,
|
||||||
|
# data, or profits; or business interruption) however caused and on any
|
||||||
|
# theory of liability, whether in contract, strict liability, or tort
|
||||||
|
# (including negligence or otherwise) arising in any way out of the use
|
||||||
|
# of this software, even if advised of the possibility of such damage.
|
||||||
|
|
||||||
|
|
||||||
|
# `smartypants.py` license:
|
||||||
|
|
||||||
|
# `smartypants.py` is a derivative work of SmartyPants.
|
||||||
|
# Copyright (c) 2004, 2007 Chad Miller <http://web.chad.org/>
|
||||||
|
|
||||||
|
# Redistribution and use in source and binary forms, with or without
|
||||||
|
# modification, are permitted provided that the following conditions are
|
||||||
|
# met:
|
||||||
|
|
||||||
|
# * Redistributions of source code must retain the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
# * Redistributions in binary form must reproduce the above copyright
|
||||||
|
# notice, this list of conditions and the following disclaimer in
|
||||||
|
# the documentation and/or other materials provided with the
|
||||||
|
# distribution.
|
||||||
|
|
||||||
|
# This software is provided by the copyright holders and contributors "as
|
||||||
|
# is" and any express or implied warranties, including, but not limited
|
||||||
|
# to, the implied warranties of merchantability and fitness for a
|
||||||
|
# particular purpose are disclaimed. In no event shall the copyright
|
||||||
|
# owner or contributors be liable for any direct, indirect, incidental,
|
||||||
|
# special, exemplary, or consequential damages (including, but not
|
||||||
|
# limited to, procurement of substitute goods or services; loss of use,
|
||||||
|
# data, or profits; or business interruption) however caused and on any
|
||||||
|
# theory of liability, whether in contract, strict liability, or tort
|
||||||
|
# (including negligence or otherwise) arising in any way out of the use
|
||||||
|
# of this software, even if advised of the possibility of such damage.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Adds conversion of ASCII dashes, quotes and ellipses to their HTML
|
||||||
|
entity equivalents.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/smarty)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..inlinepatterns import HtmlInlineProcessor, HTML_RE
|
||||||
|
from ..treeprocessors import InlineProcessor
|
||||||
|
from ..util import Registry
|
||||||
|
|
||||||
|
|
||||||
|
# Constants for quote education.
punctClass = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
endOfWordClass = r"[\s.,;:!?)]"
# `\u0002`/`\u0003` are presumably the stash placeholder delimiters — TODO confirm.
closeClass = r"[^\ \t\r\n\[\{\(\-\u0002\u0003]"

# Things that may legitimately precede an opening quote.
# NOTE(review): the entity alternatives below had been decoded to literal
# characters (a real no-break space instead of `&nbsp;`, en/em dashes instead
# of `&#8211;`/`&#8212;`) by a copy through an HTML renderer, which made the
# "entity" branches described in the comments dead. Restored to the entities.
openingQuotesBase = (
    r'(\s'               # a whitespace char
    r'|&nbsp;'           # or a non-breaking space entity
    r'|--'               # or dashes
    r'|–|—'              # or Unicode
    r'|&[mn]dash;'       # or named dash entities
    r'|&#8211;|&#8212;'  # or decimal entities
    r')'
)

# Replacement text for each educated construct. HTML entities are used so the
# output matches the module's stated purpose ("HTML entity equivalents");
# these had likewise been decoded to literal characters and are restored.
substitutions = {
    'mdash': '&mdash;',
    'ndash': '&ndash;',
    'ellipsis': '&hellip;',
    'left-angle-quote': '&laquo;',
    'right-angle-quote': '&raquo;',
    'left-single-quote': '&lsquo;',
    'right-single-quote': '&rsquo;',
    'left-double-quote': '&ldquo;',
    'right-double-quote': '&rdquo;',
}


# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
singleQuoteStartRe = r"^'(?=%s\B)" % punctClass
doubleQuoteStartRe = r'^"(?=%s\B)' % punctClass

# Special case for double sets of quotes, e.g.:
#   <p>He said, "'Quoted' words in a larger quote."</p>
doubleQuoteSetsRe = r""""'(?=\w)"""
singleQuoteSetsRe = r"""'"(?=\w)"""

# Special case for decade abbreviations (the '80s):
decadeAbbrRe = r"(?<!\w)'(?=\d{2}s)"

# Get most opening double quotes:
openingDoubleQuotesRegex = r'%s"(?=\w)' % openingQuotesBase

# Double closing quotes:
closingDoubleQuotesRegex = r'"(?=\s)'
closingDoubleQuotesRegex2 = '(?<=%s)"' % closeClass

# Get most opening single quotes:
openingSingleQuotesRegex = r"%s'(?=\w)" % openingQuotesBase

# Single closing quotes:
closingSingleQuotesRegex = r"(?<=%s)'(?!\s|s\b|\d)" % closeClass
closingSingleQuotesRegex2 = r"'(\s|s\b)"

# All remaining quotes should be opening ones
remainingSingleQuotesRegex = r"'"
remainingDoubleQuotesRegex = r'"'

# An HTML tag that is not self-closing.
HTML_STRICT_RE = HTML_RE + r'(?!\>)'
|
||||||
|
|
||||||
|
|
||||||
|
class SubstituteTextPattern(HtmlInlineProcessor):
    """ Replace a match with a sequence of literal parts and capture-group references. """

    def __init__(self, pattern, replace, md):
        """ Replaces matches with some text. """
        HtmlInlineProcessor.__init__(self, pattern)
        self.replace = replace
        self.md = md

    def handleMatch(self, m, data):
        pieces = []
        for part in self.replace:
            if isinstance(part, int):
                # Integer parts refer to capture groups of the match.
                pieces.append(m.group(part))
            else:
                # Literal parts are stashed so later processors leave them alone.
                pieces.append(self.md.htmlStash.store(part))
        return ''.join(pieces), m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class SmartyExtension(Extension):
    """ Add Smarty to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'smart_quotes': [True, 'Educate quotes'],
            'smart_angled_quotes': [False, 'Educate angled quotes'],
            'smart_dashes': [True, 'Educate dashes'],
            'smart_ellipses': [True, 'Educate ellipses'],
            'substitutions': [{}, 'Overwrite default substitutions'],
        }
        """ Default configuration options. """
        super().__init__(**kwargs)
        # Start from the module-level defaults, then overlay any
        # user-supplied overrides from the `substitutions` option.
        self.substitutions = dict(substitutions)
        self.substitutions.update(self.getConfig('substitutions', default={}))

    def _addPatterns(self, md, patterns, serie, priority):
        """ Register one `SubstituteTextPattern` per (regex, replacement) pair.

        Priority decreases by one per pattern so they run in the order given.
        """
        for ind, pattern in enumerate(patterns):
            pattern += (md,)
            pattern = SubstituteTextPattern(*pattern)
            name = 'smarty-%s-%d' % (serie, ind)
            self.inlinePatterns.register(pattern, name, priority-ind)

    def educateDashes(self, md) -> None:
        """ Register patterns converting `---` to em dash and `--` to en dash. """
        emDashesPattern = SubstituteTextPattern(
            r'(?<!-)---(?!-)', (self.substitutions['mdash'],), md
        )
        enDashesPattern = SubstituteTextPattern(
            r'(?<!-)--(?!-)', (self.substitutions['ndash'],), md
        )
        # Em dash is registered with higher priority so `---` is not
        # consumed first as `--` plus a stray hyphen.
        self.inlinePatterns.register(emDashesPattern, 'smarty-em-dashes', 50)
        self.inlinePatterns.register(enDashesPattern, 'smarty-en-dashes', 45)

    def educateEllipses(self, md) -> None:
        """ Register the pattern converting exactly three dots to an ellipsis. """
        ellipsesPattern = SubstituteTextPattern(
            r'(?<!\.)\.{3}(?!\.)', (self.substitutions['ellipsis'],), md
        )
        self.inlinePatterns.register(ellipsesPattern, 'smarty-ellipses', 10)

    def educateAngledQuotes(self, md) -> None:
        """ Register patterns converting `<<`/`>>` to angled (guillemet) quotes. """
        leftAngledQuotePattern = SubstituteTextPattern(
            r'\<\<', (self.substitutions['left-angle-quote'],), md
        )
        rightAngledQuotePattern = SubstituteTextPattern(
            r'\>\>', (self.substitutions['right-angle-quote'],), md
        )
        self.inlinePatterns.register(leftAngledQuotePattern, 'smarty-left-angle-quotes', 40)
        self.inlinePatterns.register(rightAngledQuotePattern, 'smarty-right-angle-quotes', 35)

    def educateQuotes(self, md) -> None:
        """ Register the ordered cascade of quote-educating patterns. """
        lsquo = self.substitutions['left-single-quote']
        rsquo = self.substitutions['right-single-quote']
        ldquo = self.substitutions['left-double-quote']
        rdquo = self.substitutions['right-double-quote']
        # Order matters: special cases first, then opening/closing
        # heuristics, then a catch-all that treats leftovers as opening.
        patterns = (
            (singleQuoteStartRe, (rsquo,)),
            (doubleQuoteStartRe, (rdquo,)),
            (doubleQuoteSetsRe, (ldquo + lsquo,)),
            (singleQuoteSetsRe, (lsquo + ldquo,)),
            (decadeAbbrRe, (rsquo,)),
            (openingSingleQuotesRegex, (1, lsquo)),
            (closingSingleQuotesRegex, (rsquo,)),
            (closingSingleQuotesRegex2, (rsquo, 1)),
            (remainingSingleQuotesRegex, (lsquo,)),
            (openingDoubleQuotesRegex, (1, ldquo)),
            (closingDoubleQuotesRegex, (rdquo,)),
            (closingDoubleQuotesRegex2, (rdquo,)),
            (remainingDoubleQuotesRegex, (ldquo,))
        )
        self._addPatterns(md, patterns, 'quotes', 30)

    def extendMarkdown(self, md):
        """ Wire the configured educators into the Markdown instance. """
        configs = self.getConfigs()
        # Smarty keeps its own registry; patterns run in a dedicated
        # tree processor rather than the main inline pass.
        self.inlinePatterns: Registry[HtmlInlineProcessor] = Registry()
        if configs['smart_ellipses']:
            self.educateEllipses(md)
        if configs['smart_quotes']:
            self.educateQuotes(md)
        if configs['smart_angled_quotes']:
            self.educateAngledQuotes(md)
            # Override `HTML_RE` from `inlinepatterns.py` so that it does not
            # process tags with duplicate closing quotes.
            md.inlinePatterns.register(HtmlInlineProcessor(HTML_STRICT_RE, md), 'html', 90)
        if configs['smart_dashes']:
            self.educateDashes(md)
        inlineProcessor = InlineProcessor(md)
        inlineProcessor.inlinePatterns = self.inlinePatterns
        md.treeprocessors.register(inlineProcessor, 'smarty', 2)
        # Allow users to backslash-escape literal quotes.
        md.ESCAPED_CHARS.extend(['"', "'"])
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `SmartyExtension` (extension-loader entry point). """
    return SmartyExtension(**kwargs)
|
||||||
243
plugins/markdown_preview/markdown/extensions/tables.py
Normal file
243
plugins/markdown_preview/markdown/extensions/tables.py
Normal file
@@ -0,0 +1,243 @@
|
|||||||
|
# Tables Extension for Python-Markdown
|
||||||
|
# ====================================
|
||||||
|
|
||||||
|
# Added parsing of tables to Python-Markdown.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/tables
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2009 [Waylan Limberg](http://achinghead.com)
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Added parsing of tables to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/tables)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..blockprocessors import BlockProcessor
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import re
|
||||||
|
# Bit flags recording which sides of the header row carry a border pipe.
PIPE_NONE = 0
PIPE_LEFT = 1
PIPE_RIGHT = 2
|
||||||
|
|
||||||
|
|
||||||
|
class TableProcessor(BlockProcessor):
    """ Process Tables. """

    # Tokenizes a row into escaped backslashes, escaped/plain backtick runs,
    # escaped pipes, and plain pipes (the only group that delimits cells).
    RE_CODE_PIPES = re.compile(r'(?:(\\\\)|(\\`+)|(`+)|(\\\|)|(\|))')
    # Matches a trailing, unescaped `|` (an even number of backslashes
    # before it does not escape it).
    RE_END_BORDER = re.compile(r'(?<!\\)(?:\\\\)*\|$')

    def __init__(self, parser, config):
        # `border` and `separator` are computed in `test()` and reused by
        # `run()` for the same block.
        self.border = False
        self.separator = ''
        self.config = config

        super().__init__(parser)

    def test(self, parent, block):
        """
        Ensure first two rows (column header and separator row) are valid table rows.

        Keep border check and separator row to avoid repeating the work.
        """
        is_table = False
        rows = [row.strip(' ') for row in block.split('\n')]
        if len(rows) > 1:
            header0 = rows[0]
            self.border = PIPE_NONE
            if header0.startswith('|'):
                self.border |= PIPE_LEFT
            if self.RE_END_BORDER.search(header0) is not None:
                self.border |= PIPE_RIGHT
            row = self._split_row(header0)
            row0_len = len(row)
            is_table = row0_len > 1

            # Each row in a single column table needs at least one pipe.
            if not is_table and row0_len == 1 and self.border:
                for index in range(1, len(rows)):
                    is_table = rows[index].startswith('|')
                    if not is_table:
                        is_table = self.RE_END_BORDER.search(rows[index]) is not None
                    if not is_table:
                        break

            if is_table:
                # The separator row may only contain pipes, colons,
                # hyphens and spaces, and must match the header's width.
                row = self._split_row(rows[1])
                is_table = (len(row) == row0_len) and set(''.join(row)) <= set('|:- ')
                if is_table:
                    self.separator = row

        return is_table

    def run(self, parent, blocks):
        """ Parse a table block and build table. """
        block = blocks.pop(0).split('\n')
        header = block[0].strip(' ')
        # Row 1 is the separator; body rows start at index 2.
        rows = [] if len(block) < 3 else block[2:]

        # Get alignment of columns
        align = []
        for c in self.separator:
            c = c.strip(' ')
            if c.startswith(':') and c.endswith(':'):
                align.append('center')
            elif c.startswith(':'):
                align.append('left')
            elif c.endswith(':'):
                align.append('right')
            else:
                align.append(None)

        # Build table
        table = etree.SubElement(parent, 'table')
        thead = etree.SubElement(table, 'thead')
        self._build_row(header, thead, align)
        tbody = etree.SubElement(table, 'tbody')
        if len(rows) == 0:
            # Handle empty table
            self._build_empty_row(tbody, align)
        else:
            for row in rows:
                self._build_row(row.strip(' '), tbody, align)

    def _build_empty_row(self, parent, align):
        """Build an empty row."""
        tr = etree.SubElement(parent, 'tr')
        count = len(align)
        while count:
            etree.SubElement(tr, 'td')
            count -= 1

    def _build_row(self, row, parent, align):
        """ Given a row of text, build table cells. """
        tr = etree.SubElement(parent, 'tr')
        tag = 'td'
        if parent.tag == 'thead':
            tag = 'th'
        cells = self._split_row(row)
        # We use align here rather than cells to ensure every row
        # contains the same number of columns.
        for i, a in enumerate(align):
            c = etree.SubElement(tr, tag)
            try:
                c.text = cells[i].strip(' ')
            except IndexError:  # pragma: no cover
                c.text = ""
            if a:
                if self.config['use_align_attribute']:
                    c.set('align', a)
                else:
                    c.set('style', f'text-align: {a};')

    def _split_row(self, row):
        """ split a row of text into list of cells. """
        # Strip the outer border pipes first so they don't create
        # empty leading/trailing cells.
        if self.border:
            if row.startswith('|'):
                row = row[1:]
            row = self.RE_END_BORDER.sub('', row)
        return self._split(row)

    def _split(self, row):
        """ split a row of text with some code into a list of cells. """
        elements = []
        pipes = []
        tics = []
        tic_points = []
        tic_region = []
        good_pipes = []

        # Parse row
        # Throw out \\, and \|
        for m in self.RE_CODE_PIPES.finditer(row):
            # Store ` data (len, start_pos, end_pos)
            if m.group(2):
                # \`+
                # Store length of each tic group: subtract \
                tics.append(len(m.group(2)) - 1)
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(2), m.end(2) - 1, 1))
            elif m.group(3):
                # `+
                # Store length of each tic group
                tics.append(len(m.group(3)))
                # Store start of group, end of group, and escape length
                tic_points.append((m.start(3), m.end(3) - 1, 0))
            # Store pipe location
            elif m.group(5):
                pipes.append(m.start(5))

        # Pair up tics according to size if possible
        # Subtract the escape length *only* from the opening.
        # Walk through tic list and see if tic has a close.
        # Store the tic region (start of region, end of region).
        pos = 0
        tic_len = len(tics)
        while pos < tic_len:
            try:
                tic_size = tics[pos] - tic_points[pos][2]
                if tic_size == 0:
                    # A fully escaped backtick can never open a span.
                    raise ValueError
                index = tics[pos + 1:].index(tic_size) + 1
                tic_region.append((tic_points[pos][0], tic_points[pos + index][1]))
                pos += index + 1
            except ValueError:
                # No matching closer; move on to the next tic group.
                pos += 1

        # Resolve pipes. Check if they are within a tic pair region.
        # Walk through pipes comparing them to each region.
        # - If pipe position is less that a region, it isn't in a region
        # - If it is within a region, we don't want it, so throw it out
        # - If we didn't throw it out, it must be a table pipe
        for pipe in pipes:
            throw_out = False
            for region in tic_region:
                if pipe < region[0]:
                    # Pipe is not in a region
                    break
                elif region[0] <= pipe <= region[1]:
                    # Pipe is within a code region. Throw it out.
                    throw_out = True
                    break
            if not throw_out:
                good_pipes.append(pipe)

        # Split row according to table delimiters.
        pos = 0
        for pipe in good_pipes:
            elements.append(row[pos:pipe])
            pos = pipe + 1
        elements.append(row[pos:])
        return elements
|
||||||
|
|
||||||
|
|
||||||
|
class TableExtension(Extension):
    """ Add tables to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'use_align_attribute': [False, 'True to use align attribute instead of style.'],
        }
        """ Default configuration options. """

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add an instance of `TableProcessor` to `BlockParser`. """
        # Make `|` escapable so literal pipes can appear inside cells.
        if '|' not in md.ESCAPED_CHARS:
            md.ESCAPED_CHARS.append('|')
        processor = TableProcessor(md.parser, self.getConfigs())
        md.parser.blockprocessors.register(processor, 'table', 75)
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `TableExtension` (extension-loader entry point). """
    return TableExtension(**kwargs)
|
||||||
408
plugins/markdown_preview/markdown/extensions/toc.py
Normal file
408
plugins/markdown_preview/markdown/extensions/toc.py
Normal file
@@ -0,0 +1,408 @@
|
|||||||
|
# Table of Contents Extension for Python-Markdown
|
||||||
|
# ===============================================
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/toc
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright 2008 [Jack Miller](https://codezen.org/)
|
||||||
|
|
||||||
|
# All changes Copyright 2008-2014 The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Add table of contents support to Python-Markdown.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/toc)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..treeprocessors import Treeprocessor
|
||||||
|
from ..util import code_escape, parseBoolValue, AMP_SUBSTITUTE, HTML_PLACEHOLDER_RE, AtomicString
|
||||||
|
from ..treeprocessors import UnescapeTreeprocessor
|
||||||
|
import re
|
||||||
|
import html
|
||||||
|
import unicodedata
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
|
|
||||||
|
def slugify(value, separator, unicode=False):
    """ Slugify a string, to make it URL friendly. """
    if not unicode:
        # Replace Extended Latin characters with ASCII, i.e. `žlutý` => `zluty`,
        # by decomposing accents (NFKD) and dropping non-ASCII code points.
        decomposed = unicodedata.normalize('NFKD', value)
        value = decomposed.encode('ascii', 'ignore').decode('ascii')
    # Drop everything except word characters, whitespace and hyphens.
    cleaned = re.sub(r'[^\w\s-]', '', value).strip().lower()
    # Collapse runs of whitespace and/or the separator into one separator.
    collapse_re = r'[{}\s]+'.format(separator)
    return re.sub(collapse_re, separator, cleaned)
|
||||||
|
|
||||||
|
|
||||||
|
def slugify_unicode(value, separator):
    """ Slugify a string, to make it URL friendly while preserving Unicode characters. """
    # Same as `slugify` but skips the NFKD -> ASCII transliteration step.
    return slugify(value, separator, unicode=True)
|
||||||
|
|
||||||
|
|
||||||
|
# Matches ids that already carry a numeric suffix, e.g. `header_2`.
IDCOUNT_RE = re.compile(r'^(.*)_([0-9]+)$')
|
||||||
|
|
||||||
|
|
||||||
|
def unique(id, ids):
    """ Ensure id is unique in set of ids. Append '_1', '_2'... if not.

    Mutates `ids` by adding the chosen id before returning it.
    """
    # Also loops when `id` is empty, producing `_1`, `_2`, ...
    while id in ids or not id:
        suffixed = re.match(r'^(.*)_([0-9]+)$', id)
        if suffixed:
            # Bump the existing numeric suffix.
            id = '%s_%d' % (suffixed.group(1), int(suffixed.group(2)) + 1)
        else:
            id = '%s_%d' % (id, 1)
    ids.add(id)
    return id
|
||||||
|
|
||||||
|
|
||||||
|
def get_name(el):
    """Get title name."""

    # `AtomicString` chunks get their HTML entities unescaped; plain
    # string chunks are used verbatim.
    parts = [
        html.unescape(chunk) if isinstance(chunk, AtomicString) else chunk
        for chunk in el.itertext()
    ]
    return ''.join(parts).strip()
|
||||||
|
|
||||||
|
|
||||||
|
def stashedHTML2text(text, md, strip_entities: bool = True):
    """ Extract raw HTML from stash, reduce to plain text and swap with placeholder. """

    def _to_plain(m):
        """ Substitute one stash placeholder with its plain-text content. """
        try:
            raw = md.htmlStash.rawHtmlBlocks[int(m.group(1))]
        except (IndexError, TypeError):  # pragma: no cover
            # Unknown placeholder index: leave the placeholder as-is.
            return m.group(0)
        # Strip out tags, leaving text only.
        stripped = re.sub(r'(<[^>]+>)', '', raw)
        if not strip_entities:
            return stripped
        # Optionally drop HTML entities as well.
        return re.sub(r'(&[\#a-zA-Z0-9]+;)', '', stripped)

    return HTML_PLACEHOLDER_RE.sub(_to_plain, text)
|
||||||
|
|
||||||
|
|
||||||
|
def unescape(text):
    """ Unescape escaped text. """
    # Delegate to the tree processor that reverses Markdown's
    # placeholder-based character escaping.
    c = UnescapeTreeprocessor()
    return c.unescape(text)
|
||||||
|
|
||||||
|
|
||||||
|
def nest_toc_tokens(toc_list):
    """Given an unsorted list with errors and skips, return a nested one.

    [{'level': 1}, {'level': 2}]
    =>
    [{'level': 1, 'children': [{'level': 2, 'children': []}]}]

    A wrong list is also converted:

    [{'level': 2}, {'level': 1}]
    =>
    [{'level': 2, 'children': []}, {'level': 1, 'children': []}]

    Note: consumes `toc_list` (pops from it) and adds a 'children' key
    to each token dict in place.
    """

    ordered_list = []
    if len(toc_list):
        # Initialize everything by processing the first entry
        last = toc_list.pop(0)
        last['children'] = []
        levels = [last['level']]
        ordered_list.append(last)
        parents = []

        # Walk the rest nesting the entries properly
        while toc_list:
            t = toc_list.pop(0)
            current_level = t['level']
            t['children'] = []

            # Reduce depth if current level < last item's level
            if current_level < levels[-1]:
                # Pop last level since we know we are less than it
                levels.pop()

                # Pop parents and levels we are less than or equal to
                to_pop = 0
                for p in reversed(parents):
                    if current_level <= p['level']:
                        to_pop += 1
                    else:  # pragma: no cover
                        break
                if to_pop:
                    levels = levels[:-to_pop]
                    parents = parents[:-to_pop]

                # Note current level as last
                levels.append(current_level)

            # Level is the same, so append to
            # the current parent (if available)
            if current_level == levels[-1]:
                (parents[-1]['children'] if parents
                 else ordered_list).append(t)

            # Current level is > last item's level,
            # So make last item a parent and append current as child
            else:
                last['children'].append(t)
                parents.append(last)
                levels.append(current_level)
            last = t

    return ordered_list
|
||||||
|
|
||||||
|
|
||||||
|
class TocTreeprocessor(Treeprocessor):
    """ Step through document and build TOC. """

    def __init__(self, md, config):
        super().__init__(md)

        self.marker = config["marker"]
        self.title = config["title"]
        # `base_level` is an offset added to each header's own level.
        self.base_level = int(config["baselevel"]) - 1
        self.slugify = config["slugify"]
        self.sep = config["separator"]
        self.toc_class = config["toc_class"]
        self.title_class = config["title_class"]
        self.use_anchors = parseBoolValue(config["anchorlink"])
        self.anchorlink_class = config["anchorlink_class"]
        # `permalink` may be a bool or the literal link text; a non-bool
        # value passes through unchanged.
        self.use_permalinks = parseBoolValue(config["permalink"], False)
        if self.use_permalinks is None:
            self.use_permalinks = config["permalink"]
        self.permalink_class = config["permalink_class"]
        self.permalink_title = config["permalink_title"]
        self.permalink_leading = parseBoolValue(config["permalink_leading"], False)
        self.header_rgx = re.compile("[Hh][123456]")
        # `toc_depth` is either an int (bottom level) or a 't-b' range string.
        if isinstance(config["toc_depth"], str) and '-' in config["toc_depth"]:
            self.toc_top, self.toc_bottom = [int(x) for x in config["toc_depth"].split('-')]
        else:
            self.toc_top = 1
            self.toc_bottom = int(config["toc_depth"])

    def iterparent(self, node):
        """ Iterator wrapper to get allowed parent and child all at once. """

        # We do not allow the marker inside a header as that
        # would causes an endless loop of placing a new TOC
        # inside previously generated TOC.
        for child in node:
            if not self.header_rgx.match(child.tag) and child.tag not in ['pre', 'code']:
                yield node, child
                yield from self.iterparent(child)

    def replace_marker(self, root, elem) -> None:
        """ Replace marker with elem. """
        for (p, c) in self.iterparent(root):
            text = ''.join(c.itertext()).strip()
            if not text:
                continue

            # To keep the output from screwing up the
            # validation by putting a `<div>` inside of a `<p>`
            # we actually replace the `<p>` in its entirety.

            # The `<p>` element may contain more than a single text content
            # (`nl2br` can introduce a `<br>`). In this situation, `c.text` returns
            # the very first content, ignore children contents or tail content.
            # `len(c) == 0` is here to ensure there is only text in the `<p>`.
            if c.text and c.text.strip() == self.marker and len(c) == 0:
                for i in range(len(p)):
                    if p[i] == c:
                        p[i] = elem
                        break

    def set_level(self, elem) -> None:
        """ Adjust header level according to base level. """
        level = int(elem.tag[-1]) + self.base_level
        if level > 6:
            # Clamp: HTML has no header deeper than <h6>.
            level = 6
        elem.tag = 'h%d' % level

    def add_anchor(self, c, elem_id) -> None:
        """ Wrap the header's entire content in a self-referencing `<a>`. """
        anchor = etree.Element("a")
        anchor.text = c.text
        anchor.attrib["href"] = "#" + elem_id
        anchor.attrib["class"] = self.anchorlink_class
        c.text = ""
        # Move all existing children inside the anchor.
        for elem in c:
            anchor.append(elem)
        while len(c):
            c.remove(c[0])
        c.append(anchor)

    def add_permalink(self, c, elem_id) -> None:
        """ Append (or prepend) a Sphinx-style permalink `<a>` to the header. """
        permalink = etree.Element("a")
        # `use_permalinks is True` means default pilcrow text; any other
        # truthy value is used as the literal link text.
        permalink.text = ("%spara;" % AMP_SUBSTITUTE
                          if self.use_permalinks is True
                          else self.use_permalinks)
        permalink.attrib["href"] = "#" + elem_id
        permalink.attrib["class"] = self.permalink_class
        if self.permalink_title:
            permalink.attrib["title"] = self.permalink_title
        if self.permalink_leading:
            permalink.tail = c.text
            c.text = ""
            c.insert(0, permalink)
        else:
            c.append(permalink)

    def build_toc_div(self, toc_list):
        """ Return a `<div>` element built from a (nested) toc token list. """
        div = etree.Element("div")
        div.attrib["class"] = self.toc_class

        # Add title to the div
        if self.title:
            header = etree.SubElement(div, "span")
            if self.title_class:
                header.attrib["class"] = self.title_class
            header.text = self.title

        def build_etree_ul(toc_list, parent):
            # Recursively emit nested `<ul><li><a>` structure.
            ul = etree.SubElement(parent, "ul")
            for item in toc_list:
                # List item link, to be inserted into the toc div
                li = etree.SubElement(ul, "li")
                link = etree.SubElement(li, "a")
                link.text = item.get('name', '')
                link.attrib["href"] = '#' + item.get('id', '')
                if item['children']:
                    build_etree_ul(item['children'], li)
            return ul

        build_etree_ul(toc_list, div)

        if 'prettify' in self.md.treeprocessors:
            self.md.treeprocessors['prettify'].run(div)

        return div

    def run(self, doc):
        """ Assign header ids, collect TOC tokens, insert the TOC div and
        attach `toc`/`toc_tokens` to the Markdown instance. """
        # Get a list of id attributes
        used_ids = set()
        for el in doc.iter():
            if "id" in el.attrib:
                used_ids.add(el.attrib["id"])

        toc_tokens = []
        for el in doc.iter():
            if isinstance(el.tag, str) and self.header_rgx.match(el.tag):
                self.set_level(el)
                text = get_name(el)

                # Do not override pre-existing ids
                if "id" not in el.attrib:
                    innertext = unescape(stashedHTML2text(text, self.md))
                    el.attrib["id"] = unique(self.slugify(innertext, self.sep), used_ids)

                # Only collect headers inside the configured depth range.
                if int(el.tag[-1]) >= self.toc_top and int(el.tag[-1]) <= self.toc_bottom:
                    toc_tokens.append({
                        'level': int(el.tag[-1]),
                        'id': el.attrib["id"],
                        'name': unescape(stashedHTML2text(
                            code_escape(el.attrib.get('data-toc-label', text)),
                            self.md, strip_entities=False
                        ))
                    })

                # Remove the data-toc-label attribute as it is no longer needed
                if 'data-toc-label' in el.attrib:
                    del el.attrib['data-toc-label']

                if self.use_anchors:
                    self.add_anchor(el, el.attrib["id"])
                if self.use_permalinks not in [False, None]:
                    self.add_permalink(el, el.attrib["id"])

        toc_tokens = nest_toc_tokens(toc_tokens)
        div = self.build_toc_div(toc_tokens)
        if self.marker:
            self.replace_marker(doc, div)

        # serialize and attach to markdown instance.
        toc = self.md.serializer(div)
        for pp in self.md.postprocessors:
            toc = pp.run(toc)
        self.md.toc_tokens = toc_tokens
        self.md.toc = toc
|
||||||
|
|
||||||
|
|
||||||
|
class TocExtension(Extension):
    """ Add Table of Contents support to Markdown. """

    # Subclasses may substitute their own tree processor class.
    TreeProcessorClass = TocTreeprocessor

    def __init__(self, **kwargs):
        self.config = {
            'marker': [
                '[TOC]',
                'Text to find and replace with Table of Contents. Set to an empty string to disable. '
                'Default: `[TOC]`.'
            ],
            'title': [
                '', 'Title to insert into TOC `<div>`. Default: an empty string.'
            ],
            'title_class': [
                'toctitle', 'CSS class used for the title. Default: `toctitle`.'
            ],
            'toc_class': [
                # Fixed help text: this option styles the TOC `<div>` itself
                # (default `toc`) — the previous text wrongly described the
                # anchor links and claimed a `toclink` default.
                'toc', 'CSS class(es) used for the `<div>` containing the Table of Contents. Default: `toc`.'
            ],
            'anchorlink': [
                False, 'True if header should be a self link. Default: `False`.'
            ],
            'anchorlink_class': [
                'toclink', 'CSS class(es) used for the link. Default: `toclink`.'
            ],
            'permalink': [
                0, 'True or link text if a Sphinx-style permalink should be added. Default: `False`.'
            ],
            'permalink_class': [
                'headerlink', 'CSS class(es) used for the link. Default: `headerlink`.'
            ],
            'permalink_title': [
                'Permanent link', 'Title attribute of the permalink. Default: `Permanent link`.'
            ],
            'permalink_leading': [
                False,
                'True if permalinks should be placed at start of the header, rather than end. Default: False.'
            ],
            'baselevel': ['1', 'Base level for headers. Default: `1`.'],
            'slugify': [
                slugify, 'Function to generate anchors based on header text. Default: `slugify`.'
            ],
            'separator': ['-', 'Word separator. Default: `-`.'],
            'toc_depth': [
                6,
                'Define the range of section levels to include in the Table of Contents. A single integer '
                '(b) defines the bottom section level (<h1>..<hb>) only. A string consisting of two digits '
                'separated by a hyphen in between (`2-5`) defines the top (t) and the bottom (b) (<ht>..<hb>). '
                'Default: `6` (bottom).'
            ],
        }
        """ Default configuration options. """

        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        """ Add TOC tree processor to Markdown. """
        md.registerExtension(self)
        self.md = md
        self.reset()
        tocext = self.TreeProcessorClass(md, self.getConfigs())
        md.treeprocessors.register(tocext, 'toc', 5)

    def reset(self) -> None:
        """ Clear per-document TOC output between conversions. """
        self.md.toc = ''
        self.md.toc_tokens = []
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return an instance of `TocExtension` (extension-loader entry point). """
    return TocExtension(**kwargs)
|
||||||
96
plugins/markdown_preview/markdown/extensions/wikilinks.py
Normal file
96
plugins/markdown_preview/markdown/extensions/wikilinks.py
Normal file
@@ -0,0 +1,96 @@
|
|||||||
|
# WikiLinks Extension for Python-Markdown
|
||||||
|
# ======================================
|
||||||
|
|
||||||
|
# Converts [[WikiLinks]] to relative links.
|
||||||
|
|
||||||
|
# See https://Python-Markdown.github.io/extensions/wikilinks
|
||||||
|
# for documentation.
|
||||||
|
|
||||||
|
# Original code Copyright [Waylan Limberg](http://achinghead.com/).
|
||||||
|
|
||||||
|
# All changes Copyright The Python Markdown Project
|
||||||
|
|
||||||
|
# License: [BSD](https://opensource.org/licenses/bsd-license.php)
|
||||||
|
|
||||||
|
"""
|
||||||
|
Converts `[[WikiLinks]]` to relative links.
|
||||||
|
|
||||||
|
See the [documentation](https://Python-Markdown.github.io/extensions/wikilinks)
|
||||||
|
for details.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import Extension
|
||||||
|
from ..inlinepatterns import InlineProcessor
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def build_url(label, base, end):
    """ Build a URL from the label, a base, and an end. """
    # Collapse runs of spaces (optionally adjacent to an underscore)
    # into a single underscore.
    slug = re.sub(r'([ ]+_)|(_[ ]+)|([ ]+)', '_', label)
    return '{}{}{}'.format(base, slug, end)
|
||||||
|
|
||||||
|
|
||||||
|
class WikiLinkExtension(Extension):
    """ Add inline processor to Markdown. """

    def __init__(self, **kwargs):
        self.config = {
            'base_url': ['/', 'String to append to beginning or URL.'],
            'end_url': ['/', 'String to append to end of URL.'],
            'html_class': ['wikilink', 'CSS hook. Leave blank for none.'],
            'build_url': [build_url, 'Callable formats URL from label.'],
        }
        """ Default configuration options. """
        super().__init__(**kwargs)

    def extendMarkdown(self, md):
        # Keep a reference so the processor can later consult `md.Meta`.
        self.md = md

        # append to end of inline patterns
        WIKILINK_RE = r'\[\[([\w0-9_ -]+)\]\]'
        wikilinkPattern = WikiLinksInlineProcessor(WIKILINK_RE, self.getConfigs())
        wikilinkPattern.md = md
        md.inlinePatterns.register(wikilinkPattern, 'wikilink', 75)
|
||||||
|
|
||||||
|
|
||||||
|
class WikiLinksInlineProcessor(InlineProcessor):
    """ Build link from `wikilink`. """

    def __init__(self, pattern, config):
        super().__init__(pattern)
        self.config = config

    def handleMatch(self, m, data):
        """ Turn a `[[label]]` match into an `<a>` element (or drop it if blank). """
        label = m.group(1).strip()
        if not label:
            # A wikilink with only whitespace inside renders as nothing.
            return '', m.start(0), m.end(0)
        base_url, end_url, html_class = self._getMeta()
        url = self.config['build_url'](label, base_url, end_url)
        a = etree.Element('a')
        a.text = label
        a.set('href', url)
        if html_class:
            a.set('class', html_class)
        return a, m.start(0), m.end(0)

    def _getMeta(self):
        """ Return meta data or `config` data. """
        base_url = self.config['base_url']
        end_url = self.config['end_url']
        html_class = self.config['html_class']
        # Document metadata (from the `meta` extension) overrides config.
        if hasattr(self.md, 'Meta'):
            meta = self.md.Meta
            if 'wiki_base_url' in meta:
                base_url = meta['wiki_base_url'][0]
            if 'wiki_end_url' in meta:
                end_url = meta['wiki_end_url'][0]
            if 'wiki_html_class' in meta:
                html_class = meta['wiki_html_class'][0]
        return base_url, end_url, html_class
|
||||||
|
|
||||||
|
|
||||||
|
def makeExtension(**kwargs):  # pragma: no cover
    """ Return a `WikiLinkExtension` configured with `kwargs`. """
    ext = WikiLinkExtension(**kwargs)
    return ext
|
||||||
334
plugins/markdown_preview/markdown/htmlparser.py
Normal file
334
plugins/markdown_preview/markdown/htmlparser.py
Normal file
@@ -0,0 +1,334 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module imports a copy of [`html.parser.HTMLParser`][] and modifies it heavily through monkey-patches.
|
||||||
|
A copy is imported rather than the module being directly imported as this ensures that the user can import
|
||||||
|
and use the unmodified library for their own needs.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
import importlib.util
|
||||||
|
import sys
|
||||||
|
|
||||||
|
|
||||||
|
# Import a copy of the html.parser lib as `htmlparser` so we can monkeypatch it.
# Users can still do `from html import parser` and get the default behavior.
spec = importlib.util.find_spec('html.parser')
htmlparser = importlib.util.module_from_spec(spec)
spec.loader.exec_module(htmlparser)
# Register the patched copy under its own name so submodules/pickling resolve it.
sys.modules['htmlparser'] = htmlparser

# Monkeypatch `HTMLParser` to only accept `?>` to close Processing Instructions.
htmlparser.piclose = re.compile(r'\?>')
# Monkeypatch `HTMLParser` to only recognize entity references with a closing semicolon.
htmlparser.entityref = re.compile(r'&([a-zA-Z][-.a-zA-Z0-9]*);')
# Monkeypatch `HTMLParser` to no longer support partial entities. We are always feeding a complete block,
# so the 'incomplete' functionality is unnecessary. As the `entityref` regex is run right before incomplete,
# and the two regex are the same, then incomplete will simply never match and we avoid the logic within.
htmlparser.incomplete = htmlparser.entityref
# Monkeypatch `HTMLParser` to not accept a backtick in a tag name, attribute name, or bare value.
htmlparser.locatestarttagend_tolerant = re.compile(r"""
  <[a-zA-Z][^`\t\n\r\f />\x00]*        # tag name <= added backtick here
  (?:[\s/]*                            # optional whitespace before attribute name
    (?:(?<=['"\s/])[^`\s/>][^\s/=>]*   # attribute name <= added backtick here
      (?:\s*=+\s*                      # value indicator
        (?:'[^']*'                     # LITA-enclosed value
          |"[^"]*"                     # LIT-enclosed value
          |(?!['"])[^`>\s]*            # bare value <= added backtick here
         )
         (?:\s*,)*                     # possibly followed by a comma
       )?(?:\s|/(?!>))*
     )*
   )?
  \s*                                  # trailing whitespace
""", re.VERBOSE)

# Match a blank line at the start of a block of text (two newlines).
# The newlines may be preceded by additional whitespace.
blank_line_re = re.compile(r'^([ ]*\n){2}')
|
||||||
|
|
||||||
|
|
||||||
|
class HTMLExtractor(htmlparser.HTMLParser):
    """
    Extract raw HTML from text.

    The raw HTML is stored in the [`htmlStash`][markdown.util.HtmlStash] of the
    [`Markdown`][markdown.Markdown] instance passed to `md` and the remaining text
    is stored in `cleandoc` as a list of strings.
    """

    def __init__(self, md, *args, **kwargs):
        # Keep character references verbatim unless the caller overrides.
        if 'convert_charrefs' not in kwargs:
            kwargs['convert_charrefs'] = False

        # Block tags that should contain no content (self closing)
        self.empty_tags = set(['hr'])

        # Memoized char offsets of line starts in `rawdata` (index = line number - 1).
        self.lineno_start_cache = [0]

        # This calls self.reset
        super().__init__(*args, **kwargs)
        self.md = md

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.inraw = False
        self.intail = False
        self.stack = []  # When `inraw==True`, stack contains a list of tags
        self._cache = []
        self.cleandoc = []
        self.lineno_start_cache = [0]

        super().reset()

    def close(self):
        """Handle any buffered data."""
        super().close()
        if len(self.rawdata):
            # Temp fix for https://bugs.python.org/issue41989
            # TODO: remove this when the bug is fixed in all supported Python versions.
            if self.convert_charrefs and not self.cdata_elem:  # pragma: no cover
                self.handle_data(htmlparser.unescape(self.rawdata))
            else:
                self.handle_data(self.rawdata)
        # Handle any unclosed tags.
        if len(self._cache):
            self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache)))
            self._cache = []

    @property
    def line_offset(self) -> int:
        """Returns char index in `self.rawdata` for the start of the current line. """
        # Extend the memo table lazily up to the parser's current line.
        for ii in range(len(self.lineno_start_cache)-1, self.lineno-1):
            last_line_start_pos = self.lineno_start_cache[ii]
            lf_pos = self.rawdata.find('\n', last_line_start_pos)
            if lf_pos == -1:
                # No more newlines found. Use end of raw data as start of line beyond end.
                lf_pos = len(self.rawdata)
            self.lineno_start_cache.append(lf_pos+1)

        return self.lineno_start_cache[self.lineno-1]

    def at_line_start(self) -> bool:
        """
        Returns True if current position is at start of line.

        Allows for up to three blank spaces at start of line.
        """
        if self.offset == 0:
            return True
        if self.offset > 3:
            return False
        # Confirm up to first 3 chars are whitespace
        return self.rawdata[self.line_offset:self.line_offset + self.offset].strip() == ''

    def get_endtag_text(self, tag: str) -> str:
        """
        Returns the text of the end tag.

        If it fails to extract the actual text from the raw data, it builds a closing tag with `tag`.
        """
        # Attempt to extract actual tag from raw source text
        start = self.line_offset + self.offset
        m = htmlparser.endendtag.search(self.rawdata, start)
        if m:
            return self.rawdata[start:m.end()]
        else:  # pragma: no cover
            # Failed to extract from raw data. Assume well formed and lowercase.
            return '</{}>'.format(tag)

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str]]):
        """Route a start tag into the raw-HTML cache or `cleandoc`."""
        # Handle tags that should always be empty and do not specify a closing tag
        if tag in self.empty_tags:
            self.handle_startendtag(tag, attrs)
            return

        if self.md.is_block_level(tag) and (self.intail or (self.at_line_start() and not self.inraw)):
            # Started a new raw block. Prepare stack.
            self.inraw = True
            self.cleandoc.append('\n')

        text = self.get_starttag_text()
        if self.inraw:
            self.stack.append(tag)
            self._cache.append(text)
        else:
            self.cleandoc.append(text)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                # This is presumably a standalone tag in a code span (see #1036).
                self.clear_cdata_mode()

    def handle_endtag(self, tag: str):
        """Close out the current raw block or pass the end tag through."""
        text = self.get_endtag_text(tag)

        if self.inraw:
            self._cache.append(text)
            if tag in self.stack:
                # Remove tag from stack
                while self.stack:
                    if self.stack.pop() == tag:
                        break
            if len(self.stack) == 0:
                # End of raw block.
                if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(text):]):
                    # Preserve blank line and end of raw block.
                    self._cache.append('\n')
                else:
                    # More content exists after `endtag`.
                    self.intail = True
                # Reset stack.
                self.inraw = False
                self.cleandoc.append(self.md.htmlStash.store(''.join(self._cache)))
                # Insert blank line between this and next line.
                self.cleandoc.append('\n\n')
                self._cache = []
        else:
            self.cleandoc.append(text)

    def handle_data(self, data: str):
        """Append text data to the raw cache or `cleandoc` as appropriate."""
        # A newline in tail data ends the tail state.
        if self.intail and '\n' in data:
            self.intail = False
        if self.inraw:
            self._cache.append(data)
        else:
            self.cleandoc.append(data)

    def handle_empty_tag(self, data: str, is_block: bool):
        """ Handle empty tags (`<data>`). """
        if self.inraw or self.intail:
            # Append this to the existing raw block
            self._cache.append(data)
        elif self.at_line_start() and is_block:
            # Handle this as a standalone raw block
            if blank_line_re.match(self.rawdata[self.line_offset + self.offset + len(data):]):
                # Preserve blank line after tag in raw block.
                data += '\n'
            else:
                # More content exists after tag.
                self.intail = True
            item = self.cleandoc[-1] if self.cleandoc else ''
            # If we only have one newline before block element, add another
            if not item.endswith('\n\n') and item.endswith('\n'):
                self.cleandoc.append('\n')
            self.cleandoc.append(self.md.htmlStash.store(data))
            # Insert blank line between this and next line.
            self.cleandoc.append('\n\n')
        else:
            self.cleandoc.append(data)

    def handle_startendtag(self, tag: str, attrs: list[tuple[str, str]]):
        """Treat an XHTML-style self-closing tag as an empty tag."""
        self.handle_empty_tag(self.get_starttag_text(), is_block=self.md.is_block_level(tag))

    def handle_charref(self, name: str):
        """Pass a numeric character reference through as an empty (inline) tag."""
        self.handle_empty_tag('&#{};'.format(name), is_block=False)

    def handle_entityref(self, name: str):
        """Pass a named entity reference through as an empty (inline) tag."""
        self.handle_empty_tag('&{};'.format(name), is_block=False)

    def handle_comment(self, data: str):
        """Treat an HTML comment as an empty block-level tag."""
        self.handle_empty_tag('<!--{}-->'.format(data), is_block=True)

    def handle_decl(self, data: str):
        """Treat a declaration (e.g. doctype) as an empty block-level tag."""
        self.handle_empty_tag('<!{}>'.format(data), is_block=True)

    def handle_pi(self, data: str):
        """Treat a processing instruction as an empty block-level tag."""
        self.handle_empty_tag('<?{}?>'.format(data), is_block=True)

    def unknown_decl(self, data: str):
        """Treat an unknown declaration (including CDATA) as an empty block-level tag."""
        end = ']]>' if data.startswith('CDATA[') else ']>'
        self.handle_empty_tag('<![{}{}'.format(data, end), is_block=True)

    def parse_pi(self, i: int) -> int:
        """Parse a processing instruction, or fall back to plain data mid-line."""
        if self.at_line_start() or self.intail:
            return super().parse_pi(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<?')
        return i + 2

    def parse_html_declaration(self, i: int) -> int:
        """Parse an HTML declaration, or fall back to plain data mid-line."""
        if self.at_line_start() or self.intail:
            return super().parse_html_declaration(i)
        # This is not the beginning of a raw block so treat as plain data
        # and avoid consuming any tags which may follow (see #1066).
        self.handle_data('<!')
        return i + 2

    # The rest has been copied from base class in standard lib to address #1036.
    # As `__startag_text` is private, all references to it must be in this subclass.
    # The last few lines of `parse_starttag` are reversed so that `handle_starttag`
    # can override `cdata_mode` in certain situations (in a code span).
    __starttag_text: str | None = None

    def get_starttag_text(self) -> str:
        """Return full source of start tag: `<...>`."""
        return self.__starttag_text

    def parse_starttag(self, i: int) -> int:  # pragma: no cover
        """Parse a start tag beginning at index `i`; returns the end position."""
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]

        # Now parse the data between `i+1` and `j` into a tag and `attrs`
        attrs = []
        match = htmlparser.tagfind_tolerant.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            m = htmlparser.attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:  # noqa: E127
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = htmlparser.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()

        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            # Malformed start tag: emit it as plain data instead of a tag.
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                    - self.__starttag_text.rfind("\n")  # noqa: E127
            else:
                offset = offset + len(self.__starttag_text)
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: `<span attr="value" />`
            self.handle_startendtag(tag, attrs)
        else:
            # *** set `cdata_mode` first so we can override it in `handle_starttag` (see #1036) ***
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
            self.handle_starttag(tag, attrs)
        return endpos
|
||||||
992
plugins/markdown_preview/markdown/inlinepatterns.py
Normal file
992
plugins/markdown_preview/markdown/inlinepatterns.py
Normal file
@@ -0,0 +1,992 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
In version 3.0, a new, more flexible inline processor was added, [`markdown.inlinepatterns.InlineProcessor`][]. The
|
||||||
|
original inline patterns, which inherit from [`markdown.inlinepatterns.Pattern`][] or one of its children are still
|
||||||
|
supported, though users are encouraged to migrate.
|
||||||
|
|
||||||
|
The new `InlineProcessor` provides two major enhancements to `Patterns`:
|
||||||
|
|
||||||
|
1. Inline Processors no longer need to match the entire block, so regular expressions no longer need to start with
|
||||||
|
`r'^(.*?)'` and end with `r'(.*?)$'`. This runs faster. The returned [`Match`][re.Match] object will only contain
|
||||||
|
what is explicitly matched in the pattern, and extension pattern groups now start with `m.group(1)`.
|
||||||
|
|
||||||
|
2. The `handleMatch` method now takes an additional input called `data`, which is the entire block under analysis,
|
||||||
|
not just what is matched with the specified pattern. The method now returns the element *and* the indexes relative
|
||||||
|
to `data` that the return element is replacing (usually `m.start(0)` and `m.end(0)`). If the boundaries are
|
||||||
|
returned as `None`, it is assumed that the match did not take place, and nothing will be altered in `data`.
|
||||||
|
|
||||||
|
This allows handling of more complex constructs than regular expressions can handle, e.g., matching nested
|
||||||
|
brackets, and explicit control of the span "consumed" by the processor.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from . import util
|
||||||
|
from typing import TYPE_CHECKING, Any, Collection, NamedTuple
|
||||||
|
import re
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
try: # pragma: no cover
|
||||||
|
from html import entities
|
||||||
|
except ImportError: # pragma: no cover
|
||||||
|
import htmlentitydefs as entities
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
|
||||||
|
def build_inlinepatterns(md: Markdown, **kwargs: Any) -> util.Registry[InlineProcessor]:
    """
    Build the default set of inline patterns for Markdown.

    The order in which processors and/or patterns are applied is very important - e.g. if we first replace
    `http://.../` links with `<a>` tags and _then_ try to replace inline HTML, we would end up with a mess. So, we
    apply the expressions in the following order:

    * backticks and escaped characters have to be handled before everything else so that we can preempt any markdown
      patterns by escaping them;

    * then we handle the various types of links (auto-links must be handled before inline HTML);

    * then we handle inline HTML. At this point we will simply replace all inline HTML strings with a placeholder
      and add the actual HTML to a stash;

    * finally we apply strong, emphasis, etc.

    """
    # (processor, name, priority) triples, highest priority first.
    registrations = (
        (BacktickInlineProcessor(BACKTICK_RE), 'backtick', 190),
        (EscapeInlineProcessor(ESCAPE_RE, md), 'escape', 180),
        (ReferenceInlineProcessor(REFERENCE_RE, md), 'reference', 170),
        (LinkInlineProcessor(LINK_RE, md), 'link', 160),
        (ImageInlineProcessor(IMAGE_LINK_RE, md), 'image_link', 150),
        (ImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'image_reference', 140),
        (ShortReferenceInlineProcessor(REFERENCE_RE, md), 'short_reference', 130),
        (ShortImageReferenceInlineProcessor(IMAGE_REFERENCE_RE, md), 'short_image_ref', 125),
        (AutolinkInlineProcessor(AUTOLINK_RE, md), 'autolink', 120),
        (AutomailInlineProcessor(AUTOMAIL_RE, md), 'automail', 110),
        (SubstituteTagInlineProcessor(LINE_BREAK_RE, 'br'), 'linebreak', 100),
        (HtmlInlineProcessor(HTML_RE, md), 'html', 90),
        (HtmlInlineProcessor(ENTITY_RE, md), 'entity', 80),
        (SimpleTextInlineProcessor(NOT_STRONG_RE), 'not_strong', 70),
        (AsteriskProcessor(r'\*'), 'em_strong', 60),
        (UnderscoreProcessor(r'_'), 'em_strong2', 50),
    )
    inlinePatterns = util.Registry()
    for processor, name, priority in registrations:
        inlinePatterns.register(processor, name, priority)
    return inlinePatterns
|
||||||
|
|
||||||
|
|
||||||
|
# The actual regular expressions for patterns
# -----------------------------------------------------------------------------

NOIMG = r'(?<!\!)'
""" Match not an image. Partial regular expression which matches if not preceded by `!`. """

BACKTICK_RE = r'(?:(?<!\\)((?:\\{2})+)(?=`+)|(?<!\\)(`+)(.+?)(?<!`)\2(?!`))'
""" Match backtick quoted string (`` `e=f()` `` or ``` ``e=f("`")`` ```). """

ESCAPE_RE = r'\\(.)'
""" Match a backslash escaped character (`\\<` or `\\*`). """

EMPHASIS_RE = r'(\*)([^\*]+)\1'
""" Match emphasis with an asterisk (`*emphasis*`). """

STRONG_RE = r'(\*{2})(.+?)\1'
""" Match strong with an asterisk (`**strong**`). """

SMART_STRONG_RE = r'(?<!\w)(_{2})(?!_)(.+?)(?<!_)\1(?!\w)'
""" Match strong with underscore while ignoring middle word underscores (`__smart__strong__`). """

SMART_EMPHASIS_RE = r'(?<!\w)(_)(?!_)(.+?)(?<!_)\1(?!\w)'
""" Match emphasis with underscore while ignoring middle word underscores (`_smart_emphasis_`). """

SMART_STRONG_EM_RE = r'(?<!\w)(\_)\1(?!\1)(.+?)(?<!\w)\1(?!\1)(.+?)\1{3}(?!\w)'
""" Match strong emphasis with underscores (`__strong _em__`). """

EM_STRONG_RE = r'(\*)\1{2}(.+?)\1(.*?)\1{2}'
""" Match emphasis strong with asterisk (`***strongem***` or `***em*strong**`). """

EM_STRONG2_RE = r'(_)\1{2}(.+?)\1(.*?)\1{2}'
""" Match emphasis strong with underscores (`___emstrong___` or `___em_strong__`). """

STRONG_EM_RE = r'(\*)\1{2}(.+?)\1{2}(.*?)\1'
""" Match strong emphasis with asterisk (`***strong**em*`). """

STRONG_EM2_RE = r'(_)\1{2}(.+?)\1{2}(.*?)\1'
""" Match strong emphasis with underscores (`___strong__em_`). """

STRONG_EM3_RE = r'(\*)\1(?!\1)([^*]+?)\1(?!\1)(.+?)\1{3}'
""" Match strong emphasis with asterisk (`**strong*em***`). """

LINK_RE = NOIMG + r'\['
""" Match start of in-line link (`[text](url)` or `[text](<url>)` or `[text](url "title")`). """

IMAGE_LINK_RE = r'\!\['
""" Match start of in-line image link (`![alttxt](url)` or `![alttxt](<url>)`). """

REFERENCE_RE = LINK_RE
""" Match start of reference link (`[Label][3]`). """

IMAGE_REFERENCE_RE = IMAGE_LINK_RE
""" Match start of image reference (`![alt text][2]`). """

NOT_STRONG_RE = r'((^|(?<=\s))(\*{1,3}|_{1,3})(?=\s|$))'
""" Match a stand-alone `*` or `_`. """

AUTOLINK_RE = r'<((?:[Ff]|[Hh][Tt])[Tt][Pp][Ss]?://[^<>]*)>'
""" Match an automatic link (`<http://www.example.com>`). """

AUTOMAIL_RE = r'<([^<> !]+@[^@<> ]+)>'
""" Match an automatic email link (`<me@example.com>`). """

HTML_RE = r'(<(\/?[a-zA-Z][^<>@ ]*( [^<>]*)?|!--(?:(?!<!--|-->).)*--)>)'
""" Match an HTML tag (`<...>`). """

ENTITY_RE = r'(&(?:\#[0-9]+|\#x[0-9a-fA-F]+|[a-zA-Z0-9]+);)'
""" Match an HTML entity (`&#38;` (decimal) or `&#x26;` (hex) or `&amp;` (named)). """

LINE_BREAK_RE = r'  \n'
""" Match two spaces at end of line. """
|
||||||
|
|
||||||
|
|
||||||
|
def dequote(string: str) -> str:
    """Remove quotes from around a string."""
    # Strip one layer of matching single or double quotes, if present.
    for quote in ('"', "'"):
        if string.startswith(quote) and string.endswith(quote):
            return string[1:-1]
    return string
|
||||||
|
|
||||||
|
|
||||||
|
class EmStrongItem(NamedTuple):
    """Emphasis/strong pattern item."""
    # Compiled regular expression that matches this emphasis/strong construct.
    pattern: re.Pattern[str]
    # Name of the handler used to build output for a match (presumably a
    # `build_*` method on the processor — confirm against `AsteriskProcessor`).
    builder: str
    # Tag name(s) the builder applies (e.g. `strong,em`); format depends on the builder.
    tags: str
|
||||||
|
|
||||||
|
|
||||||
|
# The pattern classes
|
||||||
|
# -----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
class Pattern:  # pragma: no cover
    """
    Base class that inline patterns subclass.

    Inline patterns are handled by means of `Pattern` subclasses, one per regular expression.
    Each pattern object uses a single regular expression and must support the following methods:
    [`getCompiledRegExp`][markdown.inlinepatterns.Pattern.getCompiledRegExp] and
    [`handleMatch`][markdown.inlinepatterns.Pattern.handleMatch].

    All the regular expressions used by `Pattern` subclasses must capture the whole block. For this
    reason, they all start with `^(.*)` and end with `(.*)!`. When passing a regular expression on
    class initialization, the `^(.*)` and `(.*)!` are added automatically and the regular expression
    is pre-compiled.

    It is strongly suggested that the newer style [`markdown.inlinepatterns.InlineProcessor`][] that
    use a more efficient and flexible search approach be used instead. However, the older style
    `Pattern` remains for backward compatibility with many existing third-party extensions.

    """

    ANCESTOR_EXCLUDES: Collection[str] = tuple()
    """
    A collection of elements which are undesirable ancestors. The processor will be skipped if it
    would cause the content to be a descendant of one of the listed tag names.
    """

    def __init__(self, pattern: str, md: Markdown | None = None):
        """
        Create an instant of an inline pattern.

        Arguments:
            pattern: A regular expression that matches a pattern.
            md: An optional pointer to the instance of `markdown.Markdown` and is available as
                `self.md` on the class instance.


        """
        self.pattern = pattern
        # Wrap the pattern so a single match captures the whole block:
        # group(1) is everything before, group(-1) everything after.
        self.compiled_re = re.compile(r"^(.*?)%s(.*)$" % pattern,
                                      re.DOTALL | re.UNICODE)

        self.md = md

    def getCompiledRegExp(self) -> re.Pattern:
        """ Return a compiled regular expression. """
        return self.compiled_re

    def handleMatch(self, m: re.Match[str]) -> etree.Element | str:
        """Return a ElementTree element from the given match.

        Subclasses should override this method.

        Arguments:
            m: A match object containing a match of the pattern.

        Returns: An ElementTree Element object.

        """
        pass  # pragma: no cover

    def type(self) -> str:
        """ Return class name, to define pattern type """
        return self.__class__.__name__

    def unescape(self, text: str) -> str:
        """ Return unescaped text given text with an inline placeholder. """
        try:
            stash = self.md.treeprocessors['inline'].stashed_nodes
        except KeyError:  # pragma: no cover
            return text

        def get_stash(m):
            # Substitute each placeholder with the stashed node's text content.
            id = m.group(1)
            if id in stash:
                value = stash.get(id)
                if isinstance(value, str):
                    return value
                else:
                    # An `etree` Element - return text content only
                    return ''.join(value.itertext())
        return util.INLINE_PLACEHOLDER_RE.sub(get_stash, text)
|
||||||
|
|
||||||
|
|
||||||
|
class InlineProcessor(Pattern):
    """
    Base class that inline processors subclass.

    This is the newer style inline processor that uses a more
    efficient and flexible search approach.

    """

    def __init__(self, pattern: str, md: Markdown | None = None):
        """
        Create an instant of an inline processor.

        Arguments:
            pattern: A regular expression that matches a pattern.
            md: An optional pointer to the instance of `markdown.Markdown` and is available as
                `self.md` on the class instance.

        """
        # Unlike `Pattern`, the expression is compiled as-is: it does not
        # need to consume the whole block.
        self.md = md
        self.pattern = pattern
        self.compiled_re = re.compile(pattern, re.DOTALL | re.UNICODE)
        # API for Markdown to pass `safe_mode` into instance
        self.safe_mode = False

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str | None, int | None, int | None]:
        """Return a ElementTree element from the given match and the
        start and end index of the matched text.

        If `start` and/or `end` are returned as `None`, it will be
        assumed that the processor did not find a valid region of text.

        Subclasses should override this method.

        Arguments:
            m: A re match object containing a match of the pattern.
            data: The buffer currently under analysis.

        Returns:
            el: The ElementTree element, text or None.
            start: The start of the region that has been matched or None.
            end: The end of the region that has been matched or None.

        """
        pass  # pragma: no cover
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleTextPattern(Pattern):  # pragma: no cover
    """ Return a simple text of `group(2)` of a Pattern. """

    def handleMatch(self, m: re.Match[str]) -> str:
        """ Return string content of `group(2)` of a matching pattern. """
        return m[2]
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleTextInlineProcessor(InlineProcessor):
    """ Return a simple text of `group(1)` of a Pattern. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]:
        """ Return the text captured by `group(1)` along with the full match span. """
        span_start, span_end = m.start(0), m.end(0)
        return m.group(1), span_start, span_end
|
||||||
|
|
||||||
|
|
||||||
|
class EscapeInlineProcessor(InlineProcessor):
    """ Return an escaped character. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[str | None, int, int]:
        """
        If the character matched by `group(1)` of a pattern is in [`ESCAPED_CHARS`][markdown.Markdown.ESCAPED_CHARS]
        then return the integer representing the character's Unicode code point (as returned by [`ord`][]) wrapped
        in [`util.STX`][markdown.util.STX] and [`util.ETX`][markdown.util.ETX].

        If the matched character is not in [`ESCAPED_CHARS`][markdown.Markdown.ESCAPED_CHARS], then return `None`.
        """
        char = m.group(1)
        if char not in self.md.ESCAPED_CHARS:
            # Not an escapable character: leave it for other processors.
            return None, m.start(0), m.end(0)
        # Stash the code point between STX/ETX so later passes can restore it.
        return f'{util.STX}{ord(char)}{util.ETX}', m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleTagPattern(Pattern):  # pragma: no cover
    """
    Return element of type `tag` with a text attribute of `group(3)`
    of a Pattern.
    """

    def __init__(self, pattern: str, tag: str):
        """
        Create an instance of a simple tag pattern.

        Arguments:
            pattern: A regular expression that matches a pattern.
            tag: Tag of element.
        """
        super().__init__(pattern)
        self.tag = tag
        """ The tag of the rendered element. """

    def handleMatch(self, m: re.Match[str]) -> etree.Element:
        """
        Return [`Element`][xml.etree.ElementTree.Element] of type `tag` with the string in `group(3)` of a
        matching pattern as the Element's text.
        """
        element = etree.Element(self.tag)
        element.text = m.group(3)
        return element
|
||||||
|
|
||||||
|
|
||||||
|
class SimpleTagInlineProcessor(InlineProcessor):
    """
    Return element of type `tag` with a text attribute of `group(2)`
    of a Pattern.
    """

    def __init__(self, pattern: str, tag: str):
        """
        Create an instance of a simple tag processor.

        Arguments:
            pattern: A regular expression that matches a pattern.
            tag: Tag of element.
        """
        super().__init__(pattern)
        self.tag = tag
        """ The tag of the rendered element. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:  # pragma: no cover
        """
        Return [`Element`][xml.etree.ElementTree.Element] of type `tag` with the string in `group(2)` of a
        matching pattern as the Element's text.
        """
        element = etree.Element(self.tag)
        element.text = m.group(2)
        return element, m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class SubstituteTagPattern(SimpleTagPattern):  # pragma: no cover
    """ Return an element of type `tag` with no children. """

    def handleMatch(self, m: re.Match[str]) -> etree.Element:
        """ Return empty [`Element`][xml.etree.ElementTree.Element] of type `tag`. """
        # The matched text is consumed entirely; only the bare element remains.
        empty = etree.Element(self.tag)
        return empty
|
||||||
|
|
||||||
|
|
||||||
|
class SubstituteTagInlineProcessor(SimpleTagInlineProcessor):
    """ Return an element of type `tag` with no children. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
        """ Return empty [`Element`][xml.etree.ElementTree.Element] of type `tag`. """
        # The whole matched region is replaced by the bare element.
        empty = etree.Element(self.tag)
        return empty, m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class BacktickInlineProcessor(InlineProcessor):
    """ Return a `<code>` element containing the escaped matching text. """

    def __init__(self, pattern):
        super().__init__(pattern)
        # STX-wrapped code point of a backslash, used to stash escaped backslashes.
        self.ESCAPED_BSLASH = '{}{}{}'.format(util.STX, ord('\\'), util.ETX)
        self.tag = 'code'
        """ The tag of the rendered element. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | str, int, int]:
        """
        If the match contains `group(3)` of a pattern, then return a `code`
        [`Element`][xml.etree.ElementTree.Element] which contains HTML escaped text (with
        [`code_escape`][markdown.util.code_escape]) as an [`AtomicString`][markdown.util.AtomicString].

        If the match does not contain `group(3)` then return the text of `group(1)` backslash escaped.
        """
        start, end = m.start(0), m.end(0)
        if not m.group(3):
            # No code span content: stash escaped backslashes in `group(1)`.
            return m.group(1).replace('\\\\', self.ESCAPED_BSLASH), start, end
        code = etree.Element(self.tag)
        # AtomicString prevents further inline processing of the code text.
        code.text = util.AtomicString(util.code_escape(m.group(3).strip()))
        return code, start, end
|
||||||
|
|
||||||
|
|
||||||
|
class DoubleTagPattern(SimpleTagPattern):  # pragma: no cover
    """Return a ElementTree element nested in tag2 nested in tag1.

    Useful for strong emphasis etc.
    """

    def handleMatch(self, m: re.Match[str]) -> etree.Element:
        """
        Return [`Element`][xml.etree.ElementTree.Element] in following format:
        `<tag1><tag2>group(3)</tag2>group(4)</tag1>` where `group(4)` is optional.
        """
        # `self.tag` holds both tag names, comma separated, e.g. "strong,em".
        outer_tag, inner_tag = self.tag.split(",")
        outer = etree.Element(outer_tag)
        inner = etree.SubElement(outer, inner_tag)
        inner.text = m.group(3)
        if len(m.groups()) == 5:
            # Trailing text after the inner element, if the pattern captured it.
            inner.tail = m.group(4)
        return outer
|
||||||
|
|
||||||
|
|
||||||
|
class DoubleTagInlineProcessor(SimpleTagInlineProcessor):
    """Return a ElementTree element nested in tag2 nested in tag1.

    Useful for strong emphasis etc.
    """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:  # pragma: no cover
        """
        Return [`Element`][xml.etree.ElementTree.Element] in following format:
        `<tag1><tag2>group(2)</tag2>group(3)</tag1>` where `group(3)` is optional.
        """
        # `self.tag` holds both tag names, comma separated, e.g. "strong,em".
        outer_tag, inner_tag = self.tag.split(",")
        outer = etree.Element(outer_tag)
        inner = etree.SubElement(outer, inner_tag)
        inner.text = m.group(2)
        if len(m.groups()) == 3:
            # Trailing text after the inner element, if the pattern captured it.
            inner.tail = m.group(3)
        return outer, m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlInlineProcessor(InlineProcessor):
    """ Store raw inline html and return a placeholder. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[str, int, int]:
        """ Store the text of `group(1)` of a pattern and return a placeholder string. """
        raw = self.unescape(m.group(1))
        raw = self.backslash_unescape(raw)
        placeholder = self.md.htmlStash.store(raw)
        return placeholder, m.start(0), m.end(0)

    def unescape(self, text):
        """ Return unescaped text given text with an inline placeholder. """
        try:
            stashed_nodes = self.md.treeprocessors['inline'].stashed_nodes
        except KeyError:  # pragma: no cover
            # No inline treeprocessor registered; nothing to restore.
            return text

        def restore(match):
            node = stashed_nodes.get(match.group(1))
            if node is not None:
                try:
                    return self.md.serializer(node)
                except Exception:
                    # Serialization failed; emit the value backslash-prefixed.
                    return r'\%s' % node
            # NOTE: an unknown placeholder id falls through and yields None,
            # matching the historical behavior of this method.

        return util.INLINE_PLACEHOLDER_RE.sub(restore, text)

    def backslash_unescape(self, text):
        """ Return text with backslash escapes undone (backslashes are restored). """
        try:
            unescape_re = self.md.treeprocessors['unescape'].RE
        except KeyError:  # pragma: no cover
            # No unescape treeprocessor registered; return text unchanged.
            return text
        # Each stashed escape is the character's code point; convert it back.
        return unescape_re.sub(lambda match: chr(int(match.group(1))), text)
|
||||||
|
|
||||||
|
|
||||||
|
class AsteriskProcessor(InlineProcessor):
    """Emphasis processor for handling strong and em matches inside asterisks."""

    # Order matters: each entry is tried in sequence, and `parse_sub_patterns`
    # only considers entries *after* the one that matched the parent.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(STRONG_EM3_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
    """ The various strong and emphasis patterns handled by this processor. """

    def build_single(self, m: re.Match[str], tag: str, idx: int) -> etree.Element:
        """Return single tag."""
        el1 = etree.Element(tag)
        text = m.group(2)
        # Recursively process the inner text for later (lower-priority) patterns.
        self.parse_sub_patterns(text, el1, None, idx)
        return el1

    def build_double(self, m: re.Match[str], tags: str, idx: int) -> etree.Element:
        """Return double tag: `<tag1><tag2>...</tag2>...</tag1>`."""

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        text = m.group(2)
        # Inner content goes into the nested element.
        self.parse_sub_patterns(text, el2, None, idx)
        el1.append(el2)
        if len(m.groups()) == 3:
            # Optional trailing content belongs to the outer element,
            # appended after `el2` (passed as `last`).
            text = m.group(3)
            self.parse_sub_patterns(text, el1, el2, idx)
        return el1

    def build_double2(self, m: re.Match[str], tags: str, idx: int) -> etree.Element:
        """Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""

        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        text = m.group(2)
        # Leading content goes directly into the outer element...
        self.parse_sub_patterns(text, el1, None, idx)
        text = m.group(3)
        el1.append(el2)
        # ...and the second group fills the nested element.
        self.parse_sub_patterns(text, el2, None, idx)
        return el1

    def parse_sub_patterns(self, data: str, parent: etree.Element, last: etree.Element | None, idx: int) -> None:
        """
        Parses sub patterns.

        `data` (`str`):
            text to evaluate.

        `parent` (`etree.Element`):
            Parent to attach text and sub elements to.

        `last` (`etree.Element`):
            Last appended child to parent. Can also be None if parent has no children.

        `idx` (`int`):
            Current pattern index that was used to evaluate the parent.
        """

        # `offset` marks the start of text not yet attached to a node;
        # `pos` is the current scan position.
        offset = 0
        pos = 0

        length = len(data)
        while pos < length:
            # Find the start of potential emphasis or strong tokens
            if self.compiled_re.match(data, pos):
                matched = False
                # See if the we can match an emphasis/strong pattern
                for index, item in enumerate(self.PATTERNS):
                    # Only evaluate patterns that are after what was used on the parent
                    if index <= idx:
                        continue
                    m = item.pattern.match(data, pos)
                    if m:
                        # Append child nodes to parent
                        # Text nodes should be appended to the last
                        # child if present, and if not, it should
                        # be added as the parent's text node.
                        text = data[offset:m.start(0)]
                        if text:
                            if last is not None:
                                last.tail = text
                            else:
                                parent.text = text
                        el = self.build_element(m, item.builder, item.tags, index)
                        parent.append(el)
                        last = el
                        # Move our position past the matched hunk
                        offset = pos = m.end(0)
                        matched = True
                if not matched:
                    # We matched nothing, move on to the next character
                    pos += 1
            else:
                # Increment position as no potential emphasis start was found.
                pos += 1

        # Append any leftover text as a text node.
        text = data[offset:]
        if text:
            if last is not None:
                last.tail = text
            else:
                parent.text = text

    def build_element(self, m: re.Match[str], builder: str, tags: str, index: int) -> etree.Element:
        """Element builder: dispatch on the `builder` name from `EmStrongItem`."""

        if builder == 'double2':
            return self.build_double2(m, tags, index)
        elif builder == 'double':
            return self.build_double(m, tags, index)
        else:
            return self.build_single(m, tags, index)

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
        """Parse patterns.

        Try each pattern in `PATTERNS` at the match position and build an
        element from the first one that matches; `(None, None, None)` if none do.
        """

        el = None
        start = None
        end = None

        for index, item in enumerate(self.PATTERNS):
            m1 = item.pattern.match(data, m.start(0))
            if m1:
                start = m1.start(0)
                end = m1.end(0)
                el = self.build_element(m1, item.builder, item.tags, index)
                break
        return el, start, end
|
||||||
|
|
||||||
|
|
||||||
|
class UnderscoreProcessor(AsteriskProcessor):
    """Emphasis processor for handling strong and em matches inside underscores."""

    # Same structure as `AsteriskProcessor.PATTERNS`, but using the underscore
    # ("smart") variants of the emphasis regular expressions. Order matters:
    # `parse_sub_patterns` only considers entries after the parent's index.
    PATTERNS = [
        EmStrongItem(re.compile(EM_STRONG2_RE, re.DOTALL | re.UNICODE), 'double', 'strong,em'),
        EmStrongItem(re.compile(STRONG_EM2_RE, re.DOTALL | re.UNICODE), 'double', 'em,strong'),
        EmStrongItem(re.compile(SMART_STRONG_EM_RE, re.DOTALL | re.UNICODE), 'double2', 'strong,em'),
        EmStrongItem(re.compile(SMART_STRONG_RE, re.DOTALL | re.UNICODE), 'single', 'strong'),
        EmStrongItem(re.compile(SMART_EMPHASIS_RE, re.DOTALL | re.UNICODE), 'single', 'em')
    ]
    """ The various strong and emphasis patterns handled by this processor. """
|
||||||
|
|
||||||
|
|
||||||
|
class LinkInlineProcessor(InlineProcessor):
    """ Return a link element from the given match. """
    # Fast path for `(<link> "title")` style links: group(1) is the `<...>`
    # wrapped URL, group(2) the optional quoted title.
    RE_LINK = re.compile(r'''\(\s*(?:(<[^<>]*>)\s*(?:('[^']*'|"[^"]*")\s*)?\))?''', re.DOTALL | re.UNICODE)
    # Collapses any whitespace character in a title to a single space.
    RE_TITLE_CLEAN = re.compile(r'\s')

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
        """ Return an `a` [`Element`][xml.etree.ElementTree.Element] or `(None, None, None)`. """
        # Parse the `[text]` part first, then the `(href "title")` part.
        text, index, handled = self.getText(data, m.end(0))

        if not handled:
            return None, None, None

        href, title, index, handled = self.getLink(data, index)
        if not handled:
            return None, None, None

        el = etree.Element("a")
        el.text = text

        el.set("href", href)

        if title is not None:
            el.set("title", title)

        return el, m.start(0), index

    def getLink(self, data: str, index: int) -> tuple[str, str | None, int, bool]:
        """Parse data between `()` of `[Text]()` allowing recursive `()`.

        Returns `(href, title, index, handled)` where `index` is the position
        just past the closing `)` and `handled` is False on failure.
        """

        href = ''
        title = None
        handled = False

        m = self.RE_LINK.match(data, pos=index)
        if m and m.group(1):
            # Matches [Text](<link> "title")
            href = m.group(1)[1:-1].strip()
            if m.group(2):
                title = m.group(2)[1:-1]
            index = m.end(0)
            handled = True
        elif m:
            # General case: scan character by character, balancing parentheses
            # and tracking up to two candidate quoted titles.
            # Track bracket nesting and index in string
            bracket_count = 1
            backtrack_count = 1
            start_index = m.end()
            index = start_index
            last_bracket = -1

            # Primary (first found) quote tracking.
            quote = None
            start_quote = -1
            exit_quote = -1
            ignore_matches = False

            # Secondary (second found) quote tracking.
            alt_quote = None
            start_alt_quote = -1
            exit_alt_quote = -1

            # Track last character
            last = ''

            for pos in range(index, len(data)):
                c = data[pos]
                if c == '(':
                    # Count nested (
                    # Don't increment the bracket count if we are sure we're in a title.
                    if not ignore_matches:
                        bracket_count += 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                elif c == ')':
                    # Match nested ) to (
                    # Don't decrement if we are sure we are in a title that is unclosed.
                    if ((exit_quote != -1 and quote == last) or (exit_alt_quote != -1 and alt_quote == last)):
                        bracket_count = 0
                    elif not ignore_matches:
                        bracket_count -= 1
                    elif backtrack_count > 0:
                        backtrack_count -= 1
                        # We've found our backup end location if the title doesn't resolve.
                        if backtrack_count == 0:
                            last_bracket = index + 1

                elif c in ("'", '"'):
                    # Quote has started
                    if not quote:
                        # We'll assume we are now in a title.
                        # Brackets are quoted, so no need to match them (except for the final one).
                        ignore_matches = True
                        backtrack_count = bracket_count
                        bracket_count = 1
                        start_quote = index + 1
                        quote = c
                    # Secondary quote (in case the first doesn't resolve): [text](link'"title")
                    elif c != quote and not alt_quote:
                        start_alt_quote = index + 1
                        alt_quote = c
                    # Update primary quote match
                    elif c == quote:
                        exit_quote = index + 1
                    # Update secondary quote match
                    elif alt_quote and c == alt_quote:
                        exit_alt_quote = index + 1

                index += 1

                # Link is closed, so let's break out of the loop
                if bracket_count == 0:
                    # Get the title if we closed a title string right before link closed
                    if exit_quote >= 0 and quote == last:
                        href = data[start_index:start_quote - 1]
                        title = ''.join(data[start_quote:exit_quote - 1])
                    elif exit_alt_quote >= 0 and alt_quote == last:
                        href = data[start_index:start_alt_quote - 1]
                        title = ''.join(data[start_alt_quote:exit_alt_quote - 1])
                    else:
                        href = data[start_index:index - 1]
                    break

                if c != ' ':
                    last = c

            # We have a scenario: `[test](link"notitle)`
            # When we enter a string, we stop tracking bracket resolution in the main counter,
            # but we do keep a backup counter up until we discover where we might resolve all brackets
            # if the title string fails to resolve.
            if bracket_count != 0 and backtrack_count == 0:
                href = data[start_index:last_bracket - 1]
                index = last_bracket
                bracket_count = 0

            handled = bracket_count == 0

        if title is not None:
            # Normalize whitespace and strip surrounding quotes/escapes.
            title = self.RE_TITLE_CLEAN.sub(' ', dequote(self.unescape(title.strip())))

        href = self.unescape(href).strip()

        return href, title, index, handled

    def getText(self, data: str, index: int) -> tuple[str, int, bool]:
        """Parse the content between `[]` of the start of an image or link
        resolving nested square brackets.

        Returns `(text, index, handled)`; `handled` is False when the
        brackets never balance before the end of `data`.
        """
        bracket_count = 1
        text = []
        for pos in range(index, len(data)):
            c = data[pos]
            if c == ']':
                bracket_count -= 1
            elif c == '[':
                bracket_count += 1
            index += 1
            if bracket_count == 0:
                break
            text.append(c)
        return ''.join(text), index, bracket_count == 0
|
||||||
|
|
||||||
|
|
||||||
|
class ImageInlineProcessor(LinkInlineProcessor):
    """ Return a `img` element from the given match. """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
        """ Return an `img` [`Element`][xml.etree.ElementTree.Element] or `(None, None, None)`. """
        # Parse the alt text between `[]`, then the `(src "title")` portion.
        text, index, handled = self.getText(data, m.end(0))
        if not handled:
            return None, None, None

        src, title, index, handled = self.getLink(data, index)
        if not handled:
            return None, None, None

        # Build the attribute map in serialization order: src, title, alt.
        attrs = {"src": src}
        if title is not None:
            attrs["title"] = title
        attrs["alt"] = self.unescape(text)

        return etree.Element("img", attrs), m.start(0), index
|
||||||
|
|
||||||
|
|
||||||
|
class ReferenceInlineProcessor(LinkInlineProcessor):
    """ Match to a stored reference and return link element. """
    # Collapses newlines/whitespace runs inside a reference id.
    NEWLINE_CLEANUP_RE = re.compile(r'\s+', re.MULTILINE)

    # Matches the trailing `[id]` portion of `[ref][id]`.
    RE_LINK = re.compile(r'\s?\[([^\]]*)\]', re.DOTALL | re.UNICODE)

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element | None, int | None, int | None]:
        """
        Return [`Element`][xml.etree.ElementTree.Element] returned by `makeTag` method or `(None, None, None)`.
        """
        text, index, handled = self.getText(data, m.end(0))
        if not handled:
            return None, None, None

        ref_id, end, handled = self.evalId(data, index, text)
        if not handled:
            return None, None, None

        # Clean up line breaks in the id before looking it up.
        ref_id = self.NEWLINE_CLEANUP_RE.sub(' ', ref_id)
        if ref_id not in self.md.references:
            # Undefined reference: consume nothing but report the span scanned.
            return None, m.start(0), end

        href, title = self.md.references[ref_id]
        return self.makeTag(href, title, text), m.start(0), end

    def evalId(self, data, index, text):
        """
        Evaluate the id portion of `[ref][id]`.

        If `[ref][]` use `[ref]`.
        """
        match = self.RE_LINK.match(data, pos=index)
        if not match:
            return None, index, False
        # Empty `[]` falls back to the link text itself as the id.
        ref_id = match.group(1).lower() or text.lower()
        return ref_id, match.end(0), True

    def makeTag(self, href: str, title: str, text: str) -> etree.Element:
        """ Return an `a` [`Element`][xml.etree.ElementTree.Element]. """
        anchor = etree.Element('a')
        anchor.set('href', href)
        if title:
            anchor.set('title', title)
        anchor.text = text
        return anchor
|
||||||
|
|
||||||
|
|
||||||
|
class ShortReferenceInlineProcessor(ReferenceInlineProcessor):
    """Short form of reference: `[google]`. """

    def evalId(self, data, index, text):
        """Evaluate the id of `[ref]`.

        The link text itself (lower-cased) is the reference id; nothing
        further is consumed from `data`.
        """
        return text.lower(), index, True
|
||||||
|
|
||||||
|
|
||||||
|
class ImageReferenceInlineProcessor(ReferenceInlineProcessor):
    """ Match to a stored reference and return `img` element. """

    def makeTag(self, href: str, title: str, text: str) -> etree.Element:
        """ Return an `img` [`Element`][xml.etree.ElementTree.Element]. """
        # Build attributes in serialization order: src, title, alt.
        attrs = {"src": href}
        if title:
            attrs["title"] = title
        attrs["alt"] = self.unescape(text)
        return etree.Element("img", attrs)
|
||||||
|
|
||||||
|
|
||||||
|
class ShortImageReferenceInlineProcessor(ImageReferenceInlineProcessor):
    """ Short form of image reference: `![ref]`. """

    def evalId(self, data, index, text):
        """Evaluate the id of `[ref]`.

        The alt text itself (lower-cased) is the reference id; nothing
        further is consumed from `data`.
        """
        return text.lower(), index, True
|
||||||
|
|
||||||
|
|
||||||
|
class AutolinkInlineProcessor(InlineProcessor):
    """ Return a link Element given an auto-link (`<http://example/com>`). """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
        """ Return an `a` [`Element`][xml.etree.ElementTree.Element] of `group(1)`. """
        target = m.group(1)
        anchor = etree.Element("a", {'href': self.unescape(target)})
        # AtomicString keeps the link text from being processed again.
        anchor.text = util.AtomicString(target)
        return anchor, m.start(0), m.end(0)
|
||||||
|
|
||||||
|
|
||||||
|
class AutomailInlineProcessor(InlineProcessor):
    """
    Return a `mailto` link Element given an auto-mail link (`<foo@example.com>`).
    """

    def handleMatch(self, m: re.Match[str], data: str) -> tuple[etree.Element, int, int]:
        """ Return an [`Element`][xml.etree.ElementTree.Element] containing a `mailto` link of `group(1)`. """
        el = etree.Element('a')
        email = self.unescape(m.group(1))
        if email.startswith("mailto:"):
            # Strip the scheme from the display text; it is re-added for `href`.
            email = email[len("mailto:"):]

        def codepoint2name(code):
            """Return entity definition by code, or the code if not defined."""
            entity = entities.codepoint2name.get(code)
            if entity:
                return "{}{};".format(util.AMP_SUBSTITUTE, entity)
            return "%s#%d;" % (util.AMP_SUBSTITUTE, code)

        # Obfuscate the visible address as HTML entities.
        el.text = util.AtomicString(''.join(codepoint2name(ord(ch)) for ch in email))

        # Obfuscate the href as numeric character references.
        mailto = "mailto:" + email
        encoded_href = ''.join(
            util.AMP_SUBSTITUTE + '#%d;' % ord(ch) for ch in mailto
        )
        el.set('href', encoded_href)
        return el, m.start(0), m.end(0)
|
||||||
143
plugins/markdown_preview/markdown/postprocessors.py
Normal file
143
plugins/markdown_preview/markdown/postprocessors.py
Normal file
@@ -0,0 +1,143 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
Post-processors run on the text of the entire document after it has been serialized into a string.
|
||||||
|
Postprocessors should be used to work with the text just before output. Usually, they are used to add
|
||||||
|
back sections that were extracted in a preprocessor, fix up outgoing encodings, or wrap the whole
|
||||||
|
document.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from collections import OrderedDict
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
from . import util
|
||||||
|
import re
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
|
||||||
|
def build_postprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Postprocessor]:
    """ Build the default postprocessors for Markdown. """
    registry: util.Registry[Postprocessor] = util.Registry()
    # Raw HTML restoration runs before entity substitution (higher priority).
    registry.register(RawHtmlPostprocessor(md), 'raw_html', 30)
    registry.register(AndSubstitutePostprocessor(), 'amp_substitute', 20)
    return registry
|
||||||
|
|
||||||
|
|
||||||
|
class Postprocessor(util.Processor):
    """
    Postprocessors are run after the ElementTree is converted back into text.

    Each Postprocessor implements a `run` method that takes a pointer to a
    text string, modifies it as necessary and returns a text string.

    Postprocessors must extend `Postprocessor`.
    """

    def run(self, text: str) -> str:
        """
        Subclasses of `Postprocessor` should implement a `run` method, which
        takes the html document as a single text string and returns a
        (possibly modified) string.
        """
        pass  # pragma: no cover
|
||||||
|
|
||||||
|
|
||||||
|
class RawHtmlPostprocessor(Postprocessor):
    """ Restore raw html to the document. """

    # Extracts the tag name from the start of an HTML block, e.g. `div` from `</div ...`.
    BLOCK_LEVEL_REGEX = re.compile(r'^\<\/?([^ >]+)')

    def run(self, text: str):
        """ Iterate over html stash and restore html.

        Substitution is repeated (via recursion at the bottom) because stashed
        HTML may itself contain further placeholders.
        """
        replacements = OrderedDict()
        for i in range(self.md.htmlStash.html_counter):
            html = self.stash_to_string(self.md.htmlStash.rawHtmlBlocks[i])
            if self.isblocklevel(html):
                # A block-level placeholder may have been wrapped in `<p>...</p>`
                # by the parser; map that wrapped form to the bare HTML too.
                replacements["<p>{}</p>".format(
                    self.md.htmlStash.get_placeholder(i))] = html
            replacements[self.md.htmlStash.get_placeholder(i)] = html

        def substitute_match(m):
            # `m` matched either `<p>PLACEHOLDER</p>` or a bare placeholder.
            key = m.group(0)

            if key not in replacements:
                # key[3:-4] strips the `<p>`/`</p>` wrapper; if the inner
                # placeholder is known (inline HTML), keep the paragraph.
                if key[3:-4] in replacements:
                    return f'<p>{ replacements[key[3:-4]] }</p>'
                else:
                    # Unknown placeholder: leave it untouched.
                    return key

            return replacements[key]

        if replacements:
            base_placeholder = util.HTML_PLACEHOLDER % r'([0-9]+)'
            pattern = re.compile(f'<p>{ base_placeholder }</p>|{ base_placeholder }')
            processed_text = pattern.sub(substitute_match, text)
        else:
            return text

        if processed_text == text:
            # Fixed point reached: nothing more to restore.
            return processed_text
        else:
            # Restored HTML may contain further placeholders; run again.
            return self.run(processed_text)

    def isblocklevel(self, html: str) -> bool:
        """ Check if a block of HTML is block-level. """
        m = self.BLOCK_LEVEL_REGEX.match(html)
        if m:
            if m.group(1)[0] in ('!', '?', '@', '%'):
                # Comment, PHP etc...
                return True
            return self.md.is_block_level(m.group(1))
        return False

    def stash_to_string(self, text: str) -> str:
        """ Convert a stashed object to a string. """
        return str(text)
|
||||||
|
|
||||||
|
|
||||||
|
class AndSubstitutePostprocessor(Postprocessor):
    """ Restore valid entities """

    def run(self, text):
        # Replace every `AMP_SUBSTITUTE` placeholder with a literal ampersand.
        return text.replace(util.AMP_SUBSTITUTE, "&")
|
||||||
|
|
||||||
|
|
||||||
|
@util.deprecated(
    "This class is deprecated and will be removed in the future; "
    "use [`UnescapeTreeprocessor`][markdown.treeprocessors.UnescapeTreeprocessor] instead."
)
class UnescapePostprocessor(Postprocessor):
    """ Restore escaped chars. """

    # Matches a stashed escape: the character's code point between STX and ETX.
    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))

    def run(self, text):
        # Replace every stashed escape sequence with its original character.
        return self.RE.sub(self.unescape, text)

    def unescape(self, m):
        # Convert the captured ordinal back into its character.
        return chr(int(m.group(1)))
|
||||||
91
plugins/markdown_preview/markdown/preprocessors.py
Normal file
91
plugins/markdown_preview/markdown/preprocessors.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
Preprocessors work on source text before it is broken down into its individual parts.
|
||||||
|
This is an excellent place to clean up bad characters or to extract portions for later
|
||||||
|
processing that the parser may otherwise choke on.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
from . import util
|
||||||
|
from .htmlparser import HTMLExtractor
|
||||||
|
import re
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
|
||||||
|
def build_preprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Preprocessor]:
    """ Build and return the default set of preprocessors used by Markdown. """
    registry: util.Registry[Preprocessor] = util.Registry()
    # Higher priority runs first: whitespace normalization precedes HTML extraction.
    for processor, name, priority in (
        (NormalizeWhitespace(md), 'normalize_whitespace', 30),
        (HtmlBlockPreprocessor(md), 'html_block', 20),
    ):
        registry.register(processor, name, priority)
    return registry
|
||||||
|
|
||||||
|
|
||||||
|
class Preprocessor(util.Processor):
    """
    Base class for preprocessors.

    Preprocessors are run after the source text has been split into lines.
    Each subclass implements a `run` method that receives the document as a
    list of lines and returns either the same list (possibly modified in
    place) or a new one.
    """

    def run(self, lines: list[str]) -> list[str]:
        """
        Process the document's lines; overridden by every concrete subclass.

        Takes the document as a list of strings split by newlines and returns
        the (possibly modified) list of lines.
        """
        pass  # pragma: no cover
|
||||||
|
|
||||||
|
|
||||||
|
class NormalizeWhitespace(Preprocessor):
    """ Normalize whitespace for consistent parsing. """

    def run(self, lines: list[str]) -> list[str]:
        text = '\n'.join(lines)
        # Strip STX/ETX control chars: they are reserved as placeholder markers.
        for control in (util.STX, util.ETX):
            text = text.replace(control, "")
        # Unify Windows/Mac line endings and guarantee a trailing blank line.
        text = text.replace("\r\n", "\n").replace("\r", "\n") + "\n\n"
        text = text.expandtabs(self.md.tab_length)
        # Collapse lines containing only spaces into truly empty lines.
        text = re.sub(r'(?<=\n) +\n', '\n', text)
        return text.split('\n')
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlBlockPreprocessor(Preprocessor):
    """
    Remove html blocks from the text and store them for later retrieval.

    The raw HTML is stored in the [`htmlStash`][markdown.util.HtmlStash] of the
    [`Markdown`][markdown.Markdown] instance.
    """

    def run(self, lines: list[str]) -> list[str]:
        extractor = HTMLExtractor(self.md)
        extractor.feed('\n'.join(lines))
        extractor.close()
        # `cleandoc` holds the text with raw HTML replaced by placeholders.
        return ''.join(extractor.cleandoc).split('\n')
|
||||||
193
plugins/markdown_preview/markdown/serializers.py
Normal file
193
plugins/markdown_preview/markdown/serializers.py
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
# Add x/html serialization to `Elementree`
|
||||||
|
# Taken from ElementTree 1.3 preview with slight modifications
|
||||||
|
#
|
||||||
|
# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
|
||||||
|
#
|
||||||
|
# fredrik@pythonware.com
|
||||||
|
# https://www.pythonware.com/
|
||||||
|
#
|
||||||
|
# --------------------------------------------------------------------
|
||||||
|
# The ElementTree toolkit is
|
||||||
|
#
|
||||||
|
# Copyright (c) 1999-2007 by Fredrik Lundh
|
||||||
|
#
|
||||||
|
# By obtaining, using, and/or copying this software and/or its
|
||||||
|
# associated documentation, you agree that you have read, understood,
|
||||||
|
# and will comply with the following terms and conditions:
|
||||||
|
#
|
||||||
|
# Permission to use, copy, modify, and distribute this software and
|
||||||
|
# its associated documentation for any purpose and without fee is
|
||||||
|
# hereby granted, provided that the above copyright notice appears in
|
||||||
|
# all copies, and that both that copyright notice and this permission
|
||||||
|
# notice appear in supporting documentation, and that the name of
|
||||||
|
# Secret Labs AB or the author not be used in advertising or publicity
|
||||||
|
# pertaining to distribution of the software without specific, written
|
||||||
|
# prior permission.
|
||||||
|
#
|
||||||
|
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
|
||||||
|
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
|
||||||
|
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
|
||||||
|
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
|
||||||
|
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
||||||
|
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
|
||||||
|
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
|
||||||
|
# OF THIS SOFTWARE.
|
||||||
|
# --------------------------------------------------------------------
|
||||||
|
|
||||||
|
"""
|
||||||
|
Python-Markdown provides two serializers which render [`ElementTree.Element`][xml.etree.ElementTree.Element]
|
||||||
|
objects to a string of HTML. Both functions wrap the same underlying code with only a few minor
|
||||||
|
differences as outlined below:
|
||||||
|
|
||||||
|
1. Empty (self-closing) tags are rendered as `<tag>` for HTML and as `<tag />` for XHTML.
|
||||||
|
2. Boolean attributes are rendered as `attrname` for HTML and as `attrname="attrname"` for XHTML.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
from xml.etree.ElementTree import ProcessingInstruction
|
||||||
|
from xml.etree.ElementTree import Comment, ElementTree, Element, QName, HTML_EMPTY
|
||||||
|
import re
|
||||||
|
|
||||||
|
__all__ = ['to_html_string', 'to_xhtml_string']

# Matches a bare `&` that is NOT already the start of a character reference
# (named, decimal `&#NN;`, or hex `&#xNN;`), case-insensitively, so only
# stray ampersands get escaped.
RE_AMP = re.compile(r'&(?!(?:\#[0-9]+|\#x[0-9a-f]+|[0-9a-z]+);)', re.I)
|
||||||
|
|
||||||
|
|
||||||
|
def _raise_serialization_error(text): # pragma: no cover
|
||||||
|
raise TypeError(
|
||||||
|
"cannot serialize {!r} (type {})".format(text, type(text).__name__)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def _escape_cdata(text):
    """Escape `&`, `<` and `>` in character data.

    A `&` already forming a character reference is left alone (see `RE_AMP`).
    Non-string input is reported via `_raise_serialization_error`.

    NOTE(review): the replacement strings here had been corrupted by an
    HTML-decoding pass (e.g. `replace("<", "<")`); restored to the proper
    entity references.
    """
    try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):  # pragma: no cover
        _raise_serialization_error(text)
|
||||||
|
|
||||||
|
|
||||||
|
def _escape_attrib(text):
|
||||||
|
# escape attribute value
|
||||||
|
try:
|
||||||
|
if "&" in text:
|
||||||
|
# Only replace & when not part of an entity
|
||||||
|
text = RE_AMP.sub('&', text)
|
||||||
|
if "<" in text:
|
||||||
|
text = text.replace("<", "<")
|
||||||
|
if ">" in text:
|
||||||
|
text = text.replace(">", ">")
|
||||||
|
if "\"" in text:
|
||||||
|
text = text.replace("\"", """)
|
||||||
|
if "\n" in text:
|
||||||
|
text = text.replace("\n", " ")
|
||||||
|
return text
|
||||||
|
except (TypeError, AttributeError): # pragma: no cover
|
||||||
|
_raise_serialization_error(text)
|
||||||
|
|
||||||
|
|
||||||
|
def _escape_attrib_html(text):
    """Escape `&`, `<`, `>` and `"` in an HTML attribute value.

    Unlike `_escape_attrib`, newlines are left untouched (HTML output).

    NOTE(review): the replacement strings here had been corrupted by an
    HTML-decoding pass; restored to the proper entity references.
    """
    try:
        if "&" in text:
            # Only replace & when not part of an entity
            text = RE_AMP.sub('&amp;', text)
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text
    except (TypeError, AttributeError):  # pragma: no cover
        _raise_serialization_error(text)
|
||||||
|
|
||||||
|
|
||||||
|
def _serialize_html(write, elem, format):
    """Recursively serialize `elem`, emitting markup via the `write` callable.

    `format` is either "html" or "xhtml"; it controls empty-tag rendering
    and boolean-attribute shorthand.
    """
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text))
    elif tag is None:
        # A tag-less element is a bare container: emit only its text and children.
        if text:
            write(_escape_cdata(text))
        for e in elem:
            _serialize_html(write, e, format)
    else:
        namespace_uri = None
        if isinstance(tag, QName):
            # `QNAME` objects store their data as a string: `{uri}tag`
            if tag.text[:1] == "{":
                namespace_uri, tag = tag.text[1:].split("}", 1)
            else:
                raise ValueError('QName objects must define a tag.')
        write("<" + tag)
        items = elem.items()
        if items:
            items = sorted(items)  # lexical order
            for k, v in items:
                if isinstance(k, QName):
                    # Assume a text only `QName`
                    k = k.text
                if isinstance(v, QName):
                    # Assume a text only `QName`
                    v = v.text
                else:
                    v = _escape_attrib_html(v)
                if k == v and format == 'html':
                    # handle boolean attributes
                    write(" %s" % v)
                else:
                    write(' {}="{}"'.format(k, v))
        if namespace_uri:
            write(' xmlns="%s"' % (_escape_attrib(namespace_uri)))
        if format == "xhtml" and tag.lower() in HTML_EMPTY:
            # XHTML self-closes void elements; HTML leaves them as `<tag>`.
            write(" />")
        else:
            write(">")
            if text:
                if tag.lower() in ["script", "style"]:
                    # Script/style contents are raw: escaping would corrupt code.
                    write(text)
                else:
                    write(_escape_cdata(text))
            for e in elem:
                _serialize_html(write, e, format)
            if tag.lower() not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail))
|
||||||
|
|
||||||
|
|
||||||
|
def _write_html(root, format="html"):
    """Serialize `root` and its children, returning the markup as one string."""
    assert root is not None
    pieces = []
    _serialize_html(pieces.append, root, format)
    return "".join(pieces)
|
||||||
|
|
||||||
|
|
||||||
|
# --------------------------------------------------------------------
|
||||||
|
# public functions
|
||||||
|
|
||||||
|
|
||||||
|
def to_html_string(element: Element) -> str:
    """ Serialize element and its children to a string of HTML5. """
    root = ElementTree(element).getroot()
    return _write_html(root, format="html")
|
||||||
|
|
||||||
|
|
||||||
|
def to_xhtml_string(element: Element) -> str:
    """ Serialize element and its children to a string of XHTML. """
    root = ElementTree(element).getroot()
    return _write_html(root, format="xhtml")
|
||||||
224
plugins/markdown_preview/markdown/test_tools.py
Normal file
224
plugins/markdown_preview/markdown/test_tools.py
Normal file
@@ -0,0 +1,224 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
""" A collection of tools for testing the Markdown code base and extensions. """
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
import unittest
|
||||||
|
import textwrap
|
||||||
|
from typing import Any
|
||||||
|
from . import markdown, Markdown, util
|
||||||
|
|
||||||
|
try:
|
||||||
|
import tidylib
|
||||||
|
except ImportError:
|
||||||
|
tidylib = None
|
||||||
|
|
||||||
|
__all__ = ['TestCase', 'LegacyTestCase', 'Kwargs']
|
||||||
|
|
||||||
|
|
||||||
|
class TestCase(unittest.TestCase):
    """
    A [`unittest.TestCase`][] subclass with Markdown-rendering helpers.

    Set `default_kwargs` to a `dict` of keywords passed to `Markdown` for
    every test; individual tests may override them via `kwargs`.
    `assertMarkdownRenders` compares rendered output (and, optionally,
    attributes of the `Markdown` instance) against expectations using
    `assertMultiLineEqual`, raising `AssertionError` with a diff on
    mismatch. `dedent` helps with triple-quoted source strings.

    In all other respects, behaves as `unittest.TestCase`.
    """

    # Default options to pass to Markdown for each test.
    default_kwargs: dict[str, Any] = {}

    def assertMarkdownRenders(self, source, expected, expected_attrs=None, **kwargs):
        """
        Assert that `source` renders to `expected` with the given keywords.

        `expected_attrs` maps `Markdown` attribute names to the values they
        should hold after conversion; each is checked with `assertEqual`.
        """
        attrs = expected_attrs or {}
        options = {**self.default_kwargs, **kwargs}
        md = Markdown(**options)
        self.assertMultiLineEqual(md.convert(source), expected)
        for name, value in attrs.items():
            self.assertEqual(getattr(md, name), value)

    def dedent(self, text):
        """Dedent and strip `text` (handy for triple-quoted test sources)."""
        # TODO: If/when actual output ends with a newline, then use:
        # return textwrap.dedent(text.strip('/n'))
        return textwrap.dedent(text).strip()
|
||||||
|
|
||||||
|
|
||||||
|
class recursionlimit:
    """
    Context manager that temporarily raises Python's recursion limit.

    The limit given is relative: the current stack depth is added to it, so
    tests see consistent headroom no matter how deep the framework, coverage
    tooling, etc. have already pushed the stack.

    Example usage:

    ``` python
    with recursionlimit(20):
        # test code here
    ```

    See <https://stackoverflow.com/a/50120316/866026>.
    """

    def __init__(self, limit):
        self.limit = limit + util._get_stack_depth()
        self.old_limit = sys.getrecursionlimit()

    def __enter__(self):
        sys.setrecursionlimit(self.limit)

    def __exit__(self, type, value, tb):
        # Always restore the previous limit, even if the body raised.
        sys.setrecursionlimit(self.old_limit)
|
||||||
|
|
||||||
|
|
||||||
|
#########################
|
||||||
|
# Legacy Test Framework #
|
||||||
|
#########################
|
||||||
|
|
||||||
|
|
||||||
|
class Kwargs(dict):
    """A `dict` subclass marking a bundle of Markdown keyword arguments."""
|
||||||
|
|
||||||
|
|
||||||
|
def _normalize_whitespace(text):
    """Normalize whitespace of an HTML fragment using `tidylib`."""
    # All "fixing" options are disabled: only whitespace should change.
    tidy_options = {
        'drop_empty_paras': 0,
        'fix_backslash': 0,
        'fix_bad_comments': 0,
        'fix_uri': 0,
        'join_styles': 0,
        'lower_literals': 0,
        'merge_divs': 0,
        'output_xhtml': 1,
        'quote_ampersand': 0,
        'newline': 'LF',
    }
    output, _errors = tidylib.tidy_fragment(text, options=tidy_options)
    return output
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyTestMeta(type):
    """Metaclass that builds one test method per input/output file pair.

    The subclass dictionary supplies configuration (`location`, `exclude`,
    `normalize`, `input_ext`, `output_ext`, `default_kwargs`); a `test_<name>`
    method is injected for every matching input file found in `location`.
    """

    def __new__(cls, name, bases, dct):

        def generate_test(infile, outfile, normalize, kwargs):
            # Bind the file pair in a closure so each generated test is independent.
            def test(self):
                with open(infile, encoding="utf-8") as f:
                    input = f.read()
                with open(outfile, encoding="utf-8") as f:
                    # Normalize line endings
                    # (on Windows, git may have altered line endings).
                    expected = f.read().replace("\r\n", "\n")
                output = markdown(input, **kwargs)
                if tidylib and normalize:
                    try:
                        expected = _normalize_whitespace(expected)
                        output = _normalize_whitespace(output)
                    except OSError:
                        self.skipTest("Tidylib's c library not available.")
                elif normalize:
                    # Normalization was requested but tidylib isn't importable.
                    self.skipTest('Tidylib not available.')
                self.assertMultiLineEqual(output, expected)
            return test

        location = dct.get('location', '')
        exclude = dct.get('exclude', [])
        normalize = dct.get('normalize', False)
        input_ext = dct.get('input_ext', '.txt')
        output_ext = dct.get('output_ext', '.html')
        kwargs = dct.get('default_kwargs', Kwargs())

        if os.path.isdir(location):
            for file in os.listdir(location):
                infile = os.path.join(location, file)
                if os.path.isfile(infile):
                    tname, ext = os.path.splitext(file)
                    if ext == input_ext:
                        outfile = os.path.join(location, tname + output_ext)
                        # Test names must be valid Python identifiers.
                        tname = tname.replace(' ', '_').replace('-', '_')
                        kws = kwargs.copy()
                        if tname in dct:
                            # Per-file Kwargs attributes override the defaults.
                            kws.update(dct[tname])
                        test_name = 'test_%s' % tname
                        if tname not in exclude:
                            dct[test_name] = generate_test(infile, outfile, normalize, kws)
                        else:
                            dct[test_name] = unittest.skip('Excluded')(lambda: None)

        return type.__new__(cls, name, bases, dct)
|
||||||
|
|
||||||
|
|
||||||
|
class LegacyTestCase(unittest.TestCase, metaclass=LegacyTestMeta):
    """
    A [`unittest.TestCase`][] subclass for running Markdown's legacy file-based tests.

    A subclass should define various properties which point to a directory of
    text-based test files and define various behaviors/defaults for those tests.
    The following properties are supported:

    Attributes:
        location (str): A path to the directory of test files. An absolute path is preferred.
        exclude (list[str]): A list of tests to exclude. Each test name should comprise the filename
            without an extension.
        normalize (bool): A boolean value indicating if the HTML should be normalized. Default: `False`.
        input_ext (str): A string containing the file extension of input files. Default: `.txt`.
        output_ext (str): A string containing the file extension of expected output files. Default: `.html`.
        default_kwargs (Kwargs[str, Any]): The default set of keyword arguments for all test files in the directory.

    In addition, properties can be defined for each individual set of test files within
    the directory. The property should be given the name of the file without the file
    extension. Any spaces and dashes in the filename should be replaced with
    underscores. The value of the property should be a `Kwargs` instance which
    contains the keyword arguments that should be passed to `Markdown` for that
    test file. The keyword arguments will "update" the `default_kwargs`.

    When the class instance is created, it will walk the given directory and create
    a separate unittest for each set of test files using the naming scheme:
    `test_filename`. One unittest will be run for each set of input and output files.
    """
    pass
|
||||||
476
plugins/markdown_preview/markdown/treeprocessors.py
Normal file
476
plugins/markdown_preview/markdown/treeprocessors.py
Normal file
@@ -0,0 +1,476 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
Tree processors manipulate the tree created by block processors. They can even create an entirely
|
||||||
|
new `ElementTree` object. This is an excellent place for creating summaries, adding collected
|
||||||
|
references, or last minute adjustments.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
import xml.etree.ElementTree as etree
|
||||||
|
from typing import TYPE_CHECKING, Any
|
||||||
|
from . import util
|
||||||
|
from . import inlinepatterns
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
|
||||||
|
def build_treeprocessors(md: Markdown, **kwargs: Any) -> util.Registry[Treeprocessor]:
    """ Build the default `treeprocessors` for Markdown. """
    registry: util.Registry[Treeprocessor] = util.Registry()
    # Inline patterns run first, prettifying next, unescaping last.
    for processor, name, priority in (
        (InlineProcessor(md), 'inline', 20),
        (PrettifyTreeprocessor(md), 'prettify', 10),
        (UnescapeTreeprocessor(md), 'unescape', 0),
    ):
        registry.register(processor, name, priority)
    return registry
|
||||||
|
|
||||||
|
|
||||||
|
def isString(s: Any) -> bool:
    """ Return `True` if object is a string but not an [`AtomicString`][markdown.util.AtomicString]. """
    return isinstance(s, str) and not isinstance(s, util.AtomicString)
|
||||||
|
|
||||||
|
|
||||||
|
class Treeprocessor(util.Processor):
    """
    Base class for tree processors.

    Tree processors run against the `ElementTree` object before serialization.
    Each subclass implements `run`, which may modify the tree in place
    (returning `None`) or return a replacement root `Element`.
    """

    def run(self, root: etree.Element) -> etree.Element | None:
        """
        Process `root`; overridden by subclasses.

        Return a replacement `Element` to swap out the root, or `None` after
        modifying the current tree in place.
        """
        pass  # pragma: no cover
|
||||||
|
|
||||||
|
|
||||||
|
class InlineProcessor(Treeprocessor):
|
||||||
|
"""
|
||||||
|
A `Treeprocessor` that traverses a tree, applying inline patterns.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, md):
    """Store the owning `Markdown` instance and precompute placeholder data."""
    # Placeholders are PREFIX + zero-padded id + suffix (the ETX control char).
    self.__placeholder_prefix = util.INLINE_PLACEHOLDER_PREFIX
    self.__placeholder_suffix = util.ETX
    # 4 == width of the zero-padded id embedded in each placeholder.
    self.__placeholder_length = 4 + len(self.__placeholder_prefix) \
        + len(self.__placeholder_suffix)
    self.__placeholder_re = util.INLINE_PLACEHOLDER_RE
    self.md = md
    self.inlinePatterns = md.inlinePatterns
    # Ancestor tag names, consulted by patterns declaring ANCESTOR_EXCLUDES.
    self.ancestors = []
|
||||||
|
|
||||||
|
def __makePlaceholder(self, type) -> tuple[str, str]:
    """Generate a placeholder string and its id for the next stashed node.

    The id is the current stash size, zero-padded to four digits. `type` is
    accepted for interface compatibility but unused here.
    """
    id = "%04d" % len(self.stashed_nodes)
    hash = util.INLINE_PLACEHOLDER % id
    return hash, id
|
||||||
|
|
||||||
|
def __findPlaceholder(self, data: str, index: int) -> tuple[str | None, int]:
    """
    Extract id from data string, start from index.

    Arguments:
        data: String.
        index: Index, from which we start search.

    Returns:
        Placeholder id and string index, after the found placeholder.
        When no placeholder is found, `(None, index + 1)` so the caller
        can advance past a false prefix match.

    """
    m = self.__placeholder_re.search(data, index)
    if m:
        return m.group(1), m.end()
    else:
        return None, index + 1
|
||||||
|
|
||||||
|
def __stashNode(self, node, type) -> str:
    """Add `node` to the stash and return the placeholder standing in for it."""
    placeholder, id = self.__makePlaceholder(type)
    self.stashed_nodes[id] = node
    return placeholder
|
||||||
|
|
||||||
|
def __handleInline(self, data: str, patternIndex: int = 0) -> str:
    """
    Process string with inline patterns and replace it with placeholders.

    Arguments:
        data: A line of Markdown text.
        patternIndex: The index of the `inlinePattern` to start with.

    Returns:
        String with placeholders.

    """
    # `AtomicString`s are opaque: they must never be parsed for inline syntax.
    if not isinstance(data, util.AtomicString):
        startIndex = 0
        count = len(self.inlinePatterns)
        while patternIndex < count:
            data, matched, startIndex = self.__applyPattern(
                self.inlinePatterns[patternIndex], data, patternIndex, startIndex
            )
            # Only advance to the next pattern once the current one stops
            # matching; a successful match re-runs the same pattern from
            # the updated `startIndex`.
            if not matched:
                patternIndex += 1
    return data
|
||||||
|
|
||||||
|
def __processElementText(self, node: etree.Element, subnode: etree.Element, isText: bool = True):
    """
    Process placeholders in `Element.text` or `Element.tail`
    of Elements popped from `self.stashed_nodes`.

    Arguments:
        node: Parent node.
        subnode: Processing node.
        isText: Boolean variable, True - it's text, False - it's a tail.

    """
    # Detach the raw text first; it gets re-attached (minus placeholders)
    # by `__processPlaceholders` via its `linkText` helper.
    if isText:
        text = subnode.text
        subnode.text = None
    else:
        text = subnode.tail
        subnode.tail = None

    childResult = self.__processPlaceholders(text, subnode, isText)

    # Tail content belongs after `subnode` within the parent; text content
    # is inserted at the front of `node`.
    if not isText and node is not subnode:
        pos = list(node).index(subnode) + 1
    else:
        pos = 0

    # Insert in reverse so the children end up in their original order.
    childResult.reverse()
    for newChild in childResult:
        node.insert(pos, newChild[0])
|
||||||
|
|
||||||
|
def __processPlaceholders(
    self,
    data: str,
    parent: etree.Element,
    isText: bool = True
) -> list[tuple[etree.Element, Any]]:
    """
    Process string with placeholders and generate `ElementTree` tree.

    Arguments:
        data: String with placeholders instead of `ElementTree` elements.
        parent: Element, which contains processing inline data.
        isText: Boolean variable, True - it's text, False - it's a tail.

    Returns:
        List with `ElementTree` elements with applied inline patterns.

    """
    def linkText(text):
        # Attach plain text to the correct spot: the tail of the last
        # emitted element, the parent's tail, or the parent's text.
        if text:
            if result:
                if result[-1][0].tail:
                    result[-1][0].tail += text
                else:
                    result[-1][0].tail = text
            elif not isText:
                if parent.tail:
                    parent.tail += text
                else:
                    parent.tail = text
            else:
                if parent.text:
                    parent.text += text
                else:
                    parent.text = text

    result = []
    # NOTE(review): `strartIndex` is a long-standing typo for `startIndex`;
    # kept as-is to leave the code byte-identical.
    strartIndex = 0
    while data:
        index = data.find(self.__placeholder_prefix, strartIndex)
        if index != -1:
            id, phEndIndex = self.__findPlaceholder(data, index)

            if id in self.stashed_nodes:
                node = self.stashed_nodes.get(id)

                # Emit any literal text sitting before the placeholder.
                if index > 0:
                    text = data[strartIndex:index]
                    linkText(text)

                if not isString(node):  # it's Element
                    # Recurse into the stashed element's own text/tails,
                    # which may themselves contain placeholders.
                    for child in [node] + list(node):
                        if child.tail:
                            if child.tail.strip():
                                self.__processElementText(
                                    node, child, False
                                )
                        if child.text:
                            if child.text.strip():
                                self.__processElementText(child, child)
                else:  # it's just a string
                    linkText(node)
                    strartIndex = phEndIndex
                    continue

                strartIndex = phEndIndex
                result.append((node, self.ancestors[:]))

            else:  # wrong placeholder
                # Prefix matched but no valid id follows: emit it verbatim
                # and resume scanning just past the prefix.
                end = index + len(self.__placeholder_prefix)
                linkText(data[strartIndex:end])
                strartIndex = end
        else:
            # No more placeholders: flush the remaining text and finish.
            text = data[strartIndex:]
            if isinstance(data, util.AtomicString):
                # We don't want to lose the `AtomicString`
                text = util.AtomicString(text)
            linkText(text)
            data = ""

    return result
|
||||||
|
|
||||||
|
    def __applyPattern(
        self,
        pattern: inlinepatterns.Pattern,
        data: str,
        patternIndex: int,
        startIndex: int = 0
    ) -> tuple[str, bool, int]:
        """
        Check if the line fits the pattern, create the necessary
        elements, add it to `stashed_nodes`.

        Arguments:
            data: The text to be processed.
            pattern: The pattern to be checked.
            patternIndex: Index of current pattern.
            startIndex: String index, from which we start searching.

        Returns:
            A 3-tuple of (data with the match replaced by a placeholder,
            whether a match was attempted/applied, and the index to resume
            scanning from when the handler produced no node).

        """
        # New-style processors receive the full `data` buffer and report the
        # exact span they consumed; legacy `Pattern` objects only see a match
        # object and use fixed capture-group conventions.
        new_style = isinstance(pattern, inlinepatterns.InlineProcessor)

        # Patterns may opt out of running inside certain ancestor tags.
        for exclude in pattern.ANCESTOR_EXCLUDES:
            if exclude.lower() in self.ancestors:
                return data, False, 0

        if new_style:
            match = None
            # Since `handleMatch` may reject our first match,
            # we iterate over the buffer looking for matches
            # until we can't find any more.
            for match in pattern.getCompiledRegExp().finditer(data, startIndex):
                node, start, end = pattern.handleMatch(match, data)
                if start is None or end is None:
                    # Handler rejected this match: skip past it and retry.
                    startIndex += match.end(0)
                    match = None
                    continue
                break
        else:  # pragma: no cover
            # Legacy patterns match from `startIndex` only; remember the
            # untouched prefix so it can be re-attached to the result.
            match = pattern.getCompiledRegExp().match(data[startIndex:])
            leftData = data[:startIndex]

        if not match:
            return data, False, 0

        if not new_style:  # pragma: no cover
            node = pattern.handleMatch(match)
            start = match.start(0)
            end = match.end(0)

        if node is None:
            # Matched but produced nothing: caller resumes after the match.
            return data, True, end

        if not isString(node):
            if not isinstance(node.text, util.AtomicString):
                # We need to process current node too
                # NOTE(review): the inner guard re-tests `node`, not `child`;
                # it is therefore invariant over the loop — confirm against
                # upstream before "fixing".
                for child in [node] + list(node):
                    if not isString(node):
                        if child.text:
                            self.ancestors.append(child.tag.lower())
                            child.text = self.__handleInline(
                                child.text, patternIndex + 1
                            )
                            self.ancestors.pop()
                        if child.tail:
                            # Tails belong to the parent context, so reuse the
                            # current pattern index rather than advancing it.
                            child.tail = self.__handleInline(
                                child.tail, patternIndex
                            )

        # Stash the produced node and splice its placeholder into the text.
        placeholder = self.__stashNode(node, pattern.type())

        if new_style:
            return "{}{}{}".format(data[:start],
                                   placeholder, data[end:]), True, 0
        else:  # pragma: no cover
            # Legacy convention: group(1) is the text before the match and
            # the last group is the text after it.
            return "{}{}{}{}".format(leftData,
                                     match.group(1),
                                     placeholder, match.groups()[-1]), True, 0
|
||||||
|
|
||||||
|
def __build_ancestors(self, parent, parents):
|
||||||
|
"""Build the ancestor list."""
|
||||||
|
ancestors = []
|
||||||
|
while parent is not None:
|
||||||
|
if parent is not None:
|
||||||
|
ancestors.append(parent.tag.lower())
|
||||||
|
parent = self.parent_map.get(parent)
|
||||||
|
ancestors.reverse()
|
||||||
|
parents.extend(ancestors)
|
||||||
|
|
||||||
|
    def run(self, tree: etree.Element, ancestors: list[str] | None = None) -> etree.Element:
        """Apply inline patterns to a parsed Markdown tree.

        Iterate over `Element`, find elements with inline tag, apply inline
        patterns and append newly created Elements to tree. To avoid further
        processing of string with inline patterns, instead of normal string,
        use subclass [`AtomicString`][markdown.util.AtomicString]:

            node.text = markdown.util.AtomicString("This will not be processed.")

        Arguments:
            tree: `Element` object, representing Markdown tree.
            ancestors: List of parent tag names that precede the tree node (if needed).

        Returns:
            An element tree object with applied inline patterns.

        """
        # Placeholder id -> produced node; filled by the pattern machinery.
        self.stashed_nodes: dict[str, etree.Element] = {}

        # Ensure a valid parent list, but copy passed in lists
        # to ensure we don't have the user accidentally change it on us.
        tree_parents = [] if ancestors is None else ancestors[:]

        # `ElementTree` elements have no parent pointer; build a reverse map.
        self.parent_map = {c: p for p in tree.iter() for c in p}
        # Explicit stack of (element, ancestor-tags) pairs avoids recursion.
        stack = [(tree, tree_parents)]

        while stack:
            currElement, parents = stack.pop()

            self.ancestors = parents
            self.__build_ancestors(currElement, self.ancestors)

            # Children produced from a node's text must be inserted *before*
            # its existing children, so they are queued and spliced in last.
            insertQueue = []
            for child in currElement:
                if child.text and not isinstance(
                    child.text, util.AtomicString
                ):
                    self.ancestors.append(child.tag.lower())
                    text = child.text
                    # Clear the text first; `__processPlaceholders` rebuilds
                    # it around the stashed nodes.
                    child.text = None
                    lst = self.__processPlaceholders(
                        self.__handleInline(text), child
                    )
                    for item in lst:
                        self.parent_map[item[0]] = child
                    stack += lst
                    insertQueue.append((child, lst))
                    self.ancestors.pop()
                if child.tail:
                    tail = self.__handleInline(child.tail)
                    # A throwaway element collects whatever text precedes the
                    # first placeholder in the processed tail.
                    dumby = etree.Element('d')
                    child.tail = None
                    tailResult = self.__processPlaceholders(tail, dumby, False)
                    if dumby.tail:
                        child.tail = dumby.tail
                    # New siblings go immediately after `child`; reversed so
                    # repeated inserts at `pos` preserve original order.
                    pos = list(currElement).index(child) + 1
                    tailResult.reverse()
                    for newChild in tailResult:
                        self.parent_map[newChild[0]] = currElement
                        currElement.insert(pos, newChild[0])
                if len(child):
                    # Descend into children on a later iteration.
                    self.parent_map[child] = currElement
                    stack.append((child, self.ancestors[:]))

            for element, lst in insertQueue:
                for i, obj in enumerate(lst):
                    newChild = obj[0]
                    element.insert(i, newChild)
        return tree
|
||||||
|
|
||||||
|
|
||||||
|
class PrettifyTreeprocessor(Treeprocessor):
    """ Add line breaks to the html document. """

    def _prettifyETree(self, elem):
        """ Recursively add line breaks to `ElementTree` children. """

        newline = "\n"
        is_block = self.md.is_block_level
        if is_block(elem.tag) and elem.tag not in ['code', 'pre']:
            # Insert a leading newline when the first child is itself a
            # block-level element and there is no meaningful text yet.
            has_blank_text = not elem.text or not elem.text.strip()
            if has_blank_text and len(elem) and is_block(elem[0].tag):
                elem.text = newline
            # Recurse into block-level children only.
            for subelem in elem:
                if is_block(subelem.tag):
                    self._prettifyETree(subelem)
        # A trailing newline separates this element from its next sibling.
        if not elem.tail or not elem.tail.strip():
            elem.tail = newline

    def run(self, root: etree.Element) -> None:
        """ Add line breaks to `Element` object and its children. """

        self._prettifyETree(root)
        # Do `<br />`'s separately as they are often in the middle of
        # inline content and missed by `_prettifyETree`.
        for br in root.iter('br'):
            if not br.tail or not br.tail.strip():
                br.tail = '\n'
            else:
                br.tail = '\n' + br.tail
        # Clean up extra empty lines at end of code blocks.
        for pre in root.iter('pre'):
            if len(pre) and pre[0].tag == 'code':
                code = pre[0]
                # Only prettify code containing text only
                if not len(code) and code.text is not None:
                    code.text = util.AtomicString(code.text.rstrip() + '\n')
|
||||||
|
|
||||||
|
|
||||||
|
class UnescapeTreeprocessor(Treeprocessor):
    """ Restore escaped chars """

    # Escaped characters are stashed as STX<codepoint>ETX during parsing.
    RE = re.compile(r'{}(\d+){}'.format(util.STX, util.ETX))

    def _unescape(self, m):
        # Convert the stashed decimal codepoint back to its character.
        codepoint = int(m.group(1))
        return chr(codepoint)

    def unescape(self, text: str) -> str:
        """Replace every stashed escape in `text` with its original char."""
        return self.RE.sub(self._unescape, text)

    def run(self, root):
        """ Loop over all elements and unescape all text. """
        for elem in root.iter():
            # Text content — `code` elements keep their escapes verbatim.
            if elem.text and elem.tag != 'code':
                elem.text = self.unescape(elem.text)
            # Tail content is always restored.
            if elem.tail:
                elem.tail = self.unescape(elem.tail)
            # Attribute values.
            for key, value in elem.items():
                elem.set(key, self.unescape(value))
|
||||||
399
plugins/markdown_preview/markdown/util.py
Normal file
399
plugins/markdown_preview/markdown/util.py
Normal file
@@ -0,0 +1,399 @@
|
|||||||
|
# Python Markdown
|
||||||
|
|
||||||
|
# A Python implementation of John Gruber's Markdown.
|
||||||
|
|
||||||
|
# Documentation: https://python-markdown.github.io/
|
||||||
|
# GitHub: https://github.com/Python-Markdown/markdown/
|
||||||
|
# PyPI: https://pypi.org/project/Markdown/
|
||||||
|
|
||||||
|
# Started by Manfred Stienstra (http://www.dwerg.net/).
|
||||||
|
# Maintained for a few years by Yuri Takhteyev (http://www.freewisdom.org).
|
||||||
|
# Currently maintained by Waylan Limberg (https://github.com/waylan),
|
||||||
|
# Dmitry Shachnev (https://github.com/mitya57) and Isaac Muse (https://github.com/facelessuser).
|
||||||
|
|
||||||
|
# Copyright 2007-2023 The Python Markdown Project (v. 1.7 and later)
|
||||||
|
# Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
|
||||||
|
# Copyright 2004 Manfred Stienstra (the original version)
|
||||||
|
|
||||||
|
# License: BSD (see LICENSE.md for details).
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module contains various contacts, classes and functions which get referenced and used
|
||||||
|
throughout the code base.
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import warnings
|
||||||
|
from functools import wraps, lru_cache
|
||||||
|
from itertools import count
|
||||||
|
from typing import TYPE_CHECKING, Generic, Iterator, NamedTuple, TypeVar, overload
|
||||||
|
|
||||||
|
if TYPE_CHECKING: # pragma: no cover
|
||||||
|
from markdown import Markdown
|
||||||
|
|
||||||
|
_T = TypeVar('_T')
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
Constants you might want to modify
|
||||||
|
-----------------------------------------------------------------------------
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
BLOCK_LEVEL_ELEMENTS: list[str] = [
    # Elements which are invalid to wrap in a `<p>` tag.
    # See https://w3c.github.io/html/grouping-content.html#the-p-element
    'address', 'article', 'aside', 'blockquote', 'details', 'div', 'dl',
    'fieldset', 'figcaption', 'figure', 'footer', 'form', 'h1', 'h2', 'h3',
    'h4', 'h5', 'h6', 'header', 'hgroup', 'hr', 'main', 'menu', 'nav', 'ol',
    'p', 'pre', 'section', 'table', 'ul',
    # Other elements which Markdown should not be mucking up the contents of.
    'canvas', 'colgroup', 'dd', 'body', 'dt', 'group', 'html', 'iframe', 'li', 'legend',
    'math', 'map', 'noscript', 'output', 'object', 'option', 'progress', 'script',
    'style', 'summary', 'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'video'
]
"""
List of HTML tags which get treated as block-level elements. Same as the `block_level_elements`
attribute of the [`Markdown`][markdown.Markdown] class. Generally one should use the
attribute on the class. This remains for compatibility with older extensions.
"""

# Placeholders
# STX/ETX are ASCII control characters, chosen because they are extremely
# unlikely to occur in real document text.
STX = '\u0002'
""" "Start of Text" marker for placeholder templates. """
ETX = '\u0003'
""" "End of Text" marker for placeholder templates. """
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
""" Prefix for inline placeholder template. """
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
""" Placeholder template for stashed inline text. """
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]+)')
""" Regular Expression which matches inline placeholders. """
AMP_SUBSTITUTE = STX+"amp"+ETX
""" Placeholder template for HTML entities. """
HTML_PLACEHOLDER = STX + "wzxhzdk:%s" + ETX
""" Placeholder template for raw HTML. """
HTML_PLACEHOLDER_RE = re.compile(HTML_PLACEHOLDER % r'([0-9]+)')
""" Regular expression which matches HTML placeholders. """
TAG_PLACEHOLDER = STX + "hzzhzkh:%s" + ETX
""" Placeholder template for tags. """


# Constants you probably do not need to change
# -----------------------------------------------------------------------------

RTL_BIDI_RANGES = (
    ('\u0590', '\u07FF'),
    # Hebrew (0590-05FF), Arabic (0600-06FF),
    # Syriac (0700-074F), Arabic supplement (0750-077F),
    # Thaana (0780-07BF), Nko (07C0-07FF).
    ('\u2D30', '\u2D7F')  # Tifinagh
)
|
||||||
|
|
||||||
|
|
||||||
|
# AUXILIARY GLOBAL FUNCTIONS
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
@lru_cache(maxsize=None)
def get_installed_extensions():
    """ Return all entry_points in the `markdown.extensions` group. """
    # `entry_points(group=...)` only exists in the stdlib from Python 3.10;
    # earlier interpreters use the `importlib_metadata` backport instead.
    if sys.version_info < (3, 10):
        import importlib_metadata as metadata
    else:
        from importlib import metadata
    # Only load extension entry_points once (cached by `lru_cache`).
    return metadata.entry_points(group='markdown.extensions')
|
||||||
|
|
||||||
|
|
||||||
|
def deprecated(message: str, stacklevel: int = 2):
    """
    Raise a [`DeprecationWarning`][] when wrapped function/method is called.

    Usage:

    ```python
    @deprecated("This method will be removed in version X; use Y instead.")
    def some_method():
        pass
    ```
    """
    def decorator(func):
        @wraps(func)
        def wrapped(*args, **kwargs):
            # Emit the warning on every call, pointing at the caller's frame.
            notice = f"'{func.__name__}' is deprecated. {message}"
            warnings.warn(
                notice,
                category=DeprecationWarning,
                stacklevel=stacklevel
            )
            return func(*args, **kwargs)
        return wrapped
    return decorator
|
||||||
|
|
||||||
|
|
||||||
|
def parseBoolValue(value: str | None, fail_on_errors: bool = True, preserve_none: bool = False) -> bool | None:
    """Parses a string representing a boolean value. If parsing was successful,
    returns `True` or `False`. If `preserve_none=True`, returns `True`, `False`,
    or `None`. If parsing was not successful, raises `ValueError`, or, if
    `fail_on_errors=False`, returns `None`."""
    if not isinstance(value, str):
        # Non-string input: optionally pass `None` straight through,
        # otherwise fall back to ordinary truthiness.
        if preserve_none and value is None:
            return value
        return bool(value)
    lowered = value.lower()
    if preserve_none and lowered == 'none':
        return None
    if lowered in ('true', 'yes', 'y', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'n', 'off', '0', 'none'):
        return False
    if fail_on_errors:
        raise ValueError('Cannot parse bool value: %r' % value)
|
||||||
|
|
||||||
|
|
||||||
|
def code_escape(text: str) -> str:
    """HTML escape a string of code."""
    # Ampersand is replaced first so the entities produced by the later
    # substitutions are not themselves re-escaped.
    for raw, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
        if raw in text:
            text = text.replace(raw, entity)
    return text
|
||||||
|
|
||||||
|
|
||||||
|
def _get_stack_depth(size=2):
    """Get current stack depth, performantly.

    Starts `size` frames above this function (skipping this helper and its
    caller by default) and walks `f_back` links until the bottom of the
    stack, counting frames as it goes.
    """
    # Jump straight over our own frame(s) instead of counting them.
    frame = sys._getframe(size)

    # Reuse `size` as the running counter; when `f_back` is exhausted the
    # current count is the total depth.
    for size in count(size):
        frame = frame.f_back
        if not frame:
            return size
|
||||||
|
|
||||||
|
|
||||||
|
def nearing_recursion_limit() -> bool:
    """Return true if current stack depth is within 100 of maximum limit."""
    # Headroom left before Python would raise `RecursionError`.
    remaining = sys.getrecursionlimit() - _get_stack_depth()
    return remaining < 100
|
||||||
|
|
||||||
|
|
||||||
|
# MISC AUXILIARY CLASSES
|
||||||
|
# =============================================================================
|
||||||
|
|
||||||
|
|
||||||
|
class AtomicString(str):
    """A string subclass marking text that must not be processed further."""
|
||||||
|
|
||||||
|
|
||||||
|
class Processor:
    """ The base class for all processors.

    Attributes:
        Processor.md: The `Markdown` instance passed in an initialization.

    Arguments:
        md: The `Markdown` instance this processor is a part of.

    """
    def __init__(self, md: Markdown | None = None):
        # Stored as-is; subclasses use it to reach parser-wide state.
        self.md = md
|
||||||
|
|
||||||
|
|
||||||
|
class HtmlStash:
    """
    This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders.
    """

    def __init__(self):
        """ Create an `HtmlStash`. """
        self.html_counter = 0  # for counting inline html segments
        self.rawHtmlBlocks = []
        self.tag_counter = 0
        self.tag_data = []  # list of dictionaries in the order tags appear

    def store(self, html: str) -> str:
        """
        Saves an HTML segment for later reinsertion. Returns a
        placeholder string that needs to be inserted into the
        document.

        Keyword arguments:
            html: An html segment.

        Returns:
            A placeholder string.

        """
        # The placeholder index matches the segment's position in
        # `rawHtmlBlocks`.
        placeholder = self.get_placeholder(self.html_counter)
        self.rawHtmlBlocks.append(html)
        self.html_counter += 1
        return placeholder

    def reset(self) -> None:
        """ Clear the stash. """
        self.html_counter = 0
        self.rawHtmlBlocks = []

    def get_placeholder(self, key: int) -> str:
        """Return the placeholder string for stash index `key`."""
        return HTML_PLACEHOLDER % key

    def store_tag(self, tag: str, attrs: list, left_index: int, right_index: int) -> str:
        """Store tag data and return a placeholder."""
        entry = {
            'tag': tag,
            'attrs': attrs,
            'left_index': left_index,
            'right_index': right_index,
        }
        self.tag_data.append(entry)
        placeholder = TAG_PLACEHOLDER % str(self.tag_counter)
        self.tag_counter += 1  # equal to the tag's index in `self.tag_data`
        return placeholder
|
||||||
|
|
||||||
|
|
||||||
|
# Used internally by `Registry` for each item in its sorted list.
# Provides an easier to read API when editing the code later.
# For example, `item.name` is more clear than `item[0]`.
class _PriorityItem(NamedTuple):
    # Registered name used for lookup and deregistration.
    name: str
    # Sort key; higher priority sorts first.
    priority: float
|
||||||
|
|
||||||
|
|
||||||
|
class Registry(Generic[_T]):
    """
    A priority sorted registry.

    A `Registry` instance provides two public methods to alter the data of the
    registry: `register` and `deregister`. Use `register` to add items and
    `deregister` to remove items. See each method for specifics.

    When registering an item, a "name" and a "priority" must be provided. All
    items are automatically sorted by "priority" from highest to lowest. The
    "name" is used to remove ("deregister") and get items.

    A `Registry` instance it like a list (which maintains order) when reading
    data. You may iterate over the items, get an item and get a count (length)
    of all items. You may also check that the registry contains an item.

    When getting an item you may use either the index of the item or the
    string-based "name". For example:

        registry = Registry()
        registry.register(SomeItem(), 'itemname', 20)
        # Get the item by index
        item = registry[0]
        # Get the item by name
        item = registry['itemname']

    When checking that the registry contains an item, you may use either the
    string-based "name", or a reference to the actual item. For example:

        someitem = SomeItem()
        registry.register(someitem, 'itemname', 20)
        # Contains the name
        assert 'itemname' in registry
        # Contains the item instance
        assert someitem in registry

    The method `get_index_for_name` is also available to obtain the index of
    an item using that item's assigned "name".
    """

    def __init__(self):
        # Name -> item mapping; order is tracked separately in `_priority`.
        self._data: dict[str, _T] = {}
        # List of `_PriorityItem(name, priority)`; lazily sorted on demand.
        self._priority: list[_PriorityItem] = []
        # Dirty flag so `_sort` only re-sorts after a mutation.
        self._is_sorted = False

    def __contains__(self, item: str | _T) -> bool:
        if isinstance(item, str):
            # Check if an item exists by this name.
            return item in self._data.keys()
        # Check if this instance exists.
        return item in self._data.values()

    def __iter__(self) -> Iterator[_T]:
        self._sort()
        return iter([self._data[k] for k, p in self._priority])

    @overload
    def __getitem__(self, key: str | int) -> _T:  # pragma: no cover
        ...

    @overload
    def __getitem__(self, key: slice) -> Registry[_T]:  # pragma: no cover
        ...

    def __getitem__(self, key: str | int | slice) -> _T | Registry[_T]:
        self._sort()
        if isinstance(key, slice):
            # Slicing returns a new `Registry` preserving names/priorities.
            data: Registry[_T] = Registry()
            for k, p in self._priority[key]:
                data.register(self._data[k], k, p)
            return data
        if isinstance(key, int):
            # Integer index refers to the priority-sorted position.
            return self._data[self._priority[key].name]
        return self._data[key]

    def __len__(self) -> int:
        return len(self._priority)

    def __repr__(self):
        return '<{}({})>'.format(self.__class__.__name__, list(self))

    def get_index_for_name(self, name: str) -> int:
        """
        Return the index of the given name.

        Raises:
            ValueError: If no item is registered under `name`.
        """
        if name in self:
            self._sort()
            return self._priority.index(
                [x for x in self._priority if x.name == name][0]
            )
        raise ValueError('No item named "{}" exists.'.format(name))

    def register(self, item: _T, name: str, priority: float) -> None:
        """
        Add an item to the registry with the given name and priority.

        Arguments:
            item: The item being registered.
            name: A string used to reference the item.
            priority: An integer or float used to sort against all items.

        If an item is registered with a "name" which already exists, the
        existing item is replaced with the new item. Treat carefully as the
        old item is lost with no way to recover it. The new item will be
        sorted according to its priority and will **not** retain the position
        of the old item.
        """
        if name in self:
            # Remove existing item of same name first
            self.deregister(name)
        self._is_sorted = False
        self._data[name] = item
        self._priority.append(_PriorityItem(name, priority))

    def deregister(self, name: str, strict: bool = True) -> None:
        """
        Remove an item from the registry.

        Set `strict=False` to fail silently. Otherwise a [`ValueError`][] is raised for an unknown `name`.
        """
        try:
            index = self.get_index_for_name(name)
            del self._priority[index]
            del self._data[name]
        except ValueError:
            if strict:
                raise

    def _sort(self):
        """
        Sort the registry by priority from highest to lowest.

        This method is called internally and should never be explicitly called.
        """
        if not self._is_sorted:
            # `list.sort` is stable, so equal priorities keep insertion order.
            self._priority.sort(key=lambda item: item.priority, reverse=True)
            self._is_sorted = True
|
||||||
121
plugins/markdown_preview/markdown_preview.glade
Normal file
121
plugins/markdown_preview/markdown_preview.glade
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!-- Generated with glade 3.40.0 -->
|
||||||
|
<interface>
|
||||||
|
<requires lib="gtk+" version="3.24"/>
|
||||||
|
<requires lib="webkit2gtk" version="2.28"/>
|
||||||
|
<object class="GtkImage" id="settings_img">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="stock">gtk-justify-fill</property>
|
||||||
|
</object>
|
||||||
|
<object class="WebKitSettings" type-func="webkit_settings_get_type" id="web_view_settings">
|
||||||
|
<property name="enable-offline-web-application-cache">False</property>
|
||||||
|
<property name="enable-html5-local-storage">False</property>
|
||||||
|
<property name="enable-html5-database">False</property>
|
||||||
|
<property name="enable-xss-auditor">False</property>
|
||||||
|
<property name="enable-hyperlink-auditing">False</property>
|
||||||
|
<property name="enable-tabs-to-links">False</property>
|
||||||
|
<property name="enable-fullscreen">False</property>
|
||||||
|
<property name="print-backgrounds">False</property>
|
||||||
|
<property name="enable-webaudio">False</property>
|
||||||
|
<property name="enable-page-cache">False</property>
|
||||||
|
<property name="user-agent">Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.0 Safari/605.1.15</property>
|
||||||
|
<property name="enable-accelerated-2d-canvas">True</property>
|
||||||
|
<property name="allow-file-access-from-file-urls">True</property>
|
||||||
|
<property name="allow-universal-access-from-file-urls">True</property>
|
||||||
|
<property name="enable-webrtc">True</property>
|
||||||
|
</object>
|
||||||
|
<object class="GtkPopover" id="markdown_preview_dialog">
|
||||||
|
<property name="width-request">620</property>
|
||||||
|
<property name="height-request">480</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="vexpand">True</property>
|
||||||
|
<property name="position">left</property>
|
||||||
|
<property name="modal">False</property>
|
||||||
|
<property name="transitions-enabled">False</property>
|
||||||
|
<property name="constrain-to">none</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="hexpand">True</property>
|
||||||
|
<property name="vexpand">True</property>
|
||||||
|
<property name="orientation">vertical</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButtonBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="layout-style">end</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkToggleButton">
|
||||||
|
<property name="label">gtk-media-pause</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="use-stock">True</property>
|
||||||
|
<property name="always-show-image">True</property>
|
||||||
|
<signal name="toggled" handler="_tggle_preview_updates" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="sensitive">False</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="image">settings_img</property>
|
||||||
|
<signal name="clicked" handler="_handle_settings" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkScrolledWindow">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="shadow-type">in</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkViewport">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<child>
|
||||||
|
<object class="WebKitWebView" type-func="webkit_web_view_get_type" id="markdown_view">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="settings">web_view_settings</property>
|
||||||
|
<property name="is-ephemeral">True</property>
|
||||||
|
<property name="is-muted">True</property>
|
||||||
|
<property name="default-content-security-policy">*</property>
|
||||||
|
<child>
|
||||||
|
<placeholder/>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</interface>
|
||||||
42
plugins/markdown_preview/markdown_template_mixin.py
Normal file
42
plugins/markdown_preview/markdown_template_mixin.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class MarkdownTemplateMixin:
    """Provides the HTML page scaffold that rendered markdown is injected into."""

    def wrap_html_to_body(self, html):
        # Wrap the given HTML fragment in a complete dark-themed document for
        # display in the WebKit preview pane.
        # NOTE(review): the background color is an 8-digit hex with a fully
        # transparent alpha channel (#32383e00) -- presumably so the WebView's
        # own background shows through; confirm against the plugin's
        # `set_background_color` call.
        return f"""\
<!DOCTYPE html>
<html lang="en" dir="ltr">
    <head>
        <meta charset="utf-8">
        <title>Markdown View</title>
        <style media="screen">
            html, body {{
                display: block;
                background-color: #32383e00;
                color: #ffffff;
                text-wrap: wrap;
            }}

            img {{
                width: 100%;
                height: auto;
            }}

            code {{
                border: 1px solid #32383e;
                background-color: #32383e;
                padding: 4px;
            }}
        </style>
    </head>
    <body>
        {html}
    </body>
</html>

"""
|
||||||
114
plugins/markdown_preview/plugin.py
Normal file
114
plugins/markdown_preview/plugin.py
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
# Python imports
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
gi.require_version('Gdk', '3.0')
|
||||||
|
gi.require_version('WebKit2', '4.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
from gi.repository import Gdk
|
||||||
|
from gi.repository import WebKit2
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from . import markdown
|
||||||
|
from .markdown_template_mixin import MarkdownTemplateMixin
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(MarkdownTemplateMixin, PluginBase):
    """Markdown preview plugin: renders the active buffer's markdown into a
    WebKit2 WebView shown in a popover dialog."""

    def __init__(self):
        super().__init__()

        self.name = "Markdown Preview"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                        # where self.name should not be needed for message comms
        self.path = os.path.dirname(os.path.realpath(__file__))
        self._GLADE_FILE = f"{self.path}/markdown_preview.glade"

        # FIX: initialize these (as the search_replace plugin does) so
        # _tggle_markdown_preview can safely test them before any
        # "set_active_src_view" event has fired.
        self._active_src_view = None
        self._buffer = None

        self.is_preview_paused = False
        self.is_md_file = False

    def run(self):
        """Build the UI from the glade file and wire up the preview widgets."""
        WebKit2.WebView()  # Need one initialized for webview to work from glade file

        self._builder = Gtk.Builder()
        self._builder.add_from_file(self._GLADE_FILE)
        self._connect_builder_signals(self, self._builder)

        separator_right = self._ui_objects[0]
        self._markdown_dialog = self._builder.get_object("markdown_preview_dialog")
        self._markdown_view = self._builder.get_object("markdown_view")
        self._web_view_settings = self._builder.get_object("web_view_settings")

        self._markdown_dialog.set_relative_to(separator_right)
        self._markdown_view.set_settings(self._web_view_settings)
        # Transparent background so the page blends into the editor theme.
        self._markdown_view.set_background_color(Gdk.RGBA(0, 0, 0, 0.0))

    def generate_reference_ui_element(self):
        ...

    def subscribe_to_events(self):
        """Register handlers on the application event bus."""
        self._event_system.subscribe("tggle_markdown_preview", self._tggle_markdown_preview)
        self._event_system.subscribe("set_active_src_view", self._set_active_src_view)
        self._event_system.subscribe("buffer_changed", self._do_markdown_translate)

    def _buffer_changed_first_load(self, buffer):
        # Remember the buffer, then render it once immediately.
        self._buffer = buffer
        self._do_markdown_translate(buffer)

    def _set_active_src_view(self, source_view):
        # Track the newly focused editor view and re-render its buffer.
        self._active_src_view = source_view
        self._buffer = self._active_src_view.get_buffer()

        self._do_markdown_translate(self._buffer)

    def _handle_settings(self, widget = None, eve = None):
        ...

    def _tggle_preview_updates(self, widget = None, eve = None):
        """Pause/resume live preview updates; render once on resume."""
        self.is_preview_paused = not self.is_preview_paused
        widget.set_active(self.is_preview_paused)

        if not self.is_preview_paused:
            self._do_markdown_translate(self._buffer)

    def _tggle_markdown_preview(self, widget = None, eve = None):
        """Show the preview popover if hidden, hide it if shown."""
        if not self._active_src_view: return

        is_visible = self._markdown_dialog.is_visible()
        buffer = self._active_src_view.get_buffer()

        # FIX: dropped the dead `data = None` local (copied from the
        # search_replace plugin) — `elif not data and is_visible` always
        # reduced to `elif is_visible`, i.e. a plain else here.
        if not is_visible:
            self._markdown_dialog.popup()
            self._do_markdown_translate(buffer)
        else:
            self._markdown_dialog.popdown()

    def _do_markdown_translate(self, buffer):
        """Render *buffer* unless updates are paused, the dialog is hidden,
        or the file is not markdown."""
        if self.is_preview_paused: return

        self.is_markdown_check()
        is_visible = self._markdown_dialog.is_visible()
        if not is_visible or not self.is_md_file: return
        self.render_markdown(buffer)

    def render_markdown(self, buffer):
        """Convert the buffer's text to HTML and load it into the web view."""
        start_iter = buffer.get_start_iter()
        end_iter = buffer.get_end_iter()
        text = buffer.get_text(start_iter, end_iter, include_hidden_chars = False)
        html = markdown.markdown(text)

        # base_uri points at the file's folder so relative image links resolve.
        path = self._active_src_view.get_current_file().get_parent().get_path()
        data = self.wrap_html_to_body(html)
        self._markdown_view.load_html(content = data, base_uri = f"file://{path}/")

    def is_markdown_check(self):
        """Set self.is_md_file; show a placeholder page for non-markdown files."""
        self.is_md_file = self._active_src_view.get_filetype() == "markdown"
        if not self.is_md_file:
            data = self.wrap_html_to_body("<h1>Not a Markdown file...</h1>")
            self._markdown_view.load_html(content = data, base_uri = None)
|
||||||
3
plugins/search_replace/__init__.py
Normal file
3
plugins/search_replace/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Pligin Module
|
||||||
|
"""
|
||||||
3
plugins/search_replace/__main__.py
Normal file
3
plugins/search_replace/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Pligin Package
|
||||||
|
"""
|
||||||
12
plugins/search_replace/manifest.json
Normal file
12
plugins/search_replace/manifest.json
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
{
|
||||||
|
"name": "Search/Replace",
|
||||||
|
"author": "ITDominator",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"pass_events": true,
|
||||||
|
"pass_ui_objects": ["separator_botton"],
|
||||||
|
"bind_keys": ["Search/Replace||tggl_search_replace:<Control>f"]
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
221
plugins/search_replace/plugin.py
Normal file
221
plugins/search_replace/plugin.py
Normal file
@@ -0,0 +1,221 @@
|
|||||||
|
# Python imports
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import threading
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
gi.require_version('Gdk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
from gi.repository import Gdk
|
||||||
|
from gi.repository import GLib
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
from .styling_mixin import StylingMixin
|
||||||
|
from .replace_mixin import ReplaceMixin
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(StylingMixin, ReplaceMixin, PluginBase):
    """Search/Replace plugin: highlights matches in the active buffer with a
    text tag and offers find-next / replace built on those tag toggles."""

    def __init__(self):
        super().__init__()

        self.name = "Search/Replace"  # NOTE: Need to remove after establishing private bidirectional 1-1 message bus
                                      # where self.name should not be needed for message comms
        self.path = os.path.dirname(os.path.realpath(__file__))
        self._GLADE_FILE = f"{self.path}/search_replace.glade"

        # Widgets resolved in run(); editor state pushed in by events.
        self._search_replace_dialog = None
        self._find_entry = None
        self._replace_entry = None
        self._active_src_view = None
        self._buffer = None
        self._tag_table = None

        # Search option flags toggled from StylingMixin handlers.
        self.use_regex = False
        self.use_case_sensitive = False
        self.search_only_in_selection = False
        self.use_whole_word_search = False

        # Debounce timer for short queries (see search_for_string).
        self.timer = None
        self.search_time = 0.35       # base debounce window, seconds
        self.find_text = ""
        self.search_tag = "search_tag"    # name of the highlight TextTag
        self.highlight_color = "#FBF719"
        self.text_color = "#000000"
        self.alpha_num_under = re.compile(r"[a-zA-Z0-9_]")

    def run(self):
        """Build the popover UI from the glade file and anchor it."""
        self._builder = Gtk.Builder()
        self._builder.add_from_file(self._GLADE_FILE)
        self._connect_builder_signals(self, self._builder)

        separator_botton = self._ui_objects[0]
        self._search_replace_dialog = self._builder.get_object("search_replace_dialog")
        self._find_status_lbl = self._builder.get_object("find_status_lbl")
        self._find_options_lbl = self._builder.get_object("find_options_lbl")

        self._find_entry = self._builder.get_object("find_entry")
        self._replace_entry = self._builder.get_object("replace_entry")

        self._search_replace_dialog.set_relative_to(separator_botton)
        self._search_replace_dialog.set_hexpand(True)

    def generate_reference_ui_element(self):
        ...

    def subscribe_to_events(self):
        """Register handlers on the application event bus."""
        self._event_system.subscribe("tggl_search_replace", self._tggl_search_replace)
        self._event_system.subscribe("set_active_src_view", self._set_active_src_view)

    def _set_active_src_view(self, source_view):
        # Track the focused view and re-run the current query against it.
        self._active_src_view = source_view
        self._buffer = self._active_src_view.get_buffer()
        self._tag_table = self._buffer.get_tag_table()
        self.search_for_string(self._find_entry)

    def _show_search_replace(self, widget = None, eve = None):
        self._search_replace_dialog.popup()

    def _tggl_search_replace(self, widget = None, eve = None):
        """Toggle the popover; seed the find entry from the selection, if any."""
        is_visible = self._search_replace_dialog.is_visible()
        buffer = self._active_src_view.get_buffer()
        data = None

        if buffer.get_has_selection():
            start, end = buffer.get_selection_bounds()
            data = buffer.get_text(start, end, include_hidden_chars = False)

        if data:
            self._find_entry.set_text(data)

        if not is_visible:
            self._search_replace_dialog.popup();
            self._find_entry.grab_focus()
        elif not data and is_visible:
            # Visible with no selection: close and clear.
            self._search_replace_dialog.popdown()
            self._find_entry.set_text("")
        else:
            # Visible and a selection was loaded: just refocus the entry.
            self._find_entry.grab_focus()

    def get_search_tag(self, buffer):
        """Return the highlight tag, creating it on first use.

        Side effect: removes the tag from the whole buffer, so highlights
        are always rebuilt from scratch (and cleared for an empty query).
        """
        tag_table = buffer.get_tag_table()
        search_tag = tag_table.lookup(self.search_tag)
        if not search_tag:
            search_tag = buffer.create_tag(self.search_tag, background = self.highlight_color, foreground = self.text_color)

        buffer.remove_tag_by_name(self.search_tag, buffer.get_start_iter(), buffer.get_end_iter())
        return search_tag

    def cancel_timer(self):
        # Cancel the pending debounce and any queued idle callback.
        # NOTE(review): idle_remove_by_data(None) assumes the idle source was
        # added with data=None — confirm against GLib.idle_add usage below.
        if self.timer:
            self.timer.cancel()
            GLib.idle_remove_by_data(None)

    def delay_search_glib(self):
        # Hop back onto the GTK main loop before touching widgets.
        GLib.idle_add(self._do_highlight)

    def delay_search(self):
        """Schedule a debounced highlight; shorter queries wait longer."""
        wait_time = self.search_time / len(self.find_text)
        wait_time = max(wait_time, 0.05)

        self.timer = threading.Timer(wait_time, self.delay_search_glib)
        self.timer.daemon = True
        self.timer.start()

    def on_enter_search(self, widget, eve):
        """Jump to the next match when Return is released in the find entry."""
        text = widget.get_text()
        if not text: return

        keyname = Gdk.keyval_name(eve.keyval)
        if keyname == "Return":
            self.find_next(widget)

    def search_for_string(self, widget):
        """Entry point for search-changed: debounce short queries (1-4 chars),
        highlight longer (or empty) ones immediately."""
        self.cancel_timer()

        self.find_text = widget.get_text()
        if len(self.find_text) > 0 and len(self.find_text) < 5:
            self.delay_search()
        else:
            self._do_highlight(self.find_text)

    def _do_highlight(self, query = None):
        """Re-highlight all matches of *query* (defaults to self.find_text)."""
        query = self.find_text if not query else query
        buffer = self._active_src_view.get_buffer()
        # Also clears tag from buffer so if no query we're clean in ui
        search_tag = self.get_search_tag(buffer)

        self.update_style(1)
        if not query:
            self._find_status_lbl.set_label(f"Find in current buffer")
            self.update_style(0)
            return

        start_itr = buffer.get_start_iter()
        end_itr = buffer.get_end_iter()

        results, total_count = self.search(start_itr, query)
        self._update_status_lbl(total_count, query)
        for start, end in results:
            buffer.apply_tag(search_tag, start, end)

    def search(self, start_itr = None, query = None, limit = None):
        """Collect all (start, end) iter pairs matching *query*.

        Returns (results, count); (None, None) when arguments are missing.
        Honors the case-sensitivity and selection-only option flags.
        """
        if not start_itr or not query: return None, None

        flags = Gtk.TextSearchFlags.VISIBLE_ONLY | Gtk.TextSearchFlags.TEXT_ONLY
        if not self.use_case_sensitive:
            flags = flags | Gtk.TextSearchFlags.CASE_INSENSITIVE

        if self.search_only_in_selection and self._buffer.get_has_selection():
            start_itr, limit = self._buffer.get_selection_bounds()

        _results = []
        while True:
            result = start_itr.forward_search(query, flags, limit)
            if not result: break

            _results.append(result)
            start_itr = result[1]   # continue scanning after this match

        results = self.apply_filters(_results, query)
        return results, len(results)

    def apply_filters(self, _results, query):
        """Drop raw matches that fail the whole-word option (when enabled)."""
        results = []
        for start, end in _results:
            text = self._buffer.get_slice(start, end, include_hidden_chars = False)
            if self.use_whole_word_search:
                if not self.is_whole_word(start, end):
                    continue

            results.append([start, end])

        return results

    def find_next(self, widget, eve = None, use_data = None):
        """Move the cursor to the next highlighted match, wrapping to the top."""
        mark = self._buffer.get_insert()
        iter = self._buffer.get_iter_at_mark(mark)
        iter.forward_line()   # skip past the current line before scanning

        search_tag = self._tag_table.lookup(self.search_tag)
        next_tag_found = iter.forward_to_tag_toggle(search_tag)
        if not next_tag_found:
            # Wrap around: restart the scan from the top of the buffer.
            self._buffer.place_cursor( self._buffer.get_start_iter() )
            mark = self._buffer.get_insert()
            iter = self._buffer.get_iter_at_mark(mark)
            iter.forward_to_tag_toggle(search_tag)

        self._buffer.place_cursor(iter)
        self._active_src_view.scroll_to_mark( self._buffer.get_insert(), 0.0, True, 0.0, 0.0 )

    def find_all(self, widget):
        ...
|
||||||
94
plugins/search_replace/replace_mixin.py
Normal file
94
plugins/search_replace/replace_mixin.py
Normal file
@@ -0,0 +1,94 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class ReplaceMixin:
    """Replace operations built on the highlight tag the search pass applies:
    tagged spans in the buffer are the matches to be replaced."""

    def replace(self, widget):
        """Replace the first tagged match and scroll it into view.

        No-op when either the query or the replacement text is empty.
        """
        replace_text = self._replace_entry.get_text()
        if self.find_text and replace_text:
            # Group delete + insert into a single undo step.
            self._buffer.begin_user_action()

            iter = self._buffer.get_start_iter()
            search_tag = self._tag_table.lookup(self.search_tag)

            iter.forward_to_tag_toggle(search_tag)
            self._do_replace(iter, replace_text)
            self._active_src_view.scroll_to_iter( iter, 0.0, True, 0.0, 0.0 )

            self._buffer.end_user_action()

    def replace_all(self, widget):
        """Replace every tagged match, rescanning from the top each time
        (iters are invalidated by buffer edits)."""
        replace_text = self._replace_entry.get_text()
        if self.find_text:
            self._buffer.begin_user_action()

            # FIX: removed unused `mark = self._buffer.get_insert()` local.
            iter = self._buffer.get_start_iter()
            search_tag = self._tag_table.lookup(self.search_tag)

            while iter.forward_to_tag_toggle(search_tag):
                self._do_replace(iter, replace_text)
                iter = self._buffer.get_start_iter()

            self._buffer.end_user_action()

    def _do_replace(self, iter, text):
        # *iter* sits at the start of a tagged span; replace that span.
        start, end = self.get_start_end(iter)
        self.replace_in_buffer(start, end, text)

    def replace_in_buffer(self, start, end, text):
        """Delete [start, end) and insert *text* at the deletion point.

        A mark anchors the position because deleting invalidates the iters.
        """
        pos_mark = self._buffer.create_mark("find-replace", end, True)
        self._buffer.delete(start, end)
        replace_iter = self._buffer.get_iter_at_mark(pos_mark)
        self._buffer.insert(replace_iter, text)

    def get_start_end(self, iter):
        """Walk forward from *iter* to the end of the current tagged span and
        return (start, end) iters bounding it."""
        start = iter.copy()
        end = None

        while True:
            iter.forward_char()
            tags = iter.get_tags()
            valid = False
            for tag in tags:
                if tag.props.name and self.search_tag in tag.props.name:
                    valid = True
                    break

            if valid:
                continue   # still inside the tagged span

            end = iter.copy()
            break

        return start, end

    # NOTE: Below, lovingly taken from Hamad Al Marri's Gamma text editor.
    # Link: https://gitlab.com/hamadmarri/gamma-text-editor
    def is_whole_word(self, match_start, match_end):
        """Return True when the match is not flanked by alphanumerics, i.e.
        it is a whole word rather than part of a larger identifier."""
        is_prev_a_char = True
        is_next_a_char = True

        prev_iter = match_start.copy()
        next_iter = match_end.copy()
        if not prev_iter.backward_char():
            # Match starts at the buffer start: nothing before it.
            is_prev_a_char = False
        else:
            c = prev_iter.get_char()
            is_prev_a_char = (c.isalpha() or c.isdigit())

        # NOTE(review): `not next_iter` is always False (copy() returns a live
        # iter object), so the else branch always runs; at buffer end
        # get_char() yields a non-alphanumeric sentinel so the result is
        # still correct — confirm intent against upstream Gamma source.
        if not next_iter:
            is_next_a_char = False
        else:
            c = next_iter.get_char()
            is_next_a_char = (c.isalpha() or c.isdigit())

        is_word = (not is_prev_a_char and not is_next_a_char)

        # Note: Both must be false to be a word...
        return is_word
|
||||||
299
plugins/search_replace/search_replace.glade
Normal file
299
plugins/search_replace/search_replace.glade
Normal file
@@ -0,0 +1,299 @@
|
|||||||
|
<?xml version="1.0" encoding="UTF-8"?>
|
||||||
|
<!-- Generated with glade 3.40.0 -->
|
||||||
|
<interface>
|
||||||
|
<requires lib="gtk+" version="3.24"/>
|
||||||
|
<object class="GtkImage" id="close_img">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="stock">gtk-close</property>
|
||||||
|
</object>
|
||||||
|
<object class="GtkImage" id="only-in-selection">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="pixbuf">../../icons/only-in-selection.png</property>
|
||||||
|
</object>
|
||||||
|
<object class="GtkImage" id="whole-word">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="pixbuf">../../icons/whole-word.png</property>
|
||||||
|
</object>
|
||||||
|
<object class="GtkPopover" id="search_replace_dialog">
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="modal">False</property>
|
||||||
|
<property name="transitions-enabled">False</property>
|
||||||
|
<property name="constrain-to">none</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="orientation">vertical</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkLabel" id="find_status_lbl">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="margin-start">5</property>
|
||||||
|
<property name="label" translatable="yes">Find in Current Buffer</property>
|
||||||
|
<property name="xalign">0</property>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkLabel" id="find_options_lbl">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="xpad">20</property>
|
||||||
|
<property name="label" translatable="yes">Finding with Options: Case Insensitive</property>
|
||||||
|
<property name="xalign">0</property>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButtonBox">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="layout-style">start</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkToggleButton">
|
||||||
|
<property name="label" translatable="yes">.*</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="sensitive">False</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Use Regex</property>
|
||||||
|
<signal name="toggled" handler="tggle_regex" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkToggleButton">
|
||||||
|
<property name="label" translatable="yes">Aa</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Match Case</property>
|
||||||
|
<signal name="toggled" handler="tggle_case_sensitive" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkToggleButton">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Only In Selection</property>
|
||||||
|
<property name="image">only-in-selection</property>
|
||||||
|
<property name="always-show-image">True</property>
|
||||||
|
<signal name="toggled" handler="tggle_selection_only_scan" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">2</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkToggleButton">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Whole Word</property>
|
||||||
|
<property name="image">whole-word</property>
|
||||||
|
<property name="always-show-image">True</property>
|
||||||
|
<signal name="toggled" handler="tggle_whole_word_search" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">3</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Close Panel</property>
|
||||||
|
<property name="image">close_img</property>
|
||||||
|
<property name="always-show-image">True</property>
|
||||||
|
<signal name="clicked" handler="_tggl_search_replace" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">True</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">4</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="position">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<!-- n-columns=10 n-rows=2 -->
|
||||||
|
<object class="GtkGrid">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">False</property>
|
||||||
|
<property name="column-homogeneous">True</property>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="label" translatable="yes">Replace All</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Replace All</property>
|
||||||
|
<property name="margin-start">5</property>
|
||||||
|
<property name="margin-end">5</property>
|
||||||
|
<property name="margin-top">5</property>
|
||||||
|
<property name="margin-bottom">5</property>
|
||||||
|
<signal name="clicked" handler="replace_all" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">9</property>
|
||||||
|
<property name="top-attach">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="label" translatable="yes">Replace</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="tooltip-text" translatable="yes">Replace Next</property>
|
||||||
|
<property name="margin-start">5</property>
|
||||||
|
<property name="margin-end">10</property>
|
||||||
|
<property name="margin-top">5</property>
|
||||||
|
<property name="margin-bottom">5</property>
|
||||||
|
<signal name="clicked" handler="replace" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">8</property>
|
||||||
|
<property name="top-attach">1</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="label" translatable="yes">Find All</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="margin-start">5</property>
|
||||||
|
<property name="margin-end">5</property>
|
||||||
|
<property name="margin-top">5</property>
|
||||||
|
<property name="margin-bottom">5</property>
|
||||||
|
<signal name="clicked" handler="find_all" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">9</property>
|
||||||
|
<property name="top-attach">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkButton">
|
||||||
|
<property name="label" translatable="yes">Find</property>
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="focus-on-click">False</property>
|
||||||
|
<property name="receives-default">True</property>
|
||||||
|
<property name="margin-start">5</property>
|
||||||
|
<property name="margin-end">5</property>
|
||||||
|
<property name="margin-top">5</property>
|
||||||
|
<property name="margin-bottom">5</property>
|
||||||
|
<signal name="clicked" handler="find_next" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">8</property>
|
||||||
|
<property name="top-attach">0</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkSearchEntry" id="find_entry">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="primary-icon-name">edit-find-symbolic</property>
|
||||||
|
<property name="primary-icon-activatable">False</property>
|
||||||
|
<property name="primary-icon-sensitive">False</property>
|
||||||
|
<property name="placeholder-text" translatable="yes">Find in current buffer</property>
|
||||||
|
<signal name="key-release-event" handler="on_enter_search" swapped="no"/>
|
||||||
|
<signal name="search-changed" handler="search_for_string" swapped="no"/>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">0</property>
|
||||||
|
<property name="top-attach">0</property>
|
||||||
|
<property name="width">8</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
<child>
|
||||||
|
<object class="GtkSearchEntry" id="replace_entry">
|
||||||
|
<property name="visible">True</property>
|
||||||
|
<property name="can-focus">True</property>
|
||||||
|
<property name="primary-icon-name">edit-find-symbolic</property>
|
||||||
|
<property name="primary-icon-activatable">False</property>
|
||||||
|
<property name="primary-icon-sensitive">False</property>
|
||||||
|
<property name="placeholder-text" translatable="yes">Replace in current buffer</property>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="left-attach">0</property>
|
||||||
|
<property name="top-attach">1</property>
|
||||||
|
<property name="width">8</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
<packing>
|
||||||
|
<property name="expand">False</property>
|
||||||
|
<property name="fill">True</property>
|
||||||
|
<property name="padding">10</property>
|
||||||
|
<property name="position">3</property>
|
||||||
|
</packing>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</child>
|
||||||
|
</object>
|
||||||
|
</interface>
|
||||||
66
plugins/search_replace/styling_mixin.py
Normal file
66
plugins/search_replace/styling_mixin.py
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class StylingMixin:
    """Mixin providing find/replace option toggles and search-entry styling.

    Expects the host class to provide: self._find_entry, self._find_options_lbl,
    self._find_status_lbl, and self.search_for_string().
    """

    def tggle_regex(self, widget):
        # NOTE(review): this reads the *inverted* toggle state while the other
        # three handlers use get_active() directly — looks intentional for the
        # widget wired in Glade, but TODO confirm against the UI file.
        self.use_regex = not widget.get_active()
        self._set_find_options_lbl()
        self.search_for_string(self._find_entry)

    def tggle_case_sensitive(self, widget):
        self.use_case_sensitive = widget.get_active()
        self._set_find_options_lbl()
        self.search_for_string(self._find_entry)

    def tggle_selection_only_scan(self, widget):
        self.search_only_in_selection = widget.get_active()
        self._set_find_options_lbl()
        self.search_for_string(self._find_entry)

    def tggle_whole_word_search(self, widget):
        self.use_whole_word_search = widget.get_active()
        self._set_find_options_lbl()
        self.search_for_string(self._find_entry)

    def _set_find_options_lbl(self):
        """Rebuild the human-readable summary of the active find options."""
        find_options = "Finding with Options: "

        if self.use_regex:
            find_options += "Regex"

        find_options += ", " if self.use_regex else ""
        # BUGFIX: "Inensitive" typo in the user-facing label.
        find_options += "Case Sensitive" if self.use_case_sensitive else "Case Insensitive"

        if self.search_only_in_selection:
            find_options += ", Within Current Selection"

        if self.use_whole_word_search:
            find_options += ", Whole Word"

        self._find_options_lbl.set_label(find_options)

    def update_style(self, state):
        """Swap the search entry's CSS class: 0=searching, 1=success, 2=fail."""
        self._find_entry.get_style_context().remove_class("searching")
        self._find_entry.get_style_context().remove_class("search_success")
        self._find_entry.get_style_context().remove_class("search_fail")

        if state == 0:
            self._find_entry.get_style_context().add_class("searching")
        elif state == 1:
            self._find_entry.get_style_context().add_class("search_success")
        elif state == 2:
            self._find_entry.get_style_context().add_class("search_fail")

    def _update_status_lbl(self, total_count: int = 0, query: str = None):
        """Update the result-count label; no-op when there is no query."""
        if not query: return

        count = total_count if total_count > 0 else "No"
        # BUGFIX: zero results must read "No results" (plural), not
        # "No result"; singular only for exactly one match.
        plural = "" if total_count == 1 else "s"

        if total_count == 0: self.update_style(2)
        self._find_status_lbl.set_label(f"{count} result{plural} found for '{query}'")
|
||||||
3
plugins/template/__init__.py
Normal file
3
plugins/template/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
Plugin Module
"""
|
||||||
3
plugins/template/__main__.py
Normal file
3
plugins/template/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
Plugin Package
"""
|
||||||
11
plugins/template/manifest.json
Normal file
11
plugins/template/manifest.json
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"name": "Example Plugin",
|
||||||
|
"author": "John Doe",
|
||||||
|
"version": "0.0.1",
|
||||||
|
"support": "",
|
||||||
|
"requests": {
|
||||||
|
"ui_target": "plugin_control_list",
|
||||||
|
"pass_events": true,
|
||||||
|
"bind_keys": ["Example Plugin||send_message:<Control>f"]
|
||||||
|
}
|
||||||
|
}
|
||||||
51
plugins/template/plugin.py
Normal file
51
plugins/template/plugin.py
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
# Python imports
|
||||||
|
import os
|
||||||
|
import threading
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugin_base import PluginBase
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: Threads WILL NOT die with parent's destruction.
|
||||||
|
def threaded(fn):
    """Decorator: run *fn* in a fresh non-daemon thread on every call.

    Returns the started Thread so callers may join() it. Backward compatible:
    the previous version returned None, which callers ignored.
    """
    from functools import wraps

    @wraps(fn)  # preserve fn's __name__/__doc__ for debugging/introspection
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=False)
        thread.start()
        return thread
    return wrapper
|
||||||
|
|
||||||
|
# NOTE: Threads WILL die with parent's destruction.
|
||||||
|
def daemon_threaded(fn):
    """Decorator: run *fn* in a fresh daemon thread on every call.

    Returns the started Thread so callers may join() it. Backward compatible:
    the previous version returned None, which callers ignored.
    """
    from functools import wraps

    @wraps(fn)  # preserve fn's __name__/__doc__ for debugging/introspection
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=fn, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread
    return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Plugin(PluginBase):
    """Example plugin demonstrating the minimal plugin surface."""

    def __init__(self):
        super().__init__()

        # NOTE: Need to remove after establishing private bidirectional 1-1
        # message bus where self.name should not be needed for message comms.
        self.name = "Example Plugin"

    def generate_reference_ui_element(self):
        """Build and return the button the host embeds for this plugin."""
        btn = Gtk.Button(label=self.name)
        btn.connect("button-release-event", self.send_message)
        return btn

    def run(self):
        ...

    def send_message(self, widget=None, eve=None):
        """Emit a demo warning message onto the global event bus."""
        event_system.emit("display_message", ("warning", "Hello, World!", None))
|
||||||
13
pyrightconfig.json
Normal file
13
pyrightconfig.json
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
{
|
||||||
|
"reportUndefinedVariable": false,
|
||||||
|
"reportUnusedVariable": false,
|
||||||
|
"reportUnusedImport": true,
|
||||||
|
"reportDuplicateImport": true,
|
||||||
|
"executionEnvironments": [
|
||||||
|
{
|
||||||
|
"root": "./src/versions/solarfm-0.0.1/solarfm"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"venvPath": ".",
|
||||||
|
"venv": ".venv"
|
||||||
|
}
|
||||||
7
requirements.txt
Normal file
7
requirements.txt
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
PyGObject==3.40.1
|
||||||
|
pygobject-stubs --no-cache-dir --config-settings=config=Gtk3,Gdk3,Soup2
|
||||||
|
setproctitle==1.2.2
|
||||||
|
pyxdg==0.27
|
||||||
|
psutil==5.8.0
|
||||||
|
pycryptodome==3.20.0
|
||||||
|
sqlmodel==0.0.19
|
||||||
79
src/__builtins__.py
Normal file
79
src/__builtins__.py
Normal file
@@ -0,0 +1,79 @@
|
|||||||
|
# Python imports
|
||||||
|
import builtins
|
||||||
|
import traceback
|
||||||
|
import threading
|
||||||
|
import sys
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
# from libs.db import DB
|
||||||
|
from libs.event_system import EventSystem
|
||||||
|
from libs.endpoint_registry import EndpointRegistry
|
||||||
|
from libs.keybindings import Keybindings
|
||||||
|
from libs.logger import Logger
|
||||||
|
from libs.settings.manager import SettingsManager
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# NOTE: Threads WILL NOT die with parent's destruction.
|
||||||
|
def threaded_wrapper(fn):
    """Decorator: run *fn* in a fresh non-daemon thread; returns the Thread."""
    from functools import wraps

    @wraps(fn)  # keep fn's metadata on the wrapper
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target = fn, args = args, kwargs = kwargs, daemon = False)
        thread.start()
        return thread
    return wrapper
|
||||||
|
|
||||||
|
# NOTE: Threads WILL die with parent's destruction.
|
||||||
|
def daemon_threaded_wrapper(fn):
    """Decorator: run *fn* in a fresh daemon thread; returns the Thread."""
    from functools import wraps

    @wraps(fn)  # keep fn's metadata on the wrapper
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target = fn, args = args, kwargs = kwargs, daemon = True)
        thread.start()
        return thread
    return wrapper
|
||||||
|
|
||||||
|
def call_chain_wrapper(fn):
    """Decorator: print the current call stack, then delegate to *fn*.

    Debug aid; the output format (two blank lines around stripped stack
    frames) is deliberately kept as before.
    """
    from functools import wraps

    @wraps(fn)  # keep fn's metadata on the wrapper
    def wrapper(*args, **kwargs):
        print()
        print()
        for line in traceback.format_stack():
            print( line.strip() )
        print()
        print()

        return fn(*args, **kwargs)
    return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
# Inject application-wide singletons into `builtins` so every module sees
# them as globals (logger, event_system, settings_manager, ...). Order
# matters: settings must load before the logger reads its config.
# NOTE: Just reminding myself we can add to builtins two different ways...
# __builtins__.update({"event_system": Builtins()})
builtins.APP_NAME = "Newton"

builtins.keybindings = Keybindings()
builtins.event_system = EventSystem()
builtins.endpoint_registry = EndpointRegistry()
builtins.settings_manager = SettingsManager()
# builtins.db = DB()

settings_manager.load_settings()

# Logger levels come from the just-loaded settings; config path from the
# settings manager's path manager.
builtins.logger = Logger(
    settings_manager.path_manager.get_home_config_path(), \
    _ch_log_lvl = settings_manager.settings.debugging.ch_log_lvl, \
    _fh_log_lvl = settings_manager.settings.debugging.fh_log_lvl
).get_logger()

# Expose the threading decorators globally under short names.
builtins.threaded = threaded_wrapper
builtins.daemon_threaded = daemon_threaded_wrapper
builtins.call_chain = call_chain_wrapper

# def custom_except_hook(exc_type, exc_value, exc_traceback):
#     if issubclass(exc_type, KeyboardInterrupt):
#         sys.__excepthook__(exc_type, exc_value, exc_traceback)
#         return

#     logger.error("Uncaught exception", exc_info = (exc_type, exc_value, exc_traceback))

# sys.excepthook = custom_except_hook
|
||||||
3
src/__init__.py
Normal file
3
src/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Src Package.
|
||||||
|
"""
|
||||||
58
src/__main__.py
Normal file
58
src/__main__.py
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Python imports
|
||||||
|
import argparse
|
||||||
|
import faulthandler
|
||||||
|
import traceback
|
||||||
|
from setproctitle import setproctitle
|
||||||
|
|
||||||
|
import tracemalloc
|
||||||
|
tracemalloc.start()
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from __builtins__ import *
|
||||||
|
from app import Application
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def main():
    """Configure global state from the parsed CLI flags, then launch the app.

    Reads the module-level `args` populated in the __main__ guard; the
    `settings_manager` and `APP_NAME` globals come from src/__builtins__.py.
    """
    setproctitle(f'{APP_NAME}')
    settings_manager.set_start_load_time()

    # Flags arrive as the strings "true"/"false" from argparse defaults.
    for flag, setter in ((args.debug, settings_manager.set_debug),
                         (args.trace_debug, settings_manager.set_trace_debug)):
        if flag == "true":
            setter(True)

    settings_manager.do_dirty_start_check()

    Application().run()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
    ''' Set process title, get arguments, and create GTK main thread. '''

    parser = argparse.ArgumentParser()
    # Add long and short arguments
    parser.add_argument("--debug", "-d", default = "false", help = "Do extra console messaging.")
    parser.add_argument("--trace-debug", "-td", default = "false", help = "Disable saves, ignore IPC lock, do extra console messaging.")
    parser.add_argument("--no-plugins", "-np", default = "false", help = "Do not load plugins.")

    parser.add_argument("--new-tab", "-nt", default = "false", help = "Opens a 'New Tab' if a handler is set for it.")
    parser.add_argument("--file", "-f", default = "default", help = "JUST SOME FILE ARG.")

    # Read arguments (If any...)
    args, unknownargs = parser.parse_known_args()
    settings_manager.set_starting_args( args, unknownargs )

    try:
        faulthandler.enable()  # For better debug info
        main()
    except Exception:
        # FIX: dropped the unused `as e` binding — print_exc() already
        # reports the active exception.
        traceback.print_exc()
        quit()
|
||||||
72
src/app.py
Normal file
72
src/app.py
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
# Python imports
|
||||||
|
from contextlib import suppress
|
||||||
|
import signal
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from libs.debugging import debug_signal_handler
|
||||||
|
from libs.ipc_server import IPCServer
|
||||||
|
from core.window import Window
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class AppLaunchException(Exception):
    """Raised when the application fails to launch."""
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class Application:
    """Application bootstrap: enforces single-instance via IPC, then starts the main Window."""

    def __init__(self):
        super(Application, self).__init__()

        self.setup_debug_hook()

    def run(self):
        """Start the UI; under trace-debug the IPC single-instance lock is skipped."""
        if not settings_manager.is_trace_debug():
            if not self.load_ipc():
                return

        win = Window()
        win.start()

    def load_ipc(self):
        """Return True when this process owns the IPC server.

        Otherwise an instance is already running: forward any file/dir
        arguments to it and return False so this process exits.
        """
        args, \
        unknownargs = settings_manager.get_starting_args()
        ipc_server = IPCServer()

        self.ipc_realization_check(ipc_server)
        if ipc_server.is_ipc_alive:
            return True

        # BUGFIX: was `app_name`, which is undefined and raised NameError on
        # this path; the builtin injected in src/__builtins__.py is APP_NAME.
        logger.warning(f"{APP_NAME} IPC Server Exists: Have sent path(s) to it and closing...")
        for arg in unknownargs + [args.new_tab,]:
            if os.path.isfile(arg):
                message = f"FILE|{arg}"
                ipc_server.send_ipc_message(message)

            if os.path.isdir(arg):
                message = f"DIR|{arg}"
                ipc_server.send_ipc_message(message)

        return False

    def ipc_realization_check(self, ipc_server):
        """Try to become the IPC listener; if taken, ping the existing one."""
        try:
            ipc_server.create_ipc_listener()
        except Exception:
            ipc_server.send_test_ipc_message()

    def setup_debug_hook(self):
        # Typically: ValueError: signal only works in main thread
        with suppress(ValueError):
            # kill -SIGUSR2 <pid> from Linux/Unix or SIGBREAK signal from Windows
            signal.signal(
                vars(signal).get("SIGBREAK") or vars(signal).get("SIGUSR2"),
                debug_signal_handler
            )
|
||||||
|
|
||||||
3
src/core/__init__.py
Normal file
3
src/core/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Core Package
|
||||||
|
"""
|
||||||
33
src/core/builder_wrapper.py
Normal file
33
src/core/builder_wrapper.py
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BuilderWrapper(Gtk.Builder):
    """Gtk.Builder that can additionally register arbitrary Python objects.

    Objects stored with use_gtk=False live in a plain dict side-table and are
    never seen by GTK. NOTE(review): parameters `id` and `object` shadow
    builtins but are part of the public call signature, so they are left as-is.
    """

    def __init__(self):
        super(BuilderWrapper, self).__init__()

        # Side-table for non-GTK objects keyed by their string id.
        self.objects = {}

    def get_object(self, id: str, use_gtk: bool = True) -> object:
        """Look up *id*: the GTK builder when use_gtk, else the side-table.

        Raises KeyError for an unknown id when use_gtk is False.
        """
        if not use_gtk:
            return self.objects[id]

        return super(BuilderWrapper, self).get_object(id)

    def expose_object(self, id: str, object: object, use_gtk: bool = True) -> None:
        """Register *object* under *id*, in GTK or in the side-table."""
        if not use_gtk:
            self.objects[id] = object
        else:
            super(BuilderWrapper, self).expose_object(id, object)

    def dereference_object(self, id: str) -> None:
        """Drop *id* from the side-table (side-table entries only).

        Raises KeyError if *id* was never exposed with use_gtk=False.
        """
        del self.objects[id]
|
||||||
49
src/core/containers/base_container.py
Normal file
49
src/core/containers/base_container.py
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from .header_container import HeaderContainer
|
||||||
|
from .body_container import BodyContainer
|
||||||
|
from .footer_container import FooterContainer
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BaseContainer(Gtk.Box):
    """Root vertical box stacking header, body, and footer containers.

    Listens on the event bus to add/remove the main-window transparency
    CSS class driven by the current theming setting.
    """

    def __init__(self):
        super(BaseContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("base-container")
        self.set_orientation(Gtk.Orientation.VERTICAL)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        event_system.subscribe("update-transparency", self._update_transparency)
        event_system.subscribe("remove-transparency", self._remove_transparency)

    def _load_widgets(self):
        for child in (HeaderContainer(), BodyContainer(), FooterContainer()):
            self.add(child)

    def _transparency_class(self):
        # CSS class name derived from the user's transparency setting.
        return f"mw_transparency_{settings_manager.settings.theming.transparency}"

    def _update_transparency(self):
        self.ctx.add_class(self._transparency_class())

    def _remove_transparency(self):
        self.ctx.remove_class(self._transparency_class())
|
||||||
42
src/core/containers/body_container.py
Normal file
42
src/core/containers/body_container.py
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from .left_container import LeftContainer
|
||||||
|
from .center_container import CenterContainer
|
||||||
|
from .right_container import RightContainer
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BodyContainer(Gtk.Box):
    """Horizontal box splitting the app body into left, center, and right panes."""

    def __init__(self):
        super(BodyContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        ctx = self.get_style_context()
        ctx.add_class("body-container")
        self.ctx = ctx

        self.set_orientation(Gtk.Orientation.HORIZONTAL)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        # Pack panes left-to-right.
        for pane in (LeftContainer(), CenterContainer(), RightContainer()):
            self.add(pane)
|
||||||
48
src/core/containers/center_container.py
Normal file
48
src/core/containers/center_container.py
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ..widgets.code.general_info_widget import GeneralInfoWidget
|
||||||
|
from .code.code_container import CodeContainer
|
||||||
|
from ..widgets.save_file_dialog import SaveFileDialog
|
||||||
|
from ..widgets.controls.open_files_button import OpenFilesButton
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class CenterContainer(Gtk.Box):
    """Central expanding pane: info banner on top of the code container."""

    def __init__(self):
        super(CenterContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("center-container")

        # Expand both ways: this pane owns the leftover window space.
        self.set_orientation(Gtk.Orientation.VERTICAL)
        self.set_hexpand(True)
        self.set_vexpand(True)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        # Constructed but not added — presumably these self-register via the
        # event bus / builder rather than packing here; TODO confirm.
        SaveFileDialog()
        OpenFilesButton()

        self.add( GeneralInfoWidget() )
        self.add( CodeContainer() )
|
||||||
3
src/core/containers/code/__init__.py
Normal file
3
src/core/containers/code/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
Code Package
"""
|
||||||
38
src/core/containers/code/code_container.py
Normal file
38
src/core/containers/code/code_container.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ...widgets.code.tabs_widget import TabsWidget
|
||||||
|
|
||||||
|
from .editors_container import EditorsContainer
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class CodeContainer(Gtk.Box):
    """Vertical box stacking the tab strip above the editors area."""

    def __init__(self):
        super(CodeContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show_all()

    def _setup_styling(self):
        self.set_orientation(Gtk.Orientation.VERTICAL)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        # Tabs first, editors below.
        for widget in (TabsWidget(), EditorsContainer()):
            self.add(widget)
|
||||||
38
src/core/containers/code/editors_container.py
Normal file
38
src/core/containers/code/editors_container.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ...widgets.separator_widget import Separator
|
||||||
|
from ...widgets.code.miniview_widget import MiniViewWidget
|
||||||
|
from .paned_editors_container import PanedEditorsContainer
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class EditorsContainer(Gtk.Box):
    """Row making up the editors area: separators, paned editors, mini view."""

    def __init__(self):
        super(EditorsContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

    def _setup_styling(self):
        ...

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        # Pack in display order, left to right.
        for widget in (Separator("separator_left"),
                       PanedEditorsContainer(),
                       Separator("separator_right"),
                       MiniViewWidget()):
            self.add(widget)
|
||||||
69
src/core/containers/code/paned_editors_container.py
Normal file
69
src/core/containers/code/paned_editors_container.py
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
from gi.repository import GLib
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ...widgets.code.view import SourceView
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class PanedEditorsContainer(Gtk.Paned):
    """Split pane holding two linked SourceView editors, centered on first show."""

    def __init__(self):
        super(PanedEditorsContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("paned-editors-container")

        self.set_hexpand(True)
        self.set_vexpand(True)
        # self.set_homogeneous(True)
        self.set_wide_handle(True)

    def _setup_signals(self):
        # One-shot hook: "map" fires when the widget first becomes visible.
        self.map_id = self.connect("map", self._init_map)

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        scrolled_win1 = Gtk.ScrolledWindow()
        scrolled_win2 = Gtk.ScrolledWindow()
        source_view1  = SourceView()
        source_view2  = SourceView()

        # Link the editors so each knows its neighbor.
        source_view1.sibling_right = source_view2
        source_view2.sibling_left  = source_view1

        scrolled_win1.add( source_view1 )
        scrolled_win2.add( source_view2 )

        self.add1(scrolled_win1)
        self.add2(scrolled_win2)

    def _init_map(self, view):
        def _first_show_init():
            # Detach the one-shot map handler so this never runs again.
            self.disconnect(self.map_id)
            del self.map_id

            self._handle_first_show()

            return False  # GLib: False => do not reschedule this timeout

        # Defer until allocation settles so get_allocated_width() is valid.
        GLib.timeout_add(250, _first_show_init)

    def _handle_first_show(self):
        # BUGFIX: set_position() takes a gint; true division produced a float,
        # which PyGObject rejects. Floor-divide instead.
        self.set_position(
            self.get_allocated_width() // 2
        )
|
||||||
|
|
||||||
43
src/core/containers/footer_container.py
Normal file
43
src/core/containers/footer_container.py
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ..widgets.vte_widget import VteWidget
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class FooterContainer(Gtk.Box):
    """Bottom bar holding the terminal (VTE) widget, hidden by default."""

    def __init__(self):
        super(FooterContainer, self).__init__()


        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("footer-container")

        self.set_orientation(Gtk.Orientation.HORIZONTAL)
        self.set_hexpand(True)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        vte_widget = VteWidget()

        # Hidden on startup — presumably toggled visible elsewhere
        # (event/keybinding); TODO confirm against VteWidget.
        vte_widget.hide()
        self.add( vte_widget )
|
||||||
40
src/core/containers/header_container.py
Normal file
40
src/core/containers/header_container.py
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ..widgets.controls.transparency_scale import TransparencyScale
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class HeaderContainer(Gtk.Box):
    """Top bar container; visibility is toggled via the 'tggl-top-main-menubar' event."""

    def __init__(self):
        super(HeaderContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("header-container")

        self.set_orientation(Gtk.Orientation.VERTICAL)
        self.set_hexpand(True)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        event_system.subscribe("tggl-top-main-menubar", self.tggl_top_main_menubar)

    def _load_widgets(self):
        self.add( TransparencyScale() )

    def tggl_top_main_menubar(self):
        # Flip visibility of the whole header bar.
        if self.is_visible():
            self.hide()
        else:
            self.show_all()
|
||||||
39
src/core/containers/left_container.py
Normal file
39
src/core/containers/left_container.py
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from ..widgets.code.miniview_widget import MiniViewWidget
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class LeftContainer(Gtk.Box):
    """Left-hand vertical pane; currently holds no widgets."""

    def __init__(self):
        super(LeftContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("left-container")
        self.set_orientation(Gtk.Orientation.VERTICAL)
        self.set_vexpand(True)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        ...
|
||||||
38
src/core/containers/right_container.py
Normal file
38
src/core/containers/right_container.py
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class RightContainer(Gtk.Box):
    """Right-hand vertical pane; currently holds no widgets."""

    def __init__(self):
        super(RightContainer, self).__init__()

        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_widgets()

        self.show()

    def _setup_styling(self):
        self.ctx = self.get_style_context()
        self.ctx.add_class("right-container")
        self.set_orientation(Gtk.Orientation.VERTICAL)
        self.set_vexpand(True)

    def _setup_signals(self):
        ...

    def _subscribe_to_events(self):
        ...

    def _load_widgets(self):
        ...
|
||||||
3
src/core/controllers/__init__.py
Normal file
3
src/core/controllers/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
"""
|
||||||
|
Controllers Package
|
||||||
|
"""
|
||||||
71
src/core/controllers/base_controller.py
Normal file
71
src/core/controllers/base_controller.py
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
# Python imports
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
import gi
|
||||||
|
gi.require_version('Gtk', '3.0')
|
||||||
|
from gi.repository import Gtk
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from libs.mixins.ipc_signals_mixin import IPCSignalsMixin
|
||||||
|
from libs.mixins.keyboard_signals_mixin import KeyboardSignalsMixin
|
||||||
|
|
||||||
|
from ..containers.base_container import BaseContainer
|
||||||
|
|
||||||
|
from .base_controller_data import BaseControllerData
|
||||||
|
from .bridge_controller import BridgeController
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BaseController(IPCSignalsMixin, KeyboardSignalsMixin, BaseControllerData):
    """Top-level controller: wires state, events, sub-controllers, and plugins.

    Construction order matters: controller data must exist before signals,
    event subscriptions, sub-controllers, and plugin loading run.
    """

    def __init__(self):

        self._setup_controller_data()
        self._setup_styling()
        self._setup_signals()
        self._subscribe_to_events()
        self._load_controllers()
        self._load_plugins_and_files()

        logger.info(f"Made it past {self.__class__} loading...")
        settings_manager.set_end_load_time()
        settings_manager.log_load_time()

    def _setup_styling(self):
        ...

    def _setup_signals(self):
        # self.window.connect("focus-out-event", self.unset_keys_and_data)
        # self.window.connect("key-press-event", self.on_global_key_press_controller)
        # self.window.connect("key-release-event", self.on_global_key_release_controller)
        ...

    def _subscribe_to_events(self):
        # IPC handlers come from IPCSignalsMixin.
        event_system.subscribe("shutting-down", lambda: print("Shutting down..."))
        event_system.subscribe("handle-file-from-ipc", self.handle_file_from_ipc)
        event_system.subscribe("handle-dir-from-ipc", self.handle_dir_from_ipc)
        event_system.subscribe("tggl-top-main-menubar", self._tggl_top_main_menubar)

    def _load_controllers(self):
        # Constructed for its side effects; no reference kept.
        BridgeController()

    def _load_plugins_and_files(self):
        """Launch plugins unless --no-plugins was passed on the CLI."""
        args, unknownargs = settings_manager.get_starting_args()

        if args.no_plugins == "false":
            self.plugins_controller.pre_launch_plugins()
            self.plugins_controller.post_launch_plugins()

    def _tggl_top_main_menubar(self):
        logger.debug("_tggl_top_main_menubar > stub...")

    def _load_glade_file(self):
        """Load the Glade UI, expose the main window, and build the base container."""
        self.builder.add_from_file( settings_manager.path_manager.get_glade_file() )
        self.builder.expose_object("main_window", self.window)

        settings_manager.set_builder(self.builder)
        self.base_container = BaseContainer()

        settings_manager.register_signals_to_builder([self, self.base_container])
|
||||||
107
src/core/controllers/base_controller_data.py
Normal file
107
src/core/controllers/base_controller_data.py
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
# Python imports
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
from shutil import which
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
from plugins.plugins_controller import PluginsController
|
||||||
|
from ..builder_wrapper import BuilderWrapper
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BaseControllerData:
    '''
    BaseControllerData contains most of the state of the app at any given
    time. It also has some support methods.

    NOTE(review): `settings_manager` and `logger` are not imported in this
    module — presumably app-wide globals injected at bootstrap; confirm.
    '''

    def _setup_controller_data(self) -> None:
        """Create window/builder/plugin state, then collect startup files and load the UI."""
        self.window             = settings_manager.get_main_window()
        self.builder            = BuilderWrapper()
        self.plugins_controller = PluginsController()

        # Populated later by _load_glade_file().
        self.base_container = None
        # Keyboard modifier tracking. NOTE(review): 'midified' keeps the
        # original spelling because the keyboard mixin (defined elsewhere)
        # may reference it — confirm before renaming.
        self.was_midified_key = False
        self.ctrl_down        = False
        self.shift_down       = False
        self.alt_down         = False

        self._collect_files_dirs()
        self._load_glade_file()

    def _collect_files_dirs(self):
        """Scan startup args for dirs/files and record them in settings_manager."""
        args, \
        unknownargs = settings_manager.get_starting_args()
        files = []

        for arg in unknownargs + [args.new_tab,]:
            path = arg.replace("file://", "")
            if os.path.isdir(path):
                files.append(f"DIR|{path}")
                continue

            # NOTE: If passing line number with file split against :
            if os.path.isfile(path.split(":")[0]):
                files.append(f"FILE|{path}")
                continue

            logger.info(f"Not a File: {arg}")

        if len(files) == 0: return

        settings_manager.set_is_starting_with_file(True)
        settings_manager.set_starting_files(files)

    def get_base_container(self):
        """Return the root container (None until the Glade UI is loaded)."""
        return self.base_container

    def clear_console(self) -> None:
        ''' Clears the terminal screen. '''
        os.system('cls' if os.name == 'nt' else 'clear')

    def call_method(self, _method_name: str, data):
        '''
        Calls a method from scope of class.

        Parameters:
            _method_name (str): method name to be called
            data: iterable of positional args (if any) to be passed to the
                  method. Note: It must be structured according to the given
                  method's requirements.

        Returns:
            Whatever the resolved method returns; a diagnostic string when no
            method with the given name exists.
        '''
        method_name = str(_method_name)
        # Fallback takes *data so an unknown key returns the diagnostic string
        # instead of raising TypeError (the old fixed-arity `lambda data:`
        # crashed when called with zero or multiple arguments).
        method = getattr(self, method_name,
                         lambda *data: f"No valid key passed...\nkey={method_name}\nargs={data}")
        return method(*data) if data else method()

    def has_method(self, obj, method) -> bool:
        ''' Checks if a given method exists and is callable on obj. '''
        return callable(getattr(obj, method, None))

    def clear_children(self, widget) -> None:
        ''' Clear children of a gtk widget. '''
        for child in widget.get_children():
            widget.remove(child)

    def get_clipboard_data(self, encoding = "utf-8") -> str:
        '''
        Return the clipboard contents via xclip, stripped and decoded.
        Returns None (after logging) when xclip is not installed.
        '''
        if not which("xclip"):
            logger.info('xclip not found...')
            return

        proc = subprocess.Popen(['xclip', '-selection', 'clipboard', '-o'],
                                stdout = subprocess.PIPE)
        # Read before wait() so a large clipboard cannot deadlock the pipe.
        data = proc.stdout.read()
        proc.wait()
        return data.decode(encoding).strip()

    def set_clipboard_data(self, data, encoding = "utf-8") -> None:
        '''
        Push data to the clipboard via xclip.
        No-op (after logging) when xclip is not installed.
        '''
        if not which("xclip"):
            logger.info('xclip not found...')
            return

        command = ['xclip', '-selection', 'clipboard']
        proc = subprocess.Popen(command, stdin = subprocess.PIPE)
        proc.stdin.write(data.encode(encoding))
        proc.stdin.close()
        proc.wait()
|
||||||
41
src/core/controllers/bridge_controller.py
Normal file
41
src/core/controllers/bridge_controller.py
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
# Python imports
|
||||||
|
import base64
|
||||||
|
|
||||||
|
# Lib imports
|
||||||
|
|
||||||
|
# Application imports
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class BridgeController:
|
||||||
|
def __init__(self):
|
||||||
|
|
||||||
|
self._setup_signals()
|
||||||
|
self._subscribe_to_events()
|
||||||
|
|
||||||
|
|
||||||
|
def _setup_signals(self):
|
||||||
|
...
|
||||||
|
|
||||||
|
def _subscribe_to_events(self):
|
||||||
|
event_system.subscribe("handle-bridge-event", self.handle_bridge_event)
|
||||||
|
|
||||||
|
|
||||||
|
def handle_bridge_event(self, event):
|
||||||
|
match event.topic:
|
||||||
|
case "save":
|
||||||
|
event_system.emit(f"handle-file-event-{event.originator}", (event,))
|
||||||
|
case "close":
|
||||||
|
event_system.emit(f"handle-file-event-{event.originator}", (event,))
|
||||||
|
case "load_buffer":
|
||||||
|
event_system.emit(f"handle-file-event-{event.originator}", (event,))
|
||||||
|
case "load_file":
|
||||||
|
event_system.emit(f"handle-file-event-{event.originator}", (event,))
|
||||||
|
case "alert":
|
||||||
|
content = base64.b64decode( event.content.encode() ).decode("utf-8")
|
||||||
|
logger.info(f"\nMessage Topic: {event.topic}\nMessage Content: {content}")
|
||||||
|
case "error":
|
||||||
|
content = base64.b64decode( event.content.encode() ).decode("utf-8")
|
||||||
|
logger.info(content)
|
||||||
|
case _:
|
||||||
|
...
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user