author    Hans Hagen <pragma@wxs.nl>    2014-04-28 23:24:00 +0200
committer Hans Hagen <pragma@wxs.nl>    2014-04-28 23:24:00 +0200
commit    3a9be11a68e3bd5453edf1c0c7d469442dbd0cf3 (patch)
tree      01d807ccd33b88e0c3e03e2ba2372bf665b0adc3
parent    7e02e6e8f9e6bee6c8813d3937fdbc8deb2d6e74 (diff)
download  context-3a9be11a68e3bd5453edf1c0c7d469442dbd0cf3.tar.gz
beta 2014.04.28 23:24
-rw-r--r--  context/data/scite/context/documents/scite-context-readme.pdf (renamed from context/data/scite/scite-context-readme.pdf)  bin 210958 -> 221437 bytes
-rw-r--r--  context/data/scite/context/documents/scite-context-readme.tex (renamed from context/data/scite/scite-context-readme.tex)  236
-rw-r--r--  context/data/scite/context/documents/scite-context-visual.pdf (renamed from context/data/scite/scite-context-visual.pdf)  0
-rw-r--r--  context/data/scite/context/documents/scite-context-visual.png (renamed from context/data/scite/scite-context-visual.png)  bin 77849 -> 77849 bytes
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-context.lua (renamed from context/data/scite/lexers/data/scite-context-data-context.lua)  0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-interfaces.lua (renamed from context/data/scite/lexers/data/scite-context-data-interfaces.lua)  0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-metafun.lua (renamed from context/data/scite/lexers/data/scite-context-data-metafun.lua)  0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-metapost.lua (renamed from context/data/scite/lexers/data/scite-context-data-metapost.lua)  0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-tex.lua (renamed from context/data/scite/lexers/data/scite-context-data-tex.lua)  0
-rw-r--r--  context/data/scite/context/lexers/lexer.lua (renamed from context/data/scite/lexers/lexer.lua)  0
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-bibtex.lua  176
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cld.lua (renamed from context/data/scite/lexers/scite-context-lexer-cld.lua)  2
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua  23
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cpp.lua  188
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua (renamed from context/data/scite/lexers/scite-context-lexer-lua-longstring.lua)  4
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-lua.lua (renamed from context/data/scite/lexers/scite-context-lexer-lua.lua)  172
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-mps.lua (renamed from context/data/scite/lexers/scite-context-lexer-mps.lua)  96
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua  136
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua  43
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf.lua  204
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-tex-web.lua  23
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-tex.lua (renamed from context/data/scite/lexers/scite-context-lexer-tex.lua)  129
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-txt.lua (renamed from context/data/scite/lexers/scite-context-lexer-txt.lua)  7
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua  133
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-web.lua  67
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml-cdata.lua)  8
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml-comment.lua)  20
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-script.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml-script.lua)  10
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml.lua)  195
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer.lua (renamed from context/data/scite/lexers/scite-context-lexer.lua)  868
-rw-r--r--  context/data/scite/context/lexers/themes/scite-context-theme.lua (renamed from context/data/scite/lexers/themes/scite-context-theme.lua)  46
-rw-r--r--  context/data/scite/context/scite-context-data-context.properties (renamed from context/data/scite/scite-context-data-context.properties)  0
-rw-r--r--  context/data/scite/context/scite-context-data-interfaces.properties (renamed from context/data/scite/scite-context-data-interfaces.properties)  0
-rw-r--r--  context/data/scite/context/scite-context-data-metafun.properties (renamed from context/data/scite/scite-context-data-metafun.properties)  0
-rw-r--r--  context/data/scite/context/scite-context-data-metapost.properties (renamed from context/data/scite/scite-context-data-metapost.properties)  0
-rw-r--r--  context/data/scite/context/scite-context-data-tex.properties (renamed from context/data/scite/scite-context-data-tex.properties)  0
-rw-r--r--  context/data/scite/context/scite-context-external.properties (renamed from context/data/scite/scite-context-external.properties)  34
-rw-r--r--  context/data/scite/context/scite-context-internal.properties (renamed from context/data/scite/scite-context-internal.properties)  10
-rw-r--r--  context/data/scite/context/scite-context-user.properties  15
-rw-r--r--  context/data/scite/context/scite-context.properties (renamed from context/data/scite/scite-context.properties)  18
-rw-r--r--  context/data/scite/context/scite-ctx-context.properties (renamed from context/data/scite/scite-ctx-context.properties)  0
-rw-r--r--  context/data/scite/context/scite-ctx-example.properties (renamed from context/data/scite/scite-ctx-example.properties)  0
-rw-r--r--  context/data/scite/context/scite-ctx.lua (renamed from context/data/scite/scite-ctx.lua)  10
-rw-r--r--  context/data/scite/context/scite-ctx.properties (renamed from context/data/scite/scite-ctx.properties)  6
-rw-r--r--  context/data/scite/context/scite-metapost.properties (renamed from context/data/scite/scite-metapost.properties)  2
-rw-r--r--  context/data/scite/context/scite-pragma.properties (renamed from context/data/scite/scite-pragma.properties)  7
-rw-r--r--  context/data/scite/context/scite-tex.properties (renamed from context/data/scite/scite-tex.properties)  2
-rw-r--r--  context/data/scite/lexers/archive/data-pre-303.zip  bin 380461 -> 0 bytes
-rw-r--r--  context/data/scite/lexers/archive/data-pre-331.zip  bin 399829 -> 0 bytes
-rw-r--r--  context/data/scite/lexers/archive/data-pre-341.zip  bin 405362 -> 0 bytes
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf-object.lua  119
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf-xref.lua  53
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf.lua  79
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-web.lua  159
-rw-r--r--  context/data/scite/lexers/themes/scite-context-theme-keep.lua  233
-rw-r--r--  context/data/scite/metapost.properties  1
-rw-r--r--  context/data/scite/scite-context-user.properties  15
-rw-r--r--  context/data/scite/scite-context-visual.tex  52
-rw-r--r--  context/data/scite/tex.properties  1
-rw-r--r--  doc/context/scripts/mkiv/mtx-scite.html  2
-rw-r--r--  doc/context/scripts/mkiv/mtx-scite.man  6
-rw-r--r--  doc/context/scripts/mkiv/mtx-scite.xml  2
-rw-r--r--  scripts/context/lua/mtx-scite.lua  55
-rw-r--r--  scripts/context/lua/mtxrun.lua  6
-rw-r--r--  scripts/context/stubs/mswin/mtxrun.lua  6
-rwxr-xr-x  scripts/context/stubs/unix/mtxrun  6
-rw-r--r--  scripts/context/stubs/win64/mtxrun.lua  6
-rw-r--r--  tex/context/base/bibl-bib.lua  2
-rw-r--r--  tex/context/base/cont-new.mkiv  2
-rw-r--r--  tex/context/base/context-version.pdf  bin 4065 -> 4061 bytes
-rw-r--r--  tex/context/base/context.mkiv  2
-rw-r--r--  tex/context/base/m-scite.mkiv  269
-rw-r--r--  tex/context/base/status-files.pdf  bin 24624 -> 24625 bytes
-rw-r--r--  tex/context/base/status-lua.pdf  bin 242465 -> 242466 bytes
-rw-r--r--  tex/context/base/trac-deb.lua  16
-rw-r--r--  tex/context/base/trac-log.lua  3
-rw-r--r--  tex/context/base/util-sci.lua  262
-rw-r--r--  tex/generic/context/luatex/luatex-fonts-merged.lua  2
78 files changed, 2842 insertions, 1407 deletions
diff --git a/context/data/scite/scite-context-readme.pdf b/context/data/scite/context/documents/scite-context-readme.pdf
index 99f05a2a5..2bd7d4216 100644
--- a/context/data/scite/scite-context-readme.pdf
+++ b/context/data/scite/context/documents/scite-context-readme.pdf
Binary files differ
diff --git a/context/data/scite/scite-context-readme.tex b/context/data/scite/context/documents/scite-context-readme.tex
index 42f5e0a98..cbfc00a33 100644
--- a/context/data/scite/scite-context-readme.tex
+++ b/context/data/scite/context/documents/scite-context-readme.tex
@@ -191,60 +191,115 @@ You need to add this path to your local path definition. Installing \SCITE\ to
some known place has the advantage that you can move it around. There are no
special dependencies on the operating system.
+On \MSWINDOWS\ you can for instance install \SCITE\ in:
+
+\starttyping
+c:\data\system\scite
+\stoptyping
+
+and then end up with:
+
+\starttyping
+c:\data\system\scite\wscite
+\stoptyping
+
+and that is the path you need to add to your environment \type {PATH} variable.
+
+On \LINUX\ the files end up in:
+
+\starttyping
+/usr/bin
+/usr/share/scite
+\stoptyping
+
+The second path is where we will put more files.
+
+\subject{Installing \type {scintillua}}
+
Next you need to install the lpeg lexers. \footnote {Versions later than 2.11
will not run on \MSWINDOWS\ 2K. In that case you need to comment the external
-lexer import.} These can be fetched from:
+lexer import.} The library is part of the \type {textadept} editor by Mitchell
+(\hyphenatedurl {mitchell.att.foicica.com}), which is also based on scintilla.
+The archive can be fetched from:
\starttyping
http://foicica.com/scintillua/
\stoptyping
-On \MSWINDOWS\ you need to copy the \type {lexers} subfolder to the \type
-{wscite} folder. For \LINUX\ the place depends on the distribution and I just
-copy them in the same path as where the regular properties files live. \footnote
-{If you update, don't do so without testing first. Sometimes there are changes in
-\SCITE\ that influence the lexers in which case you have to wait till we have
-update them to suit those changes.}
+On \MSWINDOWS\ you need to copy the files to the \type {wscite} folder (so we end
+up with a \type {lexers} subfolder there). For \LINUX\ the place depends on the
+distribution, for instance \type {/usr/share/scite}; this is the place where the
+regular properties files live. \footnote {If you update, don't do so without
+testing first. Sometimes there are changes in \SCITE\ that influence the lexers
+in which case you have to wait till we have updated them to suit those changes.}
-For \UNIX, one can take a precompiled version as well. Here we might need to split
-the set of files into:
+So, on \MSWINDOWS\ you end up with:
\starttyping
-/usr/bin
-/usr/share/scite
+c:\data\system\scite\wscite\lexers
\stoptyping
-The second path is hard coded in the binary and moving all files there probably works
-okay. Beware: if you're on a 64 bit system, you need to rename the 64 bit \type {so}
-library.
+And on \LINUX:
+
+\starttyping
+/usr/share/scite/lexers
+\stoptyping
-If you want to use \CONTEXT, you need to copy the relevant files from
+Beware: if you're on a 64 bit system, you need to rename the 64 bit \type {so}
+library into one without a number. Unfortunately the 64 bit library is not always
+available, which can give surprises when the operating system gets updated. In such
+a case you should downgrade or use \type {wine} with the \MSWINDOWS\ binaries
+instead. After installation you need to restart \SCITE\ in order to see if things
+work out as expected.
+
+\subject{Installing the \CONTEXT\ lexers}
+
+When we started using this nice extension, we ran into issues and as a
+consequence shipped patched \LUA\ code. We also needed some more control as we
+wanted to provide more features and complex nested lexers. Because the library
+\API\ changed a couple of times, we now have our own variant which will be
+cleaned up over time to be more consistent with our other \LUA\ code (so that we
+can also use it in \CONTEXT\ as a variant verbatim lexer). We hope to be able to
+use the \type {scintillua} library as it does the job.
+
+Anyway, if you want to use \CONTEXT, you need to copy the relevant files from
\starttyping
<texroot>/tex/texmf-context/context/data/scite
\stoptyping
-to the path were \SCITE\ keeps its property files (\type {*.properties}). There
-is a file called \type {SciteGlobal.properties}. At the end of that file (on
-\MSWINDOWS\ it is in the path where the Scite binary) you then add a line to the
-end:
+to the path where \SCITE\ keeps its property files (\type {*.properties}). This is
+the path we already mentioned. There should be a file there called \type
+{SciteGlobal.properties}.
+
+So, in the end, on \MSWINDOWS\ you get new files in:
\starttyping
-import scite-context-user
+c:\data\system\scite\wscite
+c:\data\system\scite\wscite\context
+c:\data\system\scite\wscite\context\lexers
+c:\data\system\scite\wscite\context\lexers\themes
+c:\data\system\scite\wscite\context\lexers\data
+c:\data\system\scite\wscite\context\documents
\stoptyping
-You need to restart \SCITE\ in order to see if things work out as expected.
-
-Disabling the external lexer in a recent \SCITE\ is somewhat tricky. In that case
-the end of that file looks like:
+while on \LINUX\ you get:
\starttyping
-imports.exclude=scite-context-external
-import *
-import scite-context-user
+/usr/share/scite/
+/usr/share/scite/context
+/usr/share/scite/context/lexers
+/usr/share/scite/context/lexers/themes
+/usr/share/scite/context/lexers/data
+/usr/share/scite/context/documents
\stoptyping
-In any case you need to make sure that the user file is loaded last.
+At the end of the \type {SciteGlobal.properties} you need to add the following
+line:
+
+\starttyping
+import context/scite-context-user
+\stoptyping
After this, things should run as expected (given that \TEX\ runs at the console
as well).
@@ -266,102 +321,15 @@ The configuration file defaults to the Dejavu fonts. These free fonts are part o
the \CONTEXT\ suite (also known as the standalone distribution). Of course you
can fetch them from \type {http://dejavu-fonts.org} as well. You have to copy
them to where your operating system expects them. In the suite they are available
-in
+in:
\starttyping
<contextroot>/tex/texmf/fonts/truetype/public/dejavu
\stoptyping
-\subject{An alternative approach}
-
-If for some reason you prefer not to mess with property files in the main \SCITE\
-path, you can follow a different route and selectively copy files to places.
-
-The following files are needed for the lpeg based lexer:
-
-\starttyping
-lexers/scite-context-lexer.lua
-lexers/scite-context-lexer-tex.lua
-lexers/scite-context-lexer-mps.lua
-lexers/scite-context-lexer-lua.lua
-lexers/scite-context-lexer-cld.lua
-lexers/scite-context-lexer-txt.lua
-lexers/scite-context-lexer-xml*.lua
-lexers/scite-context-lexer-pdf*.lua
-
-lexers/context/data/scite-context-data-tex.lua
-lexers/context/data/scite-context-data-context.lua
-lexers/context/data/scite-context-data-interfaces.lua
-lexers/context/data/scite-context-data-metapost.lua
-lexers/context/data/scite-context-data-metafun.lua
-
-lexers/themes/scite-context-theme.lua
-\stoptyping
-
-The data files are needed because we cannot access property files from within the
-lexer. If we could open a file we could use the property files instead.
-
-These files go to the \type {lexers} subpath in your \SCITE\ installation.
-Normally this sits in the binary path. The following files provide some
-extensions. On \MSWINDOWS\ you can copy these files to the path where the \SCITE\
-binary lives.
-
-\starttyping
-scite-ctx.lua
-\stoptyping
-
-Because property files can only be loaded from the same path where the (user)
-file loads them you need to copy the following files to the same path where the
-loading is defined:
-
-\starttyping
-scite-context.properties
-scite-context-internal.properties
-scite-context-external.properties
-
-scite-pragma.properties
-
-scite-tex.properties
-scite-metapost.properties
-
-scite-context-data-tex.properties
-scite-context-data-context.properties
-scite-context-data-interfaces.properties
-scite-context-data-metapost.properties
-scite-context-data-metafun.properties
-
-scite-ctx.properties
-scite-ctx-context.properties
-scite-ctx-example.properties
-\stoptyping
-
-On \MSWINDOWS\ these go to:
-
-\starttyping
-c:/Users/YourName
-\stoptyping
-
-Next you need to add this to:
-
-\starttyping
-import scite-context
-import scite-context-internal
-import scite-context-external
-import scite-pragma
-\stoptyping
-
-to the file:
-
-\starttyping
-SciTEUser.properties
-\stoptyping
-
-Of course the pragma import is optional. You can comment either the internal or
-external variant but there is no reason not to keep them both.
-
\subject{Extensions}
-Just a quick not to some extensions. If you select a part of the text (normally
+Just a quick note about some extensions. If you select a part of the text (normally
you do this with the shift key pressed) and you hit \type {Shift-F11}, you get a
menu with some options. More (robust) ones will be provided at some point.
@@ -388,6 +356,27 @@ disable it). Wrong words are colored red, and words that might have a case
problem are colored orange. Recognized words are greyed and words with less than
three characters are ignored.
+A spell checking file has to be put in the \type {lexers/data} directory and
+looks as follows (e.g. \type {spell-uk.lua}):
+
+\starttyping
+return {
+ ["max"]=40,
+ ["min"]=3,
+ ["n"]=151493,
+ ["words"]={
+ ["aardvark"]="aardvark",
+ ["aardvarks"]="aardvarks",
+ ["aardwolf"]="aardwolf",
+ ["aardwolves"]="aardwolves",
+ ...
+ }
+}
+\stoptyping
+
+The keys are words that get checked for the given value (which can have uppercase
+characters). The word files are not distributed (but they might be at some point).
+
In the case of internal lexers, the following file is needed:
\starttyping
@@ -451,8 +440,8 @@ releases.
\subject{The external lexers}
-These are the more advanced. They provide more detail and the \CONTEXT\ lexer
-also supports nested \METAPOST\ and \LUA. Currently there is no detailed
+These are the more advanced lexers. They provide more detail and the \CONTEXT\
+lexer also supports nested \METAPOST\ and \LUA. Currently there is no detailed
configuration but this might change once they are stable.
The external lexers operate on documents while the internal ones operate on
@@ -463,13 +452,6 @@ garbage collecting many small tables comes at a price. Of course in practice thi
probably gets unnoticed. \footnote {I wrote the code in 2011 on a more than 5
years old Dell M90 laptop, so I suppose that speed is less an issue now.}
-In principle the external lexers can be used with \type {textadept} which also
-uses \type {scintilla}. Actually, support for lpeg lexing originates in \type
-{textadept}. Currently \type {textadept} lacks a couple of features I like about
-\SCITE\ (for instance it has no realtime logpane) and it's also still changing.
-At some point the \CONTEXT\ distribution might ship with files for \type
-{textadept} as well.
-
The external lpeg lexers work okay with the \MSWINDOWS\ and \LINUX\ versions of
\SCITE, but unfortunately at the time of writing this, the \LUA\ library that is
needed is not available for the \MACOSX\ version of \SCITE. Also, due to the fact
@@ -480,7 +462,7 @@ In addition to \CONTEXT\ and \METAFUN\ lexing a \LUA\ lexer is also provided so
that we can handle \CONTEXT\ \LUA\ Document (\CLD) files too. There is also an
\XML\ lexer. This one also provides spell checking. The \PDF\ lexer tries to do a
good job on \PDF\ files, but it has some limitations. There is also a simple text
-file lexer that does spell checking.
+file lexer that does spell checking. Finally there is a lexer for \CWEB\ files.
Don't worry if you see an orange rectangle in your \TEX\ or \XML\ document. This
indicates that there is a special space character there, for instance \type
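
As an aside (illustrative, not part of this commit): a minimal Lua sketch, with assumed file and function names, of how a word list in the spell-uk.lua format described in the readme hunk above could be consulted.

    -- the spell file is plain Lua returning the table shown in the readme
    local spell = dofile("lexers/data/spell-uk.lua")

    local function known(word)
        if #word < spell.min or #word > spell.max then
            return word            -- out of range: ignored by the checker
        end
        return spell.words[word]   -- accepted form, or nil when unknown
    end

    print(known("aardvark"))  --> aardvark
    print(known("aadvark"))   --> nil (such a word would be colored red)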
diff --git a/context/data/scite/scite-context-visual.pdf b/context/data/scite/context/documents/scite-context-visual.pdf
index 69d82eda6..69d82eda6 100644
--- a/context/data/scite/scite-context-visual.pdf
+++ b/context/data/scite/context/documents/scite-context-visual.pdf
diff --git a/context/data/scite/scite-context-visual.png b/context/data/scite/context/documents/scite-context-visual.png
index 7368a68f1..7368a68f1 100644
--- a/context/data/scite/scite-context-visual.png
+++ b/context/data/scite/context/documents/scite-context-visual.png
Binary files differ
diff --git a/context/data/scite/lexers/data/scite-context-data-context.lua b/context/data/scite/context/lexers/data/scite-context-data-context.lua
index 6c0293fbd..6c0293fbd 100644
--- a/context/data/scite/lexers/data/scite-context-data-context.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-context.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-interfaces.lua b/context/data/scite/context/lexers/data/scite-context-data-interfaces.lua
index b2c09b62a..b2c09b62a 100644
--- a/context/data/scite/lexers/data/scite-context-data-interfaces.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-interfaces.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-metafun.lua b/context/data/scite/context/lexers/data/scite-context-data-metafun.lua
index 50b9ecec4..50b9ecec4 100644
--- a/context/data/scite/lexers/data/scite-context-data-metafun.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-metafun.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-metapost.lua b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
index 766ea90da..766ea90da 100644
--- a/context/data/scite/lexers/data/scite-context-data-metapost.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-tex.lua b/context/data/scite/context/lexers/data/scite-context-data-tex.lua
index 415b74128..415b74128 100644
--- a/context/data/scite/lexers/data/scite-context-data-tex.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-tex.lua
diff --git a/context/data/scite/lexers/lexer.lua b/context/data/scite/context/lexers/lexer.lua
index 9582f6a76..9582f6a76 100644
--- a/context/data/scite/lexers/lexer.lua
+++ b/context/data/scite/context/lexers/lexer.lua
diff --git a/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua b/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua
new file mode 100644
index 000000000..88b070e5e
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua
@@ -0,0 +1,176 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for bibtex",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local global, string, table, lpeg = _G, string, table, lpeg
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
+local type = type
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local bibtexlexer       = lexer.new("bib","scite-context-lexer-bibtex")
+local whitespace = bibtexlexer.whitespace
+
+ local escape, left, right = P("\\"), P('{'), P('}')
+
+ patterns.balanced = P {
+ [1] = ((escape * (left+right)) + (1 - (left+right)) + V(2))^0,
+ [2] = left * V(1) * right
+ }
+
+-- taken from bibl-bib.lua
+
+local anything = patterns.anything
+local percent = P("%")
+local start = P("@")
+local comma = P(",")
+local hash = P("#")
+local escape = P("\\")
+local single = P("'")
+local double = P('"')
+local left = P('{')
+local right = P('}')
+local lineending = S("\n\r")
+local space = S(" \t\n\r\f")
+local spaces = space^1
+local equal = P("=")
+
+local keyword = (R("az","AZ","09") + S("@_:-"))^1
+local s_quoted = ((escape*single) + spaces^1 + (1-single))^0
+local d_quoted = ((escape*double) + spaces^1 + (1-double))^0
+local balanced = patterns.balanced
+
+local t_spacing = token(whitespace, space^1)
+local t_optionalws = token("default", space^1)^0
+
+local t_equal = token("operator",equal)
+local t_left = token("grouping",left)
+local t_right = token("grouping",right)
+local t_comma = token("operator",comma)
+local t_hash = token("operator",hash)
+
+local t_s_value = token("operator",single)
+ * token("text",s_quoted)
+ * token("operator",single)
+local t_d_value = token("operator",double)
+ * token("text",d_quoted)
+ * token("operator",double)
+local t_b_value = token("operator",left)
+ * token("text",balanced)
+ * token("operator",right)
+local t_r_value = token("text",keyword)
+
+local t_keyword = token("keyword",keyword)
+local t_key = token("command",keyword)
+local t_label = token("warning",keyword)
+
+local t_somevalue = t_s_value + t_d_value + t_b_value + t_r_value
+local t_value = t_somevalue
+ * ((t_optionalws * t_hash * t_optionalws) * t_somevalue)^0
+
+local t_assignment = t_optionalws
+ * t_key
+ * t_optionalws
+ * t_equal
+ * t_optionalws
+ * t_value
+
+local t_shortcut = t_keyword
+ * t_optionalws
+ * t_left
+ * t_optionalws
+ * (t_assignment * t_comma^0)^0
+ * t_optionalws
+ * t_right
+
+local t_definition = t_keyword
+ * t_optionalws
+ * t_left
+ * t_optionalws
+ * t_label
+ * t_optionalws
+ * t_comma
+ * (t_assignment * t_comma^0)^0
+ * t_optionalws
+ * t_right
+
+local t_comment = t_keyword
+ * t_optionalws
+ * t_left
+ * token("text",(1-t_right)^0)
+ * t_optionalws
+ * t_right
+
+local t_forget = token("comment",percent^1 * (1-lineending)^0)
+
+local t_rest = token("default",anything)
+
+-- this kind of lexing seems impossible as the size of the buffer passed to the lexer is not
+-- large enough .. but we can cheat and use this:
+--
+-- function OnOpen(filename) editor:Colourise(1,editor.TextLength) end -- or is it 0?
+
+bibtexlexer._rules = {
+ { "whitespace", t_spacing },
+ { "forget", t_forget },
+ { "shortcut", t_shortcut },
+ { "definition", t_definition },
+ { "comment", t_comment },
+ { "rest", t_rest },
+}
+
+-- local t_assignment = t_key
+-- * t_optionalws
+-- * t_equal
+-- * t_optionalws
+-- * t_value
+--
+-- local t_shortcut = t_keyword
+-- * t_optionalws
+-- * t_left
+--
+-- local t_definition = t_keyword
+-- * t_optionalws
+-- * t_left
+-- * t_optionalws
+-- * t_label
+-- * t_optionalws
+-- * t_comma
+--
+-- bibtexlexer._rules = {
+-- { "whitespace", t_spacing },
+-- { "assignment", t_assignment },
+-- { "definition", t_definition },
+-- { "shortcut", t_shortcut },
+-- { "right", t_right },
+-- { "comma", t_comma },
+-- { "forget", t_forget },
+-- { "comment", t_comment },
+-- { "rest", t_rest },
+-- }
+
+bibtexlexer._tokenstyles = context.styleset
+
+bibtexlexer._foldpattern = P("{") + P("}")
+
+bibtexlexer._foldsymbols = {
+ _patterns = {
+ "{",
+ "}",
+ },
+ ["grouping"] = {
+ ["{"] = 1,
+ ["}"] = -1,
+ },
+}
+
+return bibtexlexer
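
As an aside (illustrative, not part of this commit): the patterns.balanced grammar defined near the top of this new lexer matches brace balanced BibTeX field values, allowing escaped and nested braces. A standalone Lua sketch of the same idea:

    local lpeg = require("lpeg")
    local P, S, V = lpeg.P, lpeg.S, lpeg.V

    -- same shape as patterns.balanced above: an escaped brace, any
    -- non-brace character, or a nested { ... } group, repeated
    local balanced = P {
        [1] = ((P("\\") * S("{}")) + (1 - S("{}")) + V(2))^0,
        [2] = P("{") * V(1) * P("}"),
    }

    print(balanced:match('The {\\TeX}book'))  --> 15 (one past the end, so the whole value matched)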
diff --git a/context/data/scite/lexers/scite-context-lexer-cld.lua b/context/data/scite/context/lexers/scite-context-lexer-cld.lua
index 9b07b5b80..3442a195c 100644
--- a/context/data/scite/lexers/scite-context-lexer-cld.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-cld.lua
@@ -11,7 +11,7 @@ local context = lexer.context
local patterns = context.patterns
local cldlexer = lexer.new("cld","scite-context-lexer-cld")
-local lualexer = lexer.load('scite-context-lexer-lua')
+local lualexer = lexer.load("scite-context-lexer-lua")
-- can probably be done nicer now, a bit of a hack
diff --git a/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua b/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua
new file mode 100644
index 000000000..daa9221ba
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua
@@ -0,0 +1,23 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for cpp web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local cppweblexer = lexer.new("cpp-web","scite-context-lexer-cpp")
+local cpplexer = lexer.load("scite-context-lexer-cpp")
+
+-- can probably be done nicer now, a bit of a hack
+
+cppweblexer._rules = cpplexer._rules_web
+cppweblexer._tokenstyles = cpplexer._tokenstyles
+cppweblexer._foldsymbols = cpplexer._foldsymbols
+cppweblexer._directives = cpplexer._directives
+
+return cppweblexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer-cpp.lua b/context/data/scite/context/lexers/scite-context-lexer-cpp.lua
new file mode 100644
index 000000000..31180e6a5
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-cpp.lua
@@ -0,0 +1,188 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for cpp",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- looks like the original cpp lexer but web ready (so nothing special here yet)
+
+local P, R, S = lpeg.P, lpeg.R, lpeg.S
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local cpplexer = lexer.new("cpp","scite-context-lexer-cpp")
+local whitespace = cpplexer.whitespace
+
+local keywords = { -- copied from cpp.lua
+ -- c
+ "asm", "auto", "break", "case", "const", "continue", "default", "do", "else",
+ "extern", "false", "for", "goto", "if", "inline", "register", "return",
+ "sizeof", "static", "switch", "true", "typedef", "volatile", "while",
+ "restrict",
+ -- hm
+ "_Bool", "_Complex", "_Pragma", "_Imaginary",
+ -- c++.
+ "catch", "class", "const_cast", "delete", "dynamic_cast", "explicit",
+ "export", "friend", "mutable", "namespace", "new", "operator", "private",
+ "protected", "public", "signals", "slots", "reinterpret_cast",
+ "static_assert", "static_cast", "template", "this", "throw", "try", "typeid",
+ "typename", "using", "virtual"
+}
+
+local datatypes = { -- copied from cpp.lua
+ "bool", "char", "double", "enum", "float", "int", "long", "short", "signed",
+ "struct", "union", "unsigned", "void"
+}
+
+local macros = { -- copied from cpp.lua
+ "define", "elif", "else", "endif", "error", "if", "ifdef", "ifndef", "import",
+ "include", "line", "pragma", "undef", "using", "warning"
+}
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local squote = P("'")
+local dquote = P('"')
+local period = P(".")
+local escaped = P("\\") * P(1)
+local slashes = P("//")
+local begincomment = P("/*")
+local endcomment = P("*/")
+local percent = P("%")
+
+local hexadecimal = patterns.hexadecimal
+local decimal = patterns.decimal
+local float = patterns.float
+local integer = P("-")^-1 * (hexadecimal + decimal) -- also in patterns ?
+
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
+local shortcomment = token("comment", slashes * restofline^0)
+local longcomment = token("comment", begincomment * (1-endcomment)^0 * endcomment^-1)
+
+local shortstring = token("quote", dquote) -- can be shared
+ * token("string", (escaped + (1-dquote))^0)
+ * token("quote", dquote)
+ + token("quote", squote)
+ * token("string", (escaped + (1-squote))^0)
+ * token("quote", squote)
+
+local number = token("number", float + integer)
+
+local validword = R("AZ","az","__") * R("AZ","az","__","09")^0
+local identifier = token("default",validword)
+
+local operator = token("special", S("+-*/%^!=<>;:{}[]().&|?~"))
+
+----- optionalspace = spacing^0
+
+local p_keywords = exact_match(keywords )
+local p_datatypes = exact_match(datatypes)
+local p_macros = exact_match(macros)
+
+local keyword = token("keyword", p_keywords)
+local datatype = token("keyword", p_datatypes)
+local identifier = token("default", validword)
+
+local macro = token("data", #P("#") * startofline * P("#") * S("\t ")^0 * p_macros)
+
+cpplexer._rules = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+}
+
+local web = lexer.loadluafile("scite-context-lexer-web-snippets")
+
+if web then
+
+ lexer.inform("supporting web snippets in cpp lexer")
+
+ cpplexer._rules_web = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "web", web.pattern },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+ }
+
+else
+
+ lexer.report("not supporting web snippets in cpp lexer")
+
+ cpplexer._rules_web = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+ }
+
+end
+
+cpplexer._tokenstyles = context.styleset
+
+cpplexer._foldpattern = P("/*") + P("*/") + S("{}") -- separate entry else interference
+
+cpplexer._foldsymbols = {
+ _patterns = {
+ "[{}]",
+ "/%*",
+ "%*/",
+ },
+ -- ["data"] = { -- macro
+ -- ["region"] = 1,
+ -- ["endregion"] = -1,
+ -- ["if"] = 1,
+ -- ["ifdef"] = 1,
+ -- ["ifndef"] = 1,
+ -- ["endif"] = -1,
+ -- },
+ ["special"] = { -- operator
+ ["{"] = 1,
+ ["}"] = -1,
+ },
+ ["comment"] = {
+ ["/*"] = 1,
+ ["*/"] = -1,
+ }
+}
+
+-- -- by indentation:
+
+cpplexer._foldpatterns = nil
+cpplexer._foldsymbols = nil
+
+return cpplexer
diff --git a/context/data/scite/lexers/scite-context-lexer-lua-longstring.lua b/context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua
index 7e1a3dcc3..855adbe4e 100644
--- a/context/data/scite/lexers/scite-context-lexer-lua-longstring.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua
@@ -1,13 +1,11 @@
local info = {
version = 1.002,
- comment = "scintilla lpeg lexer for lua",
+ comment = "scintilla lpeg lexer for lua longstrings",
author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
copyright = "PRAGMA ADE / ConTeXt Development Team",
license = "see context related readme files",
}
-local P = lpeg.P
-
local lexer = require("lexer") -- require("scite-context-lexer")
local context = lexer.context
local patterns = context.patterns
diff --git a/context/data/scite/lexers/scite-context-lexer-lua.lua b/context/data/scite/context/lexers/scite-context-lexer-lua.lua
index ebb69c979..c44d586ba 100644
--- a/context/data/scite/lexers/scite-context-lexer-lua.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-lua.lua
@@ -9,7 +9,7 @@ local info = {
-- beware: all multiline is messy, so even if it's no lexer, it should be an embedded lexer
-- we probably could use a local whitespace variant but this is cleaner
-local P, R, S, C, Cg, Cb, Cs, Cmt, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt, lpeg.Cp
+local P, R, S, C, Cmt, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cmt, lpeg.Cp
local match, find = string.match, string.find
local setmetatable = setmetatable
@@ -34,28 +34,28 @@ local directives = { } -- communication channel
-- this can save time on large files
local keywords = {
- 'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for', 'function', -- 'goto',
- 'if', 'in', 'local', 'nil', 'not', 'or', 'repeat', 'return', 'then', 'true',
- 'until', 'while',
+ "and", "break", "do", "else", "elseif", "end", "false", "for", "function", -- "goto",
+ "if", "in", "local", "nil", "not", "or", "repeat", "return", "then", "true",
+ "until", "while",
}
local functions = {
- 'assert', 'collectgarbage', 'dofile', 'error', 'getmetatable',
- 'ipairs', 'load', 'loadfile', 'module', 'next', 'pairs',
- 'pcall', 'print', 'rawequal', 'rawget', 'rawset', 'require',
- 'setmetatable', 'tonumber', 'tostring', 'type', 'unpack', 'xpcall', 'select',
+ "assert", "collectgarbage", "dofile", "error", "getmetatable",
+ "ipairs", "load", "loadfile", "module", "next", "pairs",
+ "pcall", "print", "rawequal", "rawget", "rawset", "require",
+ "setmetatable", "tonumber", "tostring", "type", "unpack", "xpcall", "select",
"string", "table", "coroutine", "debug", "file", "io", "lpeg", "math", "os", "package", "bit32",
}
local constants = {
- '_G', '_VERSION', '_M', '...', '_ENV',
+ "_G", "_VERSION", "_M", "...", "_ENV",
-- here too
- '__add', '__call', '__concat', '__div', '__idiv', '__eq', '__gc', '__index',
- '__le', '__lt', '__metatable', '__mode', '__mul', '__newindex',
- '__pow', '__sub', '__tostring', '__unm', '__len',
- '__pairs', '__ipairs',
- 'NaN',
+ "__add", "__call", "__concat", "__div", "__idiv", "__eq", "__gc", "__index",
+ "__le", "__lt", "__metatable", "__mode", "__mul", "__newindex",
+ "__pow", "__sub", "__tostring", "__unm", "__len",
+ "__pairs", "__ipairs",
+ "NaN",
}
-- local tokenmappings = { }
@@ -65,9 +65,9 @@ local constants = {
-- for i=1,#constants do tokenmappings[constants[i]] = "constant" }
local internals = { -- __
- 'add', 'call', 'concat', 'div', 'eq', 'gc', 'index',
- 'le', 'lt', 'metatable', 'mode', 'mul', 'newindex',
- 'pow', 'sub', 'tostring', 'unm', 'len',
+ "add", "call", "concat", "div", "eq", "gc", "index",
+ "le", "lt", "metatable", "mode", "mul", "newindex",
+ "pow", "sub", "tostring", "unm", "len",
}
local depricated = {
@@ -93,14 +93,14 @@ local longonestart = P("[[")
local longonestop = P("]]")
local longonestring = (1-longonestop)^0
-local longtwostart = P('[') * Cmt(equals,setlevel) * P('[')
-local longtwostop = P(']') * equals * P(']')
+local longtwostart = P("[") * Cmt(equals,setlevel) * P("[")
+local longtwostop = P("]") * equals * P("]")
local sentinels = { } setmetatable(sentinels, { __index = function(t,k) local v = "]" .. k .. "]" t[k] = v return v end })
local longtwostring = P(function(input,index)
if level then
- -- local sentinel = ']' .. level .. ']'
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 - #sentinel or #input + 1
@@ -111,15 +111,15 @@ end)
local longtwostring_end = P(function(input,index)
if level then
- -- local sentinel = ']' .. level .. ']'
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 or #input + 1
end
end)
-local longcomment = Cmt(#('[[' + ('[' * C(equals) * '[')), function(input,index,level)
- -- local sentinel = ']' .. level .. ']'
+local longcomment = Cmt(#("[[" + ("[" * C(equals) * "[")), function(input,index,level)
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 or #input + 1
@@ -132,7 +132,7 @@ local eol = patterns.eol
local squote = P("'")
local dquote = P('"')
local escaped = P("\\") * P(1)
-local dashes = P('--')
+local dashes = P("--")
local spacing = token(whitespace, space^1)
local rest = token("default", any)
@@ -227,22 +227,22 @@ local identifier = token("default", validword)
-- end)
lualexer._rules = {
- { 'whitespace', spacing },
- { 'keyword', keyword }, -- can be combined
- -- { 'structure', structure },
- { 'function', builtin }, -- can be combined
- { 'constant', constant }, -- can be combined
- -- { 'experimental', experimental }, -- works but better split
- { 'csname', csname },
- { 'goto', gotokeyword },
- { 'identifier', identifier },
- { 'string', string },
- { 'number', number },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment },
- { 'label', gotolabel },
- { 'operator', operator },
- { 'rest', rest },
+ { "whitespace", spacing },
+ { "keyword", keyword }, -- can be combined
+ -- { "structure", structure },
+ { "function", builtin }, -- can be combined
+ { "constant", constant }, -- can be combined
+ -- { "experimental", experimental }, -- works but better split
+ { "csname", csname },
+ { "goto", gotokeyword },
+ { "identifier", identifier },
+ { "string", string },
+ { "number", number },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "label", gotolabel },
+ { "operator", operator },
+ { "rest", rest },
}
-- -- experiment
@@ -277,18 +277,18 @@ lualexer._rules = {
-- }
--
-- lualexer._rules = {
--- { 'whitespace', spacing },
--- { 'whatever', whatever },
--- { 'csname', csname },
--- { 'goto', gotokeyword },
--- { 'identifier', identifier },
--- { 'string', string },
--- { 'number', number },
--- { 'longcomment', longcomment },
--- { 'shortcomment', shortcomment },
--- { 'label', gotolabel },
--- { 'operator', operator },
--- { 'rest', rest },
+-- { "whitespace", spacing },
+-- { "whatever", whatever },
+-- { "csname", csname },
+-- { "goto", gotokeyword },
+-- { "identifier", identifier },
+-- { "string", string },
+-- { "number", number },
+-- { "longcomment", longcomment },
+-- { "shortcomment", shortcomment },
+-- { "label", gotolabel },
+-- { "operator", operator },
+-- { "rest", rest },
-- }
lualexer._tokenstyles = context.styleset
@@ -300,26 +300,26 @@ lualexer._foldpattern = (P("end") + P("if") + P("do") + P("function") + P("repea
lualexer._foldsymbols = {
_patterns = {
- '[a-z][a-z]+',
- '[{}%[%]]',
+ "[a-z][a-z]+",
+ "[{}%[%]]",
},
- ['keyword'] = { -- challenge: if=0 then=1 else=-1 elseif=-1
- ['if'] = 1, -- if .. [then|else] .. end
- ['do'] = 1, -- [while] do .. end
- ['function'] = 1, -- function .. end
- ['repeat'] = 1, -- repeat .. until
- ['until'] = -1,
- ['end'] = -1,
+ ["keyword"] = { -- challenge: if=0 then=1 else=-1 elseif=-1
+ ["if"] = 1, -- if .. [then|else] .. end
+ ["do"] = 1, -- [while] do .. end
+ ["function"] = 1, -- function .. end
+ ["repeat"] = 1, -- repeat .. until
+ ["until"] = -1,
+ ["end"] = -1,
},
- ['comment'] = {
- ['['] = 1, [']'] = -1,
+ ["comment"] = {
+ ["["] = 1, ["]"] = -1,
},
- -- ['quote'] = { -- confusing
- -- ['['] = 1, [']'] = -1,
+ -- ["quote"] = { -- confusing
+ -- ["["] = 1, ["]"] = -1,
-- },
- ['special'] = {
- -- ['('] = 1, [')'] = -1,
- ['{'] = 1, ['}'] = -1,
+ ["special"] = {
+ -- ["("] = 1, [")"] = -1,
+ ["{"] = 1, ["}"] = -1,
},
}
@@ -327,9 +327,9 @@ lualexer._foldsymbols = {
local cstoken = R("az","AZ","\127\255") + S("@!?_")
local texcsname = P("\\") * cstoken^1
-local commentline = P('%') * (1-S("\n\r"))^0
+local commentline = P("%") * (1-S("\n\r"))^0
-local texcomment = token('comment', Cmt(commentline, function() return directives.cld_inline end))
+local texcomment = token("comment", Cmt(commentline, function() return directives.cld_inline end))
local longthreestart = P("\\!!bs")
local longthreestop = P("\\!!es")
@@ -352,22 +352,22 @@ local texcommand = token("warning", texcsname)
lualexer._directives = directives
lualexer._rules_cld = {
- { 'whitespace', spacing },
- { 'texstring', texstring },
- { 'texcomment', texcomment },
- { 'texcommand', texcommand },
- -- { 'structure', structure },
- { 'keyword', keyword },
- { 'function', builtin },
- { 'csname', csname },
- { 'constant', constant },
- { 'identifier', identifier },
- { 'string', string },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment }, -- should not be used inline so best signal it as comment (otherwise complex state till end of inline)
- { 'number', number },
- { 'operator', operator },
- { 'rest', rest },
+ { "whitespace", spacing },
+ { "texstring", texstring },
+ { "texcomment", texcomment },
+ { "texcommand", texcommand },
+ -- { "structure", structure },
+ { "keyword", keyword },
+ { "function", builtin },
+ { "csname", csname },
+ { "constant", constant },
+ { "identifier", identifier },
+ { "string", string },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment }, -- should not be used inline so best signal it as comment (otherwise complex state till end of inline)
+ { "number", number },
+ { "operator", operator },
+ { "rest", rest },
}
return lualexer
diff --git a/context/data/scite/lexers/scite-context-lexer-mps.lua b/context/data/scite/context/lexers/scite-context-lexer-mps.lua
index dc61786dc..b87ea83cb 100644
--- a/context/data/scite/lexers/scite-context-lexer-mps.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-mps.lua
@@ -7,7 +7,7 @@ local info = {
}
local global, string, table, lpeg = _G, string, table, lpeg
-local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
local type = type
local lexer = require("lexer")
@@ -82,51 +82,63 @@ local cstokentex = R("az","AZ","\127\255") + S("@!?_")
-- we could collapse as in tex
local spacing = token(whitespace, space^1)
-local rest = token('default', any)
-local comment = token('comment', P('%') * (1-S("\n\r"))^0)
-local internal = token('reserved', exact_match(mergedshortcuts,false))
-local shortcut = token('data', exact_match(mergedinternals))
-local helper = token('command', exact_match(metafuncommands))
-local plain = token('plain', exact_match(metapostcommands))
-local quoted = token('quote', dquote)
- * token('string', P(1-dquote)^0)
- * token('quote', dquote)
-local texstuff = token('quote', P("btex ") + P("verbatimtex "))
- * token('string', P(1-P(" etex"))^0)
- * token('quote', P(" etex"))
-local primitive = token('primitive', exact_match(metapostprimitives))
-local identifier = token('default', cstoken^1)
-local number = token('number', number)
-local grouping = token('grouping', S("()[]{}")) -- can be an option
-local special = token('special', S("#()[]{}<>=:\"")) -- or else := <> etc split
-local texlike = token('warning', P("\\") * cstokentex^1)
-local extra = token('extra', P("+-+") + P("++") + S("`~%^&_-+*/\'|\\"))
+local rest = token("default", any)
+local comment = token("comment", P("%") * (1-S("\n\r"))^0)
+local internal = token("reserved", exact_match(mergedshortcuts,false))
+local shortcut = token("data", exact_match(mergedinternals))
+local helper = token("command", exact_match(metafuncommands))
+local plain = token("plain", exact_match(metapostcommands))
+local quoted = token("quote", dquote)
+ * token("string", P(1-dquote)^0)
+ * token("quote", dquote)
+local texstuff = token("quote", P("btex ") + P("verbatimtex "))
+ * token("string", P(1-P(" etex"))^0)
+ * token("quote", P(" etex"))
+local primitive = token("primitive", exact_match(metapostprimitives))
+local identifier = token("default", cstoken^1)
+local number = token("number", number)
+local grouping = token("grouping", S("()[]{}")) -- can be an option
+local special = token("special", S("#()[]{}<>=:\"")) -- or else := <> etc split
+local texlike = token("warning", P("\\") * cstokentex^1)
+local extra = token("extra", P("+-+") + P("++") + S("`~%^&_-+*/\'|\\"))
local nested = P { leftbrace * (V(1) + (1-rightbrace))^0 * rightbrace }
-local texlike = token('embedded', P("\\") * (P("MP") + P("mp")) * mptoken^1)
+local texlike = token("embedded", P("\\") * (P("MP") + P("mp")) * mptoken^1)
* spacing^0
- * token('grouping', leftbrace)
- * token('rest', (nested + (1-rightbrace))^0 )
- * token('grouping', rightbrace)
- + token('warning', P("\\") * cstokentex^1)
+ * token("grouping", leftbrace)
+ * token("default", (nested + (1-rightbrace))^0 )
+ * token("grouping", rightbrace)
+ + token("warning", P("\\") * cstokentex^1)
+
+-- lua: we assume: lua ( "lua code" )
+
+local cldlexer = lexer.load("scite-context-lexer-cld","mps-cld")
+
+local startlua = P("lua") * space^0 * P('(') * space^0 * P('"')
+local stoplua = P('"') * space^0 * P(')')
+
+local startluacode = token("embedded", startlua)
+local stopluacode = #stoplua * token("embedded", stoplua)
+
+lexer.embed_lexer(metafunlexer, cldlexer, startluacode, stopluacode)
metafunlexer._rules = {
- { 'whitespace', spacing },
- { 'comment', comment },
- { 'internal', internal },
- { 'shortcut', shortcut },
- { 'helper', helper },
- { 'plain', plain },
- { 'primitive', primitive },
- { 'texstuff', texstuff },
- { 'identifier', identifier },
- { 'number', number },
- { 'quoted', quoted },
- -- { 'grouping', grouping }, -- can be an option
- { 'special', special },
- { 'texlike', texlike },
- { 'extra', extra },
- { 'rest', rest },
+ { "whitespace", spacing },
+ { "comment", comment },
+ { "internal", internal },
+ { "shortcut", shortcut },
+ { "helper", helper },
+ { "plain", plain },
+ { "primitive", primitive },
+ { "texstuff", texstuff },
+ { "identifier", identifier },
+ { "number", number },
+ { "quoted", quoted },
+ -- { "grouping", grouping }, -- can be an option
+ { "special", special },
+ { "texlike", texlike },
+ { "extra", extra },
+ { "rest", rest },
}
metafunlexer._tokenstyles = context.styleset
@@ -135,7 +147,7 @@ metafunlexer._foldpattern = patterns.lower^2 -- separate entry else interference
metafunlexer._foldsymbols = {
_patterns = {
- '[a-z][a-z]+',
+ "[a-z][a-z]+",
},
["plain"] = {
["beginfig"] = 1,
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua
new file mode 100644
index 000000000..1fb95838a
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua
@@ -0,0 +1,136 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf objects",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- no longer used: nesting lexers with whitespace in start/stop is unreliable
+
+local P, R, S, C, V = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.V
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdfobjectlexer = lexer.new("pdfobj","scite-context-lexer-pdf-object")
+local whitespace = pdfobjectlexer.whitespace
+
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local newline = patterns.eol
+local real = patterns.real
+local cardinal = patterns.cardinal
+
+local lparent = P("(")
+local rparent = P(")")
+local langle = P("<")
+local rangle = P(">")
+local escape = P("\\")
+local unicodetrigger = P("feff")
+
+local nametoken = 1 - space - S("<>/[]()")
+local name = P("/") * nametoken^1
+
+local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
+
+local t_spacing = token(whitespace, spacing)
+local t_spaces = token(whitespace, spacing)^0
+local t_rest = token("default", nospacing) -- anything
+
+local p_stream = P("stream")
+local p_endstream = P("endstream")
+local p_obj = P("obj")
+local p_endobj = P("endobj")
+local p_reference = P("R")
+
+local p_objectnumber = patterns.cardinal
+local p_comment = P("%") * (1-S("\n\r"))^0
+
+local t_string = token("quote", lparent)
+ * token("string", p_string)
+ * token("quote", rparent)
+local t_unicode = token("quote", langle)
+ * token("plain", unicodetrigger)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_whatsit = token("quote", langle)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_keyword = token("command", name)
+local t_constant = token("constant", name)
+local t_number = token("number", real)
+-- t_reference = token("number", cardinal)
+-- * t_spacing
+-- * token("number", cardinal)
+local t_reserved = token("number", P("true") + P("false") + P("NULL"))
+local t_reference = token("warning", cardinal)
+ * t_spacing
+ * token("warning", cardinal)
+ * t_spacing
+ * token("keyword", p_reference)
+
+local t_comment = token("comment", p_comment)
+
+local t_openobject = token("warning", p_objectnumber * spacing)
+-- * t_spacing
+ * token("warning", p_objectnumber * spacing)
+-- * t_spacing
+ * token("keyword", p_obj)
+local t_closeobject = token("keyword", p_endobj)
+
+local t_opendictionary = token("grouping", P("<<"))
+local t_closedictionary = token("grouping", P(">>"))
+
+local t_openarray = token("grouping", P("["))
+local t_closearray = token("grouping", P("]"))
+
+-- todo: comment
+
+local t_stream = token("keyword", p_stream)
+-- * token("default", newline * (1-newline*p_endstream*newline)^1 * newline)
+-- * token("text", (1 - p_endstream)^1)
+ * (token("text", (1 - p_endstream-spacing)^1) + t_spacing)^1
+ * token("keyword", p_endstream)
+
+local t_dictionary = { "dictionary",
+ dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+ array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+ whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+ }
+
+----- t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+----- object = t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+----- dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+----- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+----- whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+----- number = t_number,
+----- }
+
+local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+ dictionary = t_dictionary.dictionary,
+ array = t_dictionary.array,
+ whatever = t_dictionary.whatever,
+ object = t_openobject^-1 * t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+ number = t_number,
+ }
+
+pdfobjectlexer._shared = {
+ dictionary = t_dictionary,
+ object = t_object,
+ stream = t_stream,
+}
+
+pdfobjectlexer._rules = {
+ { "whitespace", t_spacing }, -- in fact, here we don't want whitespace as it's top level lexer work
+ { "object", t_object },
+}
+
+pdfobjectlexer._tokenstyles = context.styleset
+
+return pdfobjectlexer
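
As an aside (illustrative, not part of this commit): the object grammar above is meant to tokenize fragments such as this made up object:

    1 0 obj
    << /Type /Page /Parent 2 0 R >>
    endobj

The two numbers plus obj form the open-object tokens, /Type is lexed as a command (a dictionary key), /Page as a constant (a name value), 2 0 R as a reference, and endobj closes the object.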
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua
new file mode 100644
index 000000000..7097c41a6
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua
@@ -0,0 +1,43 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf xref",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- no longer used: nesting lexers with whitespace in start/stop is unreliable
+
+local P, R = lpeg.P, lpeg.R
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdfxreflexer = lexer.new("pdfxref","scite-context-lexer-pdf-xref")
+local whitespace = pdfxreflexer.whitespace
+
+local spacing = patterns.spacing
+local cardinal = patterns.cardinal
+local alpha = patterns.alpha
+
+local t_spacing = token(whitespace, spacing)
+
+local p_xref = P("xref")
+local t_xref = token("keyword",p_xref)
+ * token("number", cardinal * spacing * cardinal * spacing)
+
+local t_number = token("number", cardinal * spacing * cardinal * spacing)
+ * token("keyword", alpha)
+
+pdfxreflexer._rules = {
+ { "whitespace", t_spacing },
+ { "xref", t_xref },
+ { "number", t_number },
+}
+
+pdfxreflexer._tokenstyles = context.styleset
+
+return pdfxreflexer
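
As an aside (illustrative, not part of this commit): the two rules above target classic xref sections of the form

    xref
    0 2
    0000000000 65535 f
    0000000017 00000 n

The xref keyword plus the two counts are matched by t_xref, and each following entry of two cardinals plus the f or n marker by t_number.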
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf.lua
new file mode 100644
index 000000000..f8e4e7380
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf.lua
@@ -0,0 +1,204 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- pdf is normally static .. i.e. not edited so we don't really
+-- need embedded lexers.
+
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdflexer = lexer.new("pdf","scite-context-lexer-pdf")
+local whitespace = pdflexer.whitespace
+
+----- pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
+----- pdfxreflexer = lexer.load("scite-context-lexer-pdf-xref")
+
+local anything = patterns.anything
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local restofline = patterns.restofline
+
+local t_whitespace = token(whitespace, spacing)
+local t_spacing = token("default", spacing)
+----- t_rest = token("default", nospacing)
+local t_rest = token("default", anything)
+
+local p_comment = P("%") * restofline
+local t_comment = token("comment", p_comment)
+
+-- whatever
+
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local newline = patterns.eol
+local real = patterns.real
+local cardinal = patterns.cardinal
+local alpha = patterns.alpha
+
+local lparent = P("(")
+local rparent = P(")")
+local langle = P("<")
+local rangle = P(">")
+local escape = P("\\")
+local unicodetrigger = P("feff")
+
+local nametoken = 1 - space - S("<>/[]()")
+local name = P("/") * nametoken^1
+
+local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
+
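The P { ... } with V(1) above is a small anonymous grammar: rule 1 refers back to itself, which is what lets balanced nested parentheses inside a PDF string literal match. A minimal stand-alone sketch of the same idiom (plain lpeg, outside the lexer framework, sample input made up):

    local lpeg = require("lpeg")
    local P, V = lpeg.P, lpeg.V

    -- rule 1: an escaped character, a nested "(...)" group (recursing via V(1)),
    -- or any character that is not the closing parenthesis
    local inner     = P { ( P("\\") * P(1) + P("(") * V(1) * P(")") + (1 - P(")")) )^0 }
    local pdfstring = P("(") * inner * P(")")

    print(pdfstring:match("(a (nested \\) paren) here)")) -- 27, i.e. the whole string matched
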
+local t_spacing = token("default", spacing)
+local t_spaces = token("default", spacing)^0
+local t_rest = token("default", nospacing) -- anything
+
+local p_stream = P("stream")
+local p_endstream = P("endstream")
+local p_obj = P("obj")
+local p_endobj = P("endobj")
+local p_reference = P("R")
+
+local p_objectnumber = patterns.cardinal
+local p_comment = P("%") * (1-S("\n\r"))^0
+
+local t_string = token("quote", lparent)
+ * token("string", p_string)
+ * token("quote", rparent)
+local t_unicode = token("quote", langle)
+ * token("plain", unicodetrigger)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_whatsit = token("quote", langle)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_keyword = token("command", name)
+local t_constant = token("constant", name)
+local t_number = token("number", real)
+-- t_reference = token("number", cardinal)
+-- * t_spacing
+-- * token("number", cardinal)
+local t_reserved = token("number", P("true") + P("false") + P("NULL"))
+-- t_reference = token("warning", cardinal * spacing * cardinal * spacing)
+-- * token("keyword", p_reference)
+local t_reference = token("warning", cardinal)
+ * t_spacing
+ * token("warning", cardinal)
+ * t_spacing
+ * token("keyword", p_reference)
+
+local t_comment = token("comment", p_comment)
+
+local t_openobject = token("warning", p_objectnumber)
+ * t_spacing
+ * token("warning", p_objectnumber)
+ * t_spacing
+ * token("keyword", p_obj)
+-- t_openobject = token("warning", p_objectnumber * spacing)
+-- * token("warning", p_objectnumber * spacing)
+-- * token("keyword", p_obj)
+local t_closeobject = token("keyword", p_endobj)
+
+local t_opendictionary = token("grouping", P("<<"))
+local t_closedictionary = token("grouping", P(">>"))
+
+local t_openarray = token("grouping", P("["))
+local t_closearray = token("grouping", P("]"))
+
+local t_stream = token("keyword", p_stream)
+ * token("text", (1 - p_endstream)^1)
+ * token("keyword", p_endstream)
+
+local t_dictionary = { "dictionary",
+ dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+ array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+ whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+ }
+
+local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+ dictionary = t_dictionary.dictionary,
+ array = t_dictionary.array,
+ whatever = t_dictionary.whatever,
+ object = t_openobject * t_spaces * (V("dictionary")^-1 * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+ number = t_number,
+ }
+
+-- objects ... sometimes NUL characters play havoc ... and in xref we have
+-- issues with embedded lexers that have spaces in the start and stop
+-- conditions and this cannot be handled well either ... so, an imperfect
+-- solution ... but anyway, there is not that much that can end up in
+-- the root of the tree so we're sort of safe
+
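The t_dictionary and t_object tables above are lpeg grammars: the first entry names the initial rule and V("...") resolves the named rules at match time, which is how dictionaries and arrays can nest inside each other. A minimal sketch of the same mechanism with plain lpeg (simplified value rule, not the lexer's token wrappers; sample content made up):

    local lpeg = require("lpeg")
    local P, S, V = lpeg.P, lpeg.S, lpeg.V

    local spaces  = S(" \n\r\t")^0
    local grammar = P { "dict",
        dict  = P("<<") * (spaces * V("value"))^0 * spaces * P(">>"),
        array = P("[")  * (spaces * V("value"))^0 * spaces * P("]"),
        value = V("dict") + V("array") + (1 - S(" \n\r\t<>[]"))^1,
    }

    -- a dictionary with a nested array and an indirect reference ("1 0 R")
    print(grammar:match("<< /Type /Page /Kids [ 1 0 R ] >>")) -- 34, one past the end, i.e. a full match
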
+local p_trailer = P("trailer")
+local t_trailer = token("keyword", p_trailer)
+ * t_spacing
+ * t_dictionary
+-- t_trailer = token("keyword", p_trailer * spacing)
+-- * t_dictionary
+
+local p_startxref = P("startxref")
+local t_startxref = token("keyword", p_startxref)
+ * t_spacing
+ * token("number", cardinal)
+-- t_startxref = token("keyword", p_startxref * spacing)
+-- * token("number", cardinal)
+
+local p_xref = P("xref")
+local t_xref = token("keyword",p_xref)
+ * t_spacing
+ * token("number", cardinal)
+ * t_spacing
+ * token("number", cardinal)
+ * spacing
+-- t_xref = token("keyword",p_xref)
+-- * token("number", spacing * cardinal * spacing * cardinal * spacing)
+
+local t_number = token("number", cardinal)
+ * t_spacing
+ * token("number", cardinal)
+ * t_spacing
+ * token("keyword", S("fn"))
+-- t_number = token("number", cardinal * spacing * cardinal * spacing)
+-- * token("keyword", S("fn"))
+
+pdflexer._rules = {
+ { "whitespace", t_whitespace },
+ { "object", t_object },
+ { "comment", t_comment },
+ { "trailer", t_trailer },
+ { "startxref", t_startxref },
+ { "xref", t_xref },
+ { "number", t_number },
+ { "rest", t_rest },
+}
+
+pdflexer._tokenstyles = context.styleset
+
+-- lexer.inspect(pdflexer)
+
+-- collapser: obj endobj stream endstream
+
+pdflexer._foldpattern = p_obj + p_endobj + p_stream + p_endstream
+
+pdflexer._foldsymbols = {
+ ["keyword"] = {
+ ["obj"] = 1,
+ ["endobj"] = -1,
+ ["stream"] = 1,
+ ["endstream"] = -1,
+ },
+}
+
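As a small illustration of the collapser mentioned above (object content made up): obj and stream raise the fold level, endstream and endobj lower it again, so a complete object folds into one line.

    4 0 obj              % fold level +1
    << /Length 12 >>
    stream               % fold level +1
    ...
    endstream            % fold level -1
    endobj               % fold level -1
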
+return pdflexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua b/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua
new file mode 100644
index 000000000..5d8859c26
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua
@@ -0,0 +1,23 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for tex web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local texweblexer = lexer.new("tex-web","scite-context-lexer-tex")
+local texlexer = lexer.load("scite-context-lexer-tex")
+
+-- can probably be done nicer now, a bit of a hack
+
+texweblexer._rules = texlexer._rules_web
+texweblexer._tokenstyles = texlexer._tokenstyles
+texweblexer._foldsymbols = texlexer._foldsymbols
+texweblexer._directives = texlexer._directives
+
+return texweblexer
diff --git a/context/data/scite/lexers/scite-context-lexer-tex.lua b/context/data/scite/context/lexers/scite-context-lexer-tex.lua
index bbe0f016c..d67be2cd8 100644
--- a/context/data/scite/lexers/scite-context-lexer-tex.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-tex.lua
@@ -24,13 +24,6 @@ local info = {
-- local interface = props["keywordclass.macros.context.en"]
-- local interface = lexer.get_property("keywordclass.macros.context.en","")
- -- it seems that whitespace triggers the lexer when embedding happens, but this
- -- is quite fragile due to duplicate styles .. lexer.WHITESPACE is a number
- -- (initially) ... _NAME vs filename (but we don't want to overwrite files)
-
- -- this lexer does not care about other macro packages (one can of course add a fake
- -- interface but it's not on the agenda)
-
]]--
local global, string, table, lpeg = _G, string, table, lpeg
@@ -41,17 +34,16 @@ local find, match, lower, upper = string.find, string.match, string.lower, strin
local lexer = require("lexer")
local context = lexer.context
local patterns = context.patterns
+local inform = context.inform
local token = lexer.token
local exact_match = lexer.exact_match
--- module(...)
-
local contextlexer = lexer.new("tex","scite-context-lexer-tex")
local whitespace = contextlexer.whitespace
-local cldlexer = lexer.load('scite-context-lexer-cld')
-local mpslexer = lexer.load('scite-context-lexer-mps')
+local cldlexer = lexer.load("scite-context-lexer-cld")
+local mpslexer = lexer.load("scite-context-lexer-mps")
local commands = { en = { } }
local primitives = { }
@@ -65,7 +57,9 @@ do -- todo: only once, store in global
local definitions = context.loaddefinitions("scite-context-data-interfaces")
if definitions then
+ local list = { }
for interface, list in next, definitions do
+ list[#list+1] = interface
local c = { }
for i=1,#list do
c[list[i]] = true
@@ -80,6 +74,7 @@ do -- todo: only once, store in global
end
commands[interface] = c
end
+ inform("context user interfaces '%s' supported",table.concat(list," "))
end
local definitions = context.loaddefinitions("scite-context-data-context")
@@ -147,13 +142,16 @@ local validminimum = 3
-- % language=uk
-local knownpreamble = Cmt(#P("% "), function(input,i,_) -- todo : utfbomb
+-- fails (empty loop message) ... latest lpeg issue?
+
+local knownpreamble = Cmt(P("% "), function(input,i,_) -- todo : utfbomb, was #P("% ")
if i < 10 then
validwords, validminimum = false, 3
- local s, e, word = find(input,'^(.+)[\n\r]',i) -- combine with match
+ local s, e, word = find(input,"^(.+)[\n\r]",i) -- combine with match
if word then
local interface = match(word,"interface=([a-z]+)")
- if interface then
+ if interface and #interface == 2 then
+ inform("enabling context user interface '%s'",interface)
currentcommands = commands[interface] or commands.en or { }
end
local language = match(word,"language=([a-z]+)")
@@ -171,7 +169,7 @@ end)
-- local helpers_hash = { } for i=1,#helpers do helpers_hash [helpers [i]] = true end
-- local primitives_hash = { } for i=1,#primitives do primitives_hash[primitives[i]] = true end
--- local specialword = Ct( P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- local specialword = Ct( P("\\") * Cmt( C(cstoken^1), function(input,i,s)
-- if currentcommands[s] then
-- return true, "command", i
-- elseif constants_hash[s] then
@@ -185,7 +183,7 @@ end)
-- end
-- end) )
--- local specialword = P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- local specialword = P("\\") * Cmt( C(cstoken^1), function(input,i,s)
-- if currentcommands[s] then
-- return true, { "command", i }
-- elseif constants_hash[s] then
@@ -203,7 +201,7 @@ end)
-- 10pt
-local commentline = P('%') * (1-S("\n\r"))^0
+local commentline = P("%") * (1-S("\n\r"))^0
local endline = S("\n\r")^1
local space = patterns.space -- S(" \n\r\t\f\v")
@@ -220,7 +218,7 @@ local p_command = backslash * knowncommand
local p_constant = backslash * exact_match(constants)
local p_helper = backslash * exact_match(helpers)
local p_primitive = backslash * exact_match(primitives)
-local p_ifprimitive = P('\\if') * cstoken^1
+local p_ifprimitive = P("\\if") * cstoken^1
local p_csname = backslash * (cstoken^1 + P(1))
local p_grouping = S("{$}")
local p_special = S("#()[]<>=\"")
@@ -300,24 +298,24 @@ local p_invisible = invisibles^1
local spacing = token(whitespace, p_spacing )
-local rest = token('default', p_rest )
-local preamble = token('preamble', p_preamble )
-local comment = token('comment', p_comment )
-local command = token('command', p_command )
-local constant = token('data', p_constant )
-local helper = token('plain', p_helper )
-local primitive = token('primitive', p_primitive )
-local ifprimitive = token('primitive', p_ifprimitive)
-local reserved = token('reserved', p_reserved )
-local csname = token('user', p_csname )
-local grouping = token('grouping', p_grouping )
-local number = token('number', p_number )
- * token('constant', p_unit )
-local special = token('special', p_special )
-local reserved = token('reserved', p_reserved ) -- reserved internal preproc
-local extra = token('extra', p_extra )
-local invisible = token('invisible', p_invisible )
-local text = token('default', p_text )
+local rest = token("default", p_rest )
+local preamble = token("preamble", p_preamble )
+local comment = token("comment", p_comment )
+local command = token("command", p_command )
+local constant = token("data", p_constant )
+local helper = token("plain", p_helper )
+local primitive = token("primitive", p_primitive )
+local ifprimitive = token("primitive", p_ifprimitive)
+local reserved = token("reserved", p_reserved )
+local csname = token("user", p_csname )
+local grouping = token("grouping", p_grouping )
+local number = token("number", p_number )
+ * token("constant", p_unit )
+local special = token("special", p_special )
+local reserved = token("reserved", p_reserved ) -- reserved internal preproc
+local extra = token("extra", p_extra )
+local invisible = token("invisible", p_invisible )
+local text = token("default", p_text )
local word = p_word
----- startluacode = token("grouping", P("\\startluacode"))
@@ -436,9 +434,6 @@ local callers = token("embedded", P("\\") * metafuncall) * metafu
lexer.embed_lexer(contextlexer, cldlexer, startluacode, stopluacode)
lexer.embed_lexer(contextlexer, mpslexer, startmetafuncode, stopmetafuncode)
--- Watch the text grabber, after all, we're talking mostly of text (beware,
--- no punctuation here as it can be special). We might go for utf here.
-
contextlexer._rules = {
{ "whitespace", spacing },
{ "preamble", preamble },
@@ -462,6 +457,60 @@ contextlexer._rules = {
{ "rest", rest },
}
+-- Watch the text grabber, after all, we're talking mostly of text (beware,
+-- no punctuation here as it can be special). We might go for utf here.
+
+local web = lexer.loadluafile("scite-context-lexer-web-snippets")
+
+if web then
+
+ lexer.inform("supporting web snippets in tex lexer")
+
+ contextlexer._rules_web = {
+ { "whitespace", spacing },
+ { "text", text }, -- non words
+ { "comment", comment },
+ { "constant", constant },
+ { "callers", callers },
+ { "helper", helper },
+ { "command", command },
+ { "primitive", primitive },
+ { "ifprimitive", ifprimitive },
+ { "reserved", reserved },
+ { "csname", csname },
+ { "grouping", grouping },
+ { "special", special },
+ { "extra", extra },
+ { "invisible", invisible },
+ { "web", web.pattern },
+ { "rest", rest },
+ }
+
+else
+
+ lexer.report("not supporting web snippets in tex lexer")
+
+ contextlexer._rules_web = {
+ { "whitespace", spacing },
+ { "text", text }, -- non words
+ { "comment", comment },
+ { "constant", constant },
+ { "callers", callers },
+ { "helper", helper },
+ { "command", command },
+ { "primitive", primitive },
+ { "ifprimitive", ifprimitive },
+ { "reserved", reserved },
+ { "csname", csname },
+ { "grouping", grouping },
+ { "special", special },
+ { "extra", extra },
+ { "invisible", invisible },
+ { "rest", rest },
+ }
+
+end
+
contextlexer._tokenstyles = context.styleset
local environment = {
@@ -493,4 +542,6 @@ contextlexer._foldsymbols = { -- these need to be style references
["grouping"] = group,
}
+-- context.inspect(contextlexer)
+
return contextlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-txt.lua b/context/data/scite/context/lexers/scite-context-lexer-txt.lua
index 86570ae64..43eec2c35 100644
--- a/context/data/scite/lexers/scite-context-lexer-txt.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-txt.lua
@@ -6,7 +6,7 @@ local info = {
license = "see context related readme files",
}
-local P, S, Cmt, Cp, Ct = lpeg.P, lpeg.S, lpeg.Cmt, lpeg.Cp, lpeg.Ct
+local P, S, Cmt, Cp = lpeg.P, lpeg.S, lpeg.Cmt, lpeg.Cp
local find, match = string.find, string.match
local lexer = require("lexer")
@@ -37,10 +37,10 @@ local validminimum = 3
-- [#!-%] language=uk
-local p_preamble = Cmt(#(S("#!-%") * P(" ")), function(input,i,_) -- todo: utf bomb
+local p_preamble = Cmt((S("#!-%") * P(" ")), function(input,i,_) -- todo: utf bomb no longer #
if i == 1 then -- < 10 then
validwords, validminimum = false, 3
- local s, e, line = find(input,'^[#!%-%%](.+)[\n\r]',i)
+ local s, e, line = find(input,"^[#!%-%%](.+)[\n\r]",i)
if line then
local language = match(line,"language=([a-z]+)")
if language then
@@ -55,7 +55,6 @@ local t_preamble =
token("preamble", p_preamble)
local t_word =
--- Ct( wordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() ) -- the function can be inlined
wordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() -- the function can be inlined
local t_text =
diff --git a/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua b/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua
new file mode 100644
index 000000000..196a545bc
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua
@@ -0,0 +1,133 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for web snippets",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P, R, S, C, Cg, Cb, Cs, Cmt, lpegmatch = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt, lpeg.match
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local websnippets = { }
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local squote = P("'")
+local dquote = P('"')
+local period = P(".")
+
+local t_whitespace = token(whitespace, space^1)
+local t_spacing = token("default", space^1)
+local t_rest = token("default", any)
+
+-- the web subset
+
+local p_beginofweb = P("@")
+local p_endofweb = P("@>")
+
+-- @, @/ @| @# @+ @; @[ @]
+
+local p_directive_1 = p_beginofweb * S(",/|#+;[]")
+local t_directive_1 = token("label",p_directive_1)
+
+-- @.text @>(monospaced)
+-- @:text @>(macro driven)
+-- @= verbose@>
+-- @! underlined @>
+-- @t text @> (hbox)
+-- @q ignored @>
+
+local p_typeset = p_beginofweb * S(".:=!tq")
+local t_typeset = token("label",p_typeset) * token("warning",(1-p_endofweb)^1) * token("label",p_endofweb)
+
+-- @^index@>
+
+local p_index = p_beginofweb * P("^")
+local t_index = token("label",p_index) * token("function",(1-p_endofweb)^1) * token("label",p_endofweb)
+
+-- @f text renderclass
+
+local p_render = p_beginofweb * S("f")
+local t_render = token("label",p_render) * t_spacing * token("warning",(1-space)^1) * t_spacing * token("label",(1-space)^1)
+
+-- @s idem
+-- @p idem
+-- @& strip (spaces before)
+-- @h
+
+local p_directive_2 = p_beginofweb * S("sp&h")
+local t_directive_2 = token("label",p_directive_2)
+
+-- @< ... @> [=|+=|]
+-- @(foo@>
+
+local p_reference = p_beginofweb * S("<(")
+local t_reference = token("label",p_reference) * token("function",(1-p_endofweb)^1) * token("label",p_endofweb * (P("+=") + P("="))^-1)
+
+-- @'char' (ascii code)
+
+local p_character = p_beginofweb * S("'")
+local t_character = token("label",p_character) * token("reserved",(1-squote)^1) * token("label",squote)
+
+-- @l nonascii
+
+local p_nonascii = p_beginofweb * S("l")
+local t_nonascii = token("label",p_nonascii) * t_spacing * token("reserved",(1-space)^1)
+
+-- @x @y @z changefile
+-- @i webfile
+
+local p_filename = p_beginofweb * S("xyzi")
+local t_filename = token("label",p_filename) * t_spacing * token("reserved",(1-space)^1)
+
+-- @@ escape
+
+local p_escape = p_beginofweb * p_beginofweb
+local t_escape = token("text",p_escape)
+
+-- structure
+
+-- @* title.
+
+-- local p_section = p_beginofweb * P("*")^1
+-- local t_section = token("label",p_section) * t_spacing * token("function",(1-period)^1) * token("label",period)
+
+-- @ explanation
+
+-- local p_explanation = p_beginofweb
+-- local t_explanation = token("label",p_explanation) * t_spacing^1
+
+-- @d macro
+
+-- local p_macro = p_beginofweb * P("d")
+-- local t_macro = token("label",p_macro)
+
+-- @c code
+
+-- local p_code = p_beginofweb * P("c")
+-- local t_code = token("label",p_code)
+
+websnippets.pattern = P (
+ t_typeset
+ + t_index
+ + t_render
+ + t_reference
+ + t_filename
+ + t_directive_1
+ + t_directive_2
+ + t_character
+ + t_nonascii
+ + t_escape
+)
+
+
+return websnippets
diff --git a/context/data/scite/context/lexers/scite-context-lexer-web.lua b/context/data/scite/context/lexers/scite-context-lexer-web.lua
new file mode 100644
index 000000000..86ae76644
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-web.lua
@@ -0,0 +1,67 @@
+local info = {
+ version = 1.003,
+ comment = "scintilla lpeg lexer for web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P, R, S = lpeg.P, lpeg.R, lpeg.S
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local weblexer = lexer.new("web","scite-context-lexer-web")
+local whitespace = weblexer.whitespace
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local period = P(".")
+local percent = P("%")
+
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
+local eop = P("@>")
+local eos = eop * P("+")^-1 * P("=")
+
+-- we can put some of the next in the web-snippets file
+-- is f okay here?
+
+local texcomment = token("comment", percent * restofline^0)
+
+local texpart = token("label",P("@")) * #spacing
+ + token("label",P("@") * P("*")^1) * token("function",(1-period)^1) * token("label",period)
+local midpart = token("label",P("@d")) * #spacing
+ + token("label",P("@f")) * #spacing
+local cpppart = token("label",P("@c")) * #spacing
+ + token("label",P("@p")) * #spacing
+ + token("label",P("@") * S("<(")) * token("function",(1-eop)^1) * token("label",eos)
+
+local anypart = P("@") * ( P("*")^1 + S("dfcp") + space^1 + S("<(") * (1-eop)^1 * eos )
+local limbo = 1 - anypart - percent
+
+local texlexer = lexer.load("scite-context-lexer-tex-web")
+local cpplexer = lexer.load("scite-context-lexer-cpp-web")
+
+lexer.embed_lexer(weblexer, texlexer, texpart + limbo, #anypart)
+lexer.embed_lexer(weblexer, cpplexer, cpppart + midpart, #anypart)
+
+local texcomment = token("comment", percent * restofline^0)
+
+weblexer._rules = {
+ { "whitespace", spacing },
+ { "texcomment", texcomment }, -- else issues with first tex section
+ { "rest", rest },
+}
+
+weblexer._tokenstyles = context.styleset
+
+return weblexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-cdata.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua
index 35e07ef18..e6276da0d 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml-cdata.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua
@@ -20,12 +20,12 @@ local whitespace = xmlcdatalexer.whitespace
local space = patterns.space
local nospace = 1 - space - P("]]>")
-local p_spaces = token(whitespace, space ^1)
-local p_cdata = token("comment", nospace^1)
+local t_spaces = token(whitespace, space ^1)
+local t_cdata = token("comment", nospace^1)
xmlcdatalexer._rules = {
- { "whitespace", p_spaces },
- { "cdata", p_cdata },
+ { "whitespace", t_spaces },
+ { "cdata", t_cdata },
}
xmlcdatalexer._tokenstyles = context.styleset
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-comment.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua
index 70805b249..b5b3fefe0 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml-comment.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua
@@ -20,26 +20,14 @@ local whitespace = xmlcommentlexer.whitespace
local space = patterns.space
local nospace = 1 - space - P("-->")
-local p_spaces = token(whitespace, space ^1)
-local p_comment = token("comment", nospace^1)
+local t_spaces = token(whitespace, space ^1)
+local t_comment = token("comment", nospace^1)
xmlcommentlexer._rules = {
- { "whitespace", p_spaces },
- { "comment", p_comment },
+ { "whitespace", t_spaces },
+ { "comment", t_comment },
}
xmlcommentlexer._tokenstyles = context.styleset
-xmlcommentlexer._foldpattern = P("<!--") + P("-->")
-
-xmlcommentlexer._foldsymbols = {
- _patterns = {
- "<%!%-%-", "%-%->", -- comments
- },
- ["comment"] = {
- ["<!--"] = 1,
- ["-->" ] = -1,
- }
-}
-
return xmlcommentlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-script.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-script.lua
index 13f4cddba..bbb938dc5 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml-script.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-script.lua
@@ -1,6 +1,6 @@
local info = {
version = 1.002,
- comment = "scintilla lpeg lexer for xml cdata",
+ comment = "scintilla lpeg lexer for xml script",
author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
copyright = "PRAGMA ADE / ConTeXt Development Team",
license = "see context related readme files",
@@ -20,12 +20,12 @@ local whitespace = xmlscriptlexer.whitespace
local space = patterns.space
local nospace = 1 - space - (P("</") * P("script") + P("SCRIPT")) * P(">")
-local p_spaces = token(whitespace, space ^1)
-local p_cdata = token("default", nospace^1)
+local t_spaces = token(whitespace, space ^1)
+local t_script = token("default", nospace^1)
xmlscriptlexer._rules = {
- { "whitespace", p_spaces },
- { "script", p_cdata },
+ { "whitespace", t_spaces },
+ { "script", t_script },
}
xmlscriptlexer._tokenstyles = context.styleset
diff --git a/context/data/scite/lexers/scite-context-lexer-xml.lua b/context/data/scite/context/lexers/scite-context-lexer-xml.lua
index fa15bbafd..77c89b1d6 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml.lua
@@ -13,7 +13,7 @@ local info = {
-- todo: parse entities in attributes
local global, string, table, lpeg = _G, string, table, lpeg
-local P, R, S, V, C, Cmt, Ct, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt, lpeg.Ct, lpeg.Cp
+local P, R, S, C, Cmt, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cmt, lpeg.Cp
local type = type
local match, find = string.match, string.find
@@ -27,10 +27,10 @@ local exact_match = lexer.exact_match
local xmllexer = lexer.new("xml","scite-context-lexer-xml")
local whitespace = xmllexer.whitespace
-local xmlcommentlexer = lexer.load("scite-context-lexer-xml-comment") -- indirect (some issue with the lexer framework)
-local xmlcdatalexer = lexer.load("scite-context-lexer-xml-cdata") -- indirect (some issue with the lexer framework)
-local xmlscriptlexer = lexer.load("scite-context-lexer-xml-script") -- indirect (some issue with the lexer framework)
-local lualexer = lexer.load("scite-context-lexer-lua") --
+local xmlcommentlexer = lexer.load("scite-context-lexer-xml-comment")
+local xmlcdatalexer = lexer.load("scite-context-lexer-xml-cdata")
+local xmlscriptlexer = lexer.load("scite-context-lexer-xml-script")
+local lualexer = lexer.load("scite-context-lexer-lua")
local space = patterns.space
local any = patterns.any
@@ -42,7 +42,7 @@ local semicolon = P(";")
local equal = P("=")
local ampersand = P("&")
-local name = (R("az","AZ","09") + S('_-.'))^1
+local name = (R("az","AZ","09") + S("_-."))^1
local openbegin = P("<")
local openend = P("</")
local closebegin = P("/>") + P(">")
@@ -86,12 +86,12 @@ local validminimum = 3
--
-- <?context-directive editor language us ?>
-local p_preamble = Cmt(#P("<?xml "), function(input,i,_) -- todo: utf bomb
+local t_preamble = Cmt(P("<?xml "), function(input,i,_) -- todo: utf bomb, no longer #
if i < 200 then
validwords, validminimum = false, 3
local language = match(input,"^<%?xml[^>]*%?>%s*<%?context%-directive%s+editor%s+language%s+(..)%s+%?>")
-- if not language then
- -- language = match(input,'^<%?xml[^>]*language=[\"\'](..)[\"\'][^>]*%?>',i)
+ -- language = match(input,"^<%?xml[^>]*language=[\"\'](..)[\"\'][^>]*%?>",i)
-- end
if language then
validwords, validminimum = setwordlist(language)
@@ -100,24 +100,23 @@ local p_preamble = Cmt(#P("<?xml "), function(input,i,_) -- todo: utf bomb
return false
end)
-local p_word =
+local t_word =
-- Ct( iwordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() ) -- the function can be inlined
iwordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() -- the function can be inlined
-local p_rest =
+local t_rest =
token("default", any)
-local p_text =
+local t_text =
token("default", (1-S("<>&")-space)^1)
-local p_spacing =
+local t_spacing =
token(whitespace, space^1)
--- token("whitespace", space^1)
-local p_optionalwhitespace =
- p_spacing^0
+local t_optionalwhitespace =
+ token("default", space^1)^0
-local p_localspacing =
+local t_localspacing =
token("default", space^1)
-- Because we want a differently colored open and close we need an embedded lexer (whitespace
@@ -125,22 +124,22 @@ local p_localspacing =
-- Even using different style keys is not robust as they can be shared. I'll fix the main
-- lexer code.
-local p_sstring =
+local t_sstring =
token("quote",dquote)
* token("string",(1-dquote)^0) -- different from context
* token("quote",dquote)
-local p_dstring =
+local t_dstring =
token("quote",squote)
* token("string",(1-squote)^0) -- different from context
* token("quote",squote)
--- local p_comment =
+-- local t_comment =
-- token("command",opencomment)
-- * token("comment",(1-closecomment)^0) -- different from context
-- * token("command",closecomment)
--- local p_cdata =
+-- local t_cdata =
-- token("command",opencdata)
-- * token("comment",(1-closecdata)^0) -- different from context
-- * token("command",closecdata)
@@ -158,74 +157,74 @@ local p_dstring =
-- <!ENTITY xxxx PUBLIC "yyyy" >
-- <!ENTITY xxxx "yyyy" >
-local p_docstr = p_dstring + p_sstring
+local t_docstr = t_dstring + t_sstring
-local p_docent = token("command",P("<!ENTITY"))
- * p_optionalwhitespace
+local t_docent = token("command",P("<!ENTITY"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* (
(
token("constant",P("SYSTEM"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
* token("constant",P("NDATA"))
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("keyword",name)
) + (
token("constant",P("PUBLIC"))
- * p_optionalwhitespace
- * p_docstr
+ * t_optionalwhitespace
+ * t_docstr
) + (
- p_docstr
+ t_docstr
)
)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P(">"))
-local p_docele = token("command",P("<!ELEMENT"))
- * p_optionalwhitespace
+local t_docele = token("command",P("<!ELEMENT"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P("("))
* (
- p_spacing
+ t_localspacing
+ token("constant",P("#CDATA") + P("#PCDATA") + P("ANY"))
+ token("text",P(","))
+ token("comment",(1-S(",)"))^1)
)^1
* token("command",P(")"))
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P(">"))
-local p_docset = token("command",P("["))
- * p_optionalwhitespace
- * ((p_optionalwhitespace * (p_docent + p_docele))^1 + token("comment",(1-P("]"))^0))
- * p_optionalwhitespace
+local t_docset = token("command",P("["))
+ * t_optionalwhitespace
+ * ((t_optionalwhitespace * (t_docent + t_docele))^1 + token("comment",(1-P("]"))^0))
+ * t_optionalwhitespace
* token("command",P("]"))
-local p_doctype = token("command",P("<!DOCTYPE"))
- * p_optionalwhitespace
+local t_doctype = token("command",P("<!DOCTYPE"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* (
(
token("constant",P("PUBLIC"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
) + (
token("constant",P("SYSTEM"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
)
)^-1
- * p_docset^-1
- * p_optionalwhitespace
+ * t_docset^-1
+ * t_optionalwhitespace
* token("command",P(">"))
lexer.embed_lexer(xmllexer, lualexer, token("command", openlua), token("command", closelua))
@@ -233,7 +232,7 @@ lexer.embed_lexer(xmllexer, xmlcommentlexer, token("command", opencomment), toke
lexer.embed_lexer(xmllexer, xmlcdatalexer, token("command", opencdata), token("command", closecdata))
lexer.embed_lexer(xmllexer, xmlscriptlexer, token("command", openscript), token("command", closescript))
--- local p_name =
+-- local t_name =
-- token("plain",name)
-- * (
-- token("default",colon)
@@ -241,11 +240,11 @@ lexer.embed_lexer(xmllexer, xmlscriptlexer, token("command", openscript), toke
-- )
-- + token("keyword",name)
-local p_name = -- more robust
+local t_name = -- more robust
token("plain",name * colon)^-1
* token("keyword",name)
--- local p_key =
+-- local t_key =
-- token("plain",name)
-- * (
-- token("default",colon)
@@ -253,81 +252,82 @@ local p_name = -- more robust
-- )
-- + token("constant",name)
-local p_key =
+local t_key =
token("plain",name * colon)^-1
* token("constant",name)
-local p_attributes = (
- p_optionalwhitespace
- * p_key
- * p_optionalwhitespace
+local t_attributes = (
+ t_optionalwhitespace
+ * t_key
+ * t_optionalwhitespace
* token("plain",equal)
- * p_optionalwhitespace
- * (p_dstring + p_sstring)
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * (t_dstring + t_sstring)
+ * t_optionalwhitespace
)^0
-local p_open =
+local t_open =
token("keyword",openbegin)
* (
- p_name
- * p_optionalwhitespace
- * p_attributes
+ t_name
+ * t_optionalwhitespace
+ * t_attributes
* token("keyword",closebegin)
+
token("error",(1-closebegin)^1)
)
-local p_close =
+local t_close =
token("keyword",openend)
* (
- p_name
- * p_optionalwhitespace
+ t_name
+ * t_optionalwhitespace
* token("keyword",closeend)
+
token("error",(1-closeend)^1)
)
-local p_entity =
+local t_entity =
token("constant",entity)
-local p_instruction =
+local t_instruction =
token("command",openinstruction * P("xml"))
- * p_optionalwhitespace
- * p_attributes
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_attributes
+ * t_optionalwhitespace
* token("command",closeinstruction)
+ token("command",openinstruction * name)
* token("default",(1-closeinstruction)^1)
* token("command",closeinstruction)
-local p_invisible =
+local t_invisible =
token("invisible",invisibles^1)
--- local p_preamble =
--- token('preamble', p_preamble )
+-- local t_preamble =
+-- token("preamble", t_preamble )
xmllexer._rules = {
- { "whitespace", p_spacing },
- { "preamble", p_preamble },
- { "word", p_word },
- -- { "text", p_text },
- -- { "comment", p_comment },
- -- { "cdata", p_cdata },
- { "doctype", p_doctype },
- { "instruction", p_instruction },
- { "close", p_close },
- { "open", p_open },
- { "entity", p_entity },
- { "invisible", p_invisible },
- { "rest", p_rest },
+ { "whitespace", t_spacing },
+ { "preamble", t_preamble },
+ { "word", t_word },
+ -- { "text", t_text },
+ -- { "comment", t_comment },
+ -- { "cdata", t_cdata },
+ { "doctype", t_doctype },
+ { "instruction", t_instruction },
+ { "close", t_close },
+ { "open", t_open },
+ { "entity", t_entity },
+ { "invisible", t_invisible },
+ { "rest", t_rest },
}
xmllexer._tokenstyles = context.styleset
xmllexer._foldpattern = P("</") + P("<") + P("/>") -- separate entry else interference
++ P("<!--") + P("-->")
-xmllexer._foldsymbols = { -- somehow doesn't work yet
+xmllexer._foldsymbols = {
_patterns = {
"</",
"/>",
@@ -338,6 +338,13 @@ xmllexer._foldsymbols = { -- somehow doesn't work yet
["/>"] = -1,
["<"] = 1,
},
+ ["command"] = {
+ ["</"] = -1,
+ ["/>"] = -1,
+ ["<!--"] = 1,
+ ["-->"] = -1,
+ ["<"] = 1,
+ },
}
return xmllexer
diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/context/lexers/scite-context-lexer.lua
index be130077b..6335af911 100644
--- a/context/data/scite/lexers/scite-context-lexer.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer.lua
@@ -8,7 +8,20 @@ local info = {
}
-local trace = true -- false
+-- todo: hook into context resolver etc
+-- todo: only old api in lexers, rest in context subnamespace
+-- todo: make sure we can run in one state .. copies or shared?
+-- todo: auto-nesting
+
+local log = false
+local trace = false
+local detail = false
+local show = false -- nice for tracing (also for later)
+local collapse = false -- can save some 15% (maybe easier on scintilla)
+local inspect = false -- can save some 15% (maybe easier on scintilla)
+
+-- local log = true
+-- local trace = true
-- GET GOING
--
@@ -136,14 +149,56 @@ local trace = true -- false
--
-- Promising is that the library now can use another Lua instance so maybe some day
-- it will get properly in SciTE and we can use more clever scripting.
+--
+-- In some lexers we use embedded ones even if we could do it directly. The reason is
+-- that when the end token is edited (e.g. -->), backtracking to the space before the
+-- begin token (e.g. <!--) results in applying the surrounding whitespace which in
+-- turn means that when the end token is edited right, backtracking doesn't go back.
+-- One solution (in the dll) would be to backtrack several space categories. After all,
+-- lexing is quite fast (applying the result is much slower).
+--
+-- For some reason the first blob of text tends to go wrong (pdf and web). It would be
+-- nice to have 'whole doc' initial lexing. Quite fishy as it makes it impossible to
+-- lex the first part well (for already opened documents) because only a partial
+-- text is passed.
+--
+-- So, maybe I should just write this from scratch (assuming more generic usage)
+-- because after all, the dll expects just tables, based on a string. I can then also
+-- do some more aggressive resource sharing (needed when used generic).
+--
+-- I think that nested lexers are still bugged (esp over longer ranges). It never was
+-- robust or maybe it's simply not meant for too complex cases. The 3.24 version was
+-- probably the best so far. The fact that styles bleed between lexers even if their
+-- states are isolated is an issue. Another issue is that zero characters in the
+-- text passed to the lexer can mess things up (pdf files have them in streams).
+--
+-- For more complex 'languages', like web or xml, we need to make sure that we use
+-- e.g. 'default' for spacing that makes up some construct. Ok, we then still have a
+-- backtracking issue but less.
+--
+-- TODO
+--
+-- I can make an export to context, but first I'll redo the code that makes the grammar,
+-- as we only seem to need
+--
+-- lexer._TOKENSTYLES : table
+-- lexer._CHILDREN : flag
+-- lexer._EXTRASTYLES : table
+-- lexer._GRAMMAR : flag
+--
+-- lexers.load : function
+-- lexers.lex : function
+--
+-- So, if we drop compatibility with other lex definitions, we can make things simpler.
-- TRACING
--
-- The advantage is that we now can check more easily with regular Lua. We can also
-- use wine and print to the console (somehow stdout is intercepted there.) So, I've
-- added a bit of tracing. Interesting is to notice that each document gets its own
--- instance which is pretty inefficient when we are spellchecking (In the past I
--- assumed a shared instance and took some precautions.)
+-- instance which has advantages but also means that when we are spellchecking we
+-- reload the word lists each time. (In the past I assumed a shared instance and took
+-- some precautions.)
local lpeg = require("lpeg")
@@ -157,24 +212,28 @@ local lpegmatch = lpeg.match
local nesting = 0
local function report(fmt,str,...)
- if str then
- fmt = format(fmt,str,...)
+ if log then
+ if str then
+ fmt = format(fmt,str,...)
+ end
+ print(format("scite lpeg lexer > %s > %s",nesting == 0 and "-" or nesting,fmt))
end
- print(format("scite lpeg lexer > %s > %s",nesting == 0 and "-" or nesting,fmt))
end
-if trace then
- report("loading context lexer module (global table: %s)",tostring(global))
+local function inform(...)
+ if log and trace then
+ report(...)
+ end
end
+inform("loading context lexer module (global table: %s)",tostring(global))
+
if not package.searchpath then
-- Unfortunately the io library is only available when we end up
-- in this branch of code.
- if trace then
- report("using adapted function 'package.searchpath'")
- end
+ inform("using adapted function 'package.searchpath' (if used at all)")
function package.searchpath(name,path)
local tried = { }
@@ -182,6 +241,7 @@ if not package.searchpath then
local filename = gsub(part,"%?",name)
local f = io.open(filename,"r")
if f then
+ inform("file found on path: %s",filename)
f:close()
return filename
end
@@ -190,6 +250,7 @@ if not package.searchpath then
-- added: local path .. for testing
local f = io.open(filename,"r")
if f then
+ inform("file found on current path: %s",filename)
f:close()
return filename
end
@@ -207,8 +268,16 @@ lexers.context = context
local patterns = { }
context.patterns = patterns -- todo: lexers.patterns
-lexers._CONTEXTEXTENSIONS = true
-lexers.LEXERPATH = package.path -- no need
+context.report = report
+context.inform = inform
+
+lexers.LEXERPATH = package.path -- can be multiple paths separated by ;
+lexers.LEXERPATH = "./?.lua" -- good enough, will be set anyway (was
+
+if resolvers then
+ -- todo: set LEXERPATH
+ -- todo: set report
+end
local usedlexers = { }
local parent_lexer = nil
@@ -218,20 +287,25 @@ local parent_lexer = nil
-- just add them here. There is also a limit on some 30 styles. Maybe I should
-- hash them in order to reuse.
+-- todo: work with proper hashes and analyze what styles are really used by a
+-- lexer
+
local default = {
"nothing", "whitespace", "comment", "string", "number", "keyword",
"identifier", "operator", "error", "preprocessor", "constant", "variable",
"function", "type", "label", "embedded",
- -- "regex", "class",
"quote", "special", "extra", "reserved", "okay", "warning",
"command", "internal", "preamble", "grouping", "primitive", "plain",
"user",
- -- "invisible", "data",
+ -- not used (yet) .. we cross the 32 boundary so had to patch the initializer, see (1)
+ "char", "class", "data", "definition", "invisible", "regex",
+ "standout", "tag",
+ "text",
}
local predefined = {
- 'default', 'linenumber', 'bracelight', 'bracebad', 'controlchar',
- 'indentguide', 'calltip'
+ "default", "linenumber", "bracelight", "bracebad", "controlchar",
+ "indentguide", "calltip"
}
-- Bah ... ugly ... nicer would be a proper hash .. we now have properties
@@ -240,17 +314,20 @@ local predefined = {
-- are still unknown.
local function preparestyles(list)
+ local reverse = { }
for i=1,#list do
local k = list[i]
local K = upper(k)
local s = "style." .. k
lexers[K] = k -- is this used
- lexers['STYLE_'..K] = '$(' .. k .. ')'
+ lexers["STYLE_"..K] = "$(" .. k .. ")"
+ reverse[k] = true
end
+ return reverse
end
-preparestyles(default)
-preparestyles(predefined)
+local defaultstyles = preparestyles(default)
+local predefinedstyles = preparestyles(predefined)
-- These helpers are set afterwards so we delay their initialization ... there
-- is no need to alias each time again and this way we can more easily adapt
@@ -272,6 +349,25 @@ preparestyles(predefined)
-- It needs checking: do we have access to all properties now? I'll clean
-- this up anyway as I want a simple clean and stable model.
+-- This is somewhat messy. The lexer dll provides some virtual fields:
+--
+-- + property
+-- + property_int
+-- + style_at
+-- + fold_level
+-- + indent_amount
+--
+-- but for some reasons not:
+--
+-- + property_expanded
+--
+-- As a consequence we need to define it here because otherwise the
+-- lexer will crash. The fuzzy thing is that we don't have to define
+-- the property and property_int tables but we do have to define the
+-- expanded beforehand. The folding properties are no longer interfaced
+-- so the interface to scite is now rather weak (only a few hard coded
+-- properties).
+
local FOLD_BASE = 0
local FOLD_HEADER = 0
local FOLD_BLANK = 0
@@ -280,6 +376,130 @@ local style_at = { }
local indent_amount = { }
local fold_level = { }
+local function check_main_properties()
+ if not lexers.property then
+ lexers.property = { }
+ end
+ if not lexers.property_int then
+ lexers.property_int = setmetatable({ }, {
+ __index = function(t,k)
+ -- why the tostring .. it relies on lua casting to a number when
+ -- doing a comparison
+ return tonumber(lexers.property[k]) or 0 -- tostring removed
+ end,
+ __newindex = function(t,k,v)
+ report("properties are read-only, '%s' is not changed",k)
+ end,
+ })
+ end
+end
+
+lexers.property_expanded = setmetatable({ }, {
+ __index = function(t,k)
+ -- better be safe for future changes .. what if at some point this is
+ -- made consistent in the dll ... we need to keep an eye on that
+ local property = lexers.property
+ if not property then
+ check_main_properties()
+ end
+ --
+ return gsub(property[k],"[$%%]%b()", function(k)
+ return t[sub(k,3,-2)]
+ end)
+ end,
+ __newindex = function(t,k,v)
+ report("properties are read-only, '%s' is not changed",k)
+ end,
+})
+
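The metatable above implements a tiny macro expander: gsub with the pattern "[$%%]%b()" finds $(key) references (the %b() matches the balanced parentheses) and the __index function resolves them recursively. A self-contained sketch of the idiom (plain Lua, keys and values made up):

    local gsub, sub = string.gsub, string.sub

    local values = {
        back    = "#000000",
        fore    = "#FFFFFF",
        default = "fore:$(fore),back:$(back)",
    }

    local expanded = setmetatable({ }, {
        __index = function(t,k)
            return (gsub(values[k] or "","[$%%]%b()",function(s)
                return t[sub(s,3,-2)] -- strip "$(" and ")" and recurse
            end))
        end,
    })

    print(expanded.default) -- fore:#FFFFFF,back:#000000
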
+-- A downward compatible feature but obsolete:
+
+-- local function get_property(tag,default)
+-- return lexers.property_int[tag] or lexers.property[tag] or default
+-- end
+
+-- We still want our own properties (as it keeps changing so better play
+-- safe from now on):
+
+local function check_properties(lexer)
+ if lexer.properties then
+ return lexer
+ end
+ check_main_properties()
+ -- we use a proxy
+ local mainproperties = lexers.property
+ local properties = { }
+ local expanded = setmetatable({ }, {
+ __index = function(t,k)
+ return gsub(properties[k] or mainproperties[k],"[$%%]%b()", function(k)
+ return t[sub(k,3,-2)]
+ end)
+ end,
+ })
+ lexer.properties = setmetatable(properties, {
+ __index = mainproperties,
+ __call = function(t,k,default) -- expands
+ local v = expanded[k]
+ local t = type(default)
+ if t == "number" then
+ return tonumber(v) or default
+ elseif t == "boolean" then
+ return v == nil and default or v
+ else
+ return v or default
+ end
+ end,
+ })
+ return lexer
+end
+
+-- do
+-- lexers.property = { foo = 123, red = "R" }
+-- local a = check_properties({}) print("a.foo",a.properties.foo)
+-- a.properties.foo = "bar" print("a.foo",a.properties.foo)
+-- a.properties.foo = "bar:$(red)" print("a.foo",a.properties.foo) print("a.foo",a.properties("foo"))
+-- end
+
+local function set(value,default)
+ if value == 0 or value == false or value == "0" then
+ return false
+ elseif value == 1 or value == true or value == "1" then
+ return true
+ else
+ return default
+ end
+end
+
+local function check_context_properties()
+ local property = lexers.property -- let's hope that this stays
+ log = set(property["lexer.context.log"], log)
+ trace = set(property["lexer.context.trace"], trace)
+ detail = set(property["lexer.context.detail"], detail)
+ show = set(property["lexer.context.show"], show)
+ collapse = set(property["lexer.context.collapse"],collapse)
+ inspect = set(property["lexer.context.inspect"], inspect)
+end
+
+function context.registerproperties(p) -- global
+ check_main_properties()
+ local property = lexers.property -- let's hope that this stays
+ for k, v in next, p do
+ property[k] = v
+ end
+ check_context_properties()
+end
+
+context.properties = setmetatable({ }, {
+ __index = lexers.property,
+ __newindex = function(t,k,v)
+ check_main_properties()
+ lexers.property[k] = v
+ check_context_properties()
+ end,
+})
+
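A hedged usage sketch of the hooks above: a host or startup script could push properties through context.registerproperties, after which the lexer.context.* keys switch the tracing flags via set(). How (and whether) the editor side actually passes these is an assumption here.

    context.registerproperties {
        ["lexer.context.log"]   = "1", -- enable report()
        ["lexer.context.trace"] = "1", -- enable inform()
    }
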
+-- We want locals so we set them delayed. Once.
+
local function initialize()
FOLD_BASE = lexers.FOLD_BASE
FOLD_HEADER = lexers.FOLD_HEADER
@@ -289,27 +509,11 @@ local function initialize()
indent_amount = lexers.indent_amount -- table
fold_level = lexers.fold_level -- table
--
- initialize = nil
+ check_main_properties()
--
+ initialize = nil
end
-local function get_property(tag,default)
- return lexers.property_int[tag] or lexers.property[tag] or default
-end
-
--- Do we really need this?
-
-lexers.property_expanded = setmetatable({ }, {
- __index = function(t, key)
- return gsub(lexers.property[key],'[$%%]%b()', function(key)
- return t[sub(key,3,-2)]
- end)
- end,
- __newindex = function(t,k,v)
- report("properties are read-only, '%s' is not changed",k)
- end,
-})
-
-- Style handler.
--
-- The property table will be set later (after loading) by the library. The
@@ -344,6 +548,19 @@ end
context.toproperty = toproperty
context.tostyles = tostyles
+local function sortedkeys(hash)
+ local t, n = { }, 0
+ for k, v in next, hash do
+ t[#t+1] = k
+ local l = #tostring(k)
+ if l > n then
+ n = l
+ end
+ end
+ table.sort(t)
+ return t, n
+end
+
-- If we had one instance/state of Lua as well as all regular libraries
-- preloaded we could use the context base libraries. So, let's go poor-
-- mans solution now.
@@ -353,66 +570,105 @@ function context.registerstyles(styles)
context.styles = styles
context.styleset = styleset
if trace then
- local t, n = { }, 0
- for k, v in next, styleset do
- t[#t+1] = k
- if #k > n then
- n = #k
+ if detail then
+ local t, n = sortedkeys(styleset)
+ local template = " %-" .. n .. "s : %s"
+ report("initializing styleset:")
+ for i=1,#t do
+ local k = t[i]
+ report(template,k,styleset[k])
end
- end
- table.sort(t)
- local template = " %-" .. n .. "s : %s"
- report("initializing styleset:")
- for i=1,#t do
- local k = t[i]
- report(template,k,styleset[k])
+ else
+ report("initializing styleset")
end
end
end
-- Some spell checking related stuff. Unfortunately we cannot use a path set
--- by property.
+-- by property. This will get a hook for resolvers.
local locations = {
- "data", -- optional data directory
- "..", -- regular scite directory
- "lexers", -- new in 3.41 .. no tracing possible
- "lexers/data", -- new in 3.41 .. no tracing possible
- "../data", -- new in 3.41 .. no tracing possible
+ "context/lexers", -- context lexers
+ "context/lexers/data", -- context lexers
+ "../lexers", -- original lexers
+ "../lexers/data", -- original lexers
+ ".", -- whatever
+ "./data", -- whatever
}
local function collect(name)
- local okay, definitions = pcall(function () return require(name) end)
- if okay then
- if type(definitions) == "function" then
- definitions = definitions()
+ local root = gsub(lexers.LEXERPATH or ".","/.-lua$","") .. "/" -- this is a horrible hack
+ -- report("module '%s' locating '%s'",tostring(lexers),name)
+ for i=1,#locations do
+ local fullname = root .. locations[i] .. "/" .. name .. ".lua" -- so we can also check for .luc
+ if trace then
+ report("attempt to locate '%s'",fullname)
end
- if type(definitions) == "table" then
- return definitions
+ local okay, result = pcall(function () return dofile(fullname) end)
+ if okay then
+ return result, fullname
end
- else
end
end
+function context.loadluafile(name)
+ local data, fullname = collect(name)
+ if data then
+ if trace then
+ report("lua file '%s' has been loaded",fullname)
+ end
+ return data, fullname
+ end
+ report("unable to load lua file '%s'",name)
+end
+
+-- in fact we could share more as we probably process the data but then we need
+-- to have a more advanced helper
+
+local cache = { }
+
function context.loaddefinitions(name)
- for i=1,#locations do
- local data = collect(locations[i] .. "/" .. name)
- if data then
- if trace then
- report("definition file '%s' has been loaded",name)
+ local data = cache[name]
+ if data then
+ if trace then
+ report("reusing definitions '%s'",name)
+ end
+ return data
+ elseif trace and data == false then
+ report("definitions '%s' were not found",name)
+ end
+ local data, fullname = collect(name)
+ if not data then
+ report("unable to load definition file '%s'",name)
+ data = false
+ elseif trace then
+ report("definition file '%s' has been loaded",fullname)
+ if detail then
+ local t, n = sortedkeys(data)
+ local template = " %-" .. n .. "s : %s"
+ for i=1,#t do
+ local k = t[i]
+ local v = data[k]
+ if type(v) ~= "table" then
+ report(template,k,tostring(v))
+ elseif #v > 0 then
+ report(template,k,#v)
+ else
+ -- no need to show hash
+ end
end
- return data
end
end
- report("unable to load definition file '%s'",name)
+ cache[name] = data
+ return type(data) == "table" and data
end
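The cache above remembers a failed load as false (so that a retry can at least be reported when tracing) and the final return hands back a table or false. A stand-alone sketch of the negative-caching idiom, here short-circuiting on a remembered failure as well (names made up):

    local cache = { }

    local function cachedload(name,loader)
        local data = cache[name]
        if data == nil then
            data = loader(name) or false -- false marks "tried and failed"
            cache[name] = data
        end
        return type(data) == "table" and data or nil
    end
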
function context.word_match(words,word_chars,case_insensitive)
- local chars = '%w_' -- maybe just "" when word_chars
+ local chars = "%w_" -- maybe just "" when word_chars
if word_chars then
- chars = '^([' .. chars .. gsub(word_chars,'([%^%]%-])', '%%%1') ..']+)'
+ chars = "^([" .. chars .. gsub(word_chars,"([%^%]%-])", "%%%1") .."]+)"
else
- chars = '^([' .. chars ..']+)'
+ chars = "^([" .. chars .."]+)"
end
if case_insensitive then
local word_list = { }
@@ -448,8 +704,8 @@ do
local period = P(".")
local octdigit = R("07")
local hexdigit = R("09","AF","af")
- local lower = R('az')
- local upper = R('AZ')
+ local lower = R("az")
+ local upper = R("AZ")
local alpha = upper + lower
local space = S(" \n\r\t\f\v")
local eol = S("\r\n")
@@ -458,7 +714,7 @@ do
local octal = P("0")
* octdigit^1
local hexadecimal = P("0") * S("xX")
- * (hexdigit^0 * '.' * hexdigit^1 + hexdigit^1 * '.' * hexdigit^0 + hexdigit^1)
+ * (hexdigit^0 * period * hexdigit^1 + hexdigit^1 * period * hexdigit^0 + hexdigit^1)
* (S("pP") * sign^-1 * hexdigit^1)^-1 -- *
patterns.idtoken = idtoken
@@ -467,9 +723,9 @@ do
patterns.period = period
patterns.octdigit = octdigit
patterns.hexdigit = hexdigit
- patterns.ascii = R('\000\127') -- useless
- patterns.extend = R('\000\255') -- useless
- patterns.control = R('\000\031')
+ patterns.ascii = R("\000\127") -- useless
+ patterns.extend = R("\000\255") -- useless
+ patterns.control = R("\000\031")
patterns.lower = lower
patterns.upper = upper
patterns.alpha = alpha
@@ -477,7 +733,7 @@ do
patterns.octal = octal
patterns.hexadecimal = hexadecimal
patterns.float = sign^-1
- * (digit^0 * '.' * digit^1 + digit^1 * '.' * digit^0 + digit^1)
+ * (digit^0 * period * digit^1 + digit^1 * period * digit^0 + digit^1)
* S("eE") * sign^-1 * digit^1 -- *
patterns.cardinal = decimal
@@ -508,7 +764,7 @@ do
end)
-- These are the expected ones for other lexers. Maybe all in own namespace
- -- and provide compatibility layer.
+ -- and provide compatibility layer. or should I just remove them?
lexers.any = anything
lexers.ascii = ascii
@@ -520,9 +776,9 @@ do
lexers.upper = upper
lexers.xdigit = hexdigit
lexers.cntrl = control
- lexers.graph = R('!~')
- lexers.print = R(' ~')
- lexers.punct = R('!/', ':@', '[\'', '{~')
+ lexers.graph = R("!~")
+ lexers.print = R(" ~")
+ lexers.punct = R("!/", ":@", "[\'", "{~")
lexers.space = space
lexers.newline = S("\r\n\f")^1
lexers.nonnewline = 1 - lexers.newline
@@ -532,7 +788,7 @@ do
lexers.hex_num = hexadecimal
lexers.integer = integer
lexers.float = float
- lexers.word = (alpha + '_') * (alpha + digit + '_')^0 -- weird, why digits
+ lexers.word = (alpha + "_") * (alpha + digit + "_")^0 -- weird, why digits
end
@@ -616,6 +872,7 @@ function context.setwordlist(tag,limit) -- returns hash (lowercase keys and orig
if not list then
list = context.loaddefinitions("spell-" .. tag)
if not list or type(list) ~= "table" then
+ report("invalid spell checking list for '%s'",tag)
list = { words = false, min = 3 }
else
list.words = list.words or false
@@ -623,6 +880,9 @@ function context.setwordlist(tag,limit) -- returns hash (lowercase keys and orig
end
lists[tag] = list
end
+ if trace then
+ report("enabling spell checking for '%s' with minimum '%s'",tag,list.min)
+ end
return list.words, list.min
end
@@ -735,13 +995,13 @@ local function fold_by_parsing(text,start_pos,start_line,start_level,lexer)
local fold_symbols_patterns = fold_symbols._patterns
local action_y = function(pos,line)
for j = 1, #fold_symbols_patterns do
- for s, match in gmatch(line,fold_symbols_patterns[j]) do -- '()('..patterns[i]..')'
+ for s, match in gmatch(line,fold_symbols_patterns[j]) do -- "()(" .. patterns[i] .. ")"
local symbols = fold_symbols[style_at[start_pos + pos + s - 1]]
local l = symbols and symbols[match]
local t = type(l)
- if t == 'number' then
+ if t == "number" then
current_level = current_level + l
- elseif t == 'function' then
+ elseif t == "function" then
current_level = current_level + l(text, pos, line, s, match)
end
end
@@ -860,7 +1120,7 @@ local threshold_by_indentation = 512 * 1024 -- we don't know the filesize yet
local threshold_by_line = 512 * 1024 -- we don't know the filesize yet
function context.fold(lexer,text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go
- if text == '' then
+ if text == "" then
return { }
end
if initialize then
@@ -873,15 +1133,15 @@ function context.fold(lexer,text,start_pos,start_line,start_level) -- hm, we had
if filesize <= threshold_by_lexer then
return fold_by_lexer(text,start_pos,start_line,start_level,lexer)
end
- elseif fold_by_symbols then -- and get_property('fold.by.parsing',1) > 0 then
+ elseif fold_by_symbols then -- and lexer.properties("fold.by.parsing",1) > 0 then
if filesize <= threshold_by_parsing then
return fold_by_parsing(text,start_pos,start_line,start_level,lexer)
end
- elseif get_property('fold.by.indentation',1) > 0 then
+ elseif lexer.properties("fold.by.indentation",1) > 0 then
if filesize <= threshold_by_indentation then
return fold_by_indentation(text,start_pos,start_line,start_level,lexer)
end
- elseif get_property('fold.by.line',1) > 0 then
+ elseif lexer.properties("fold.by.line",1) > 0 then
if filesize <= threshold_by_line then
return fold_by_line(text,start_pos,start_line,start_level,lexer)
end
@@ -889,10 +1149,6 @@ function context.fold(lexer,text,start_pos,start_line,start_level) -- hm, we had
return { }
end
--- function context.fold(lexer,text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go
--- return { }
--- end
-
-- The following code is mostly unchanged:
local function add_rule(lexer,id,rule) -- unchanged
@@ -904,8 +1160,41 @@ local function add_rule(lexer,id,rule) -- unchanged
lexer._RULEORDER[#lexer._RULEORDER + 1] = id
end
-local function add_style(lexer,token_name,style) -- unchanged (well, changed a bit around 3.41)
--- if not lexer._TOKENSTYLES[token_name] then
+-- I finally figured out that adding more styles was an issue because of several
+-- reasons:
+--
+-- + in old versions there was a limit in the amount, so we overran the built-in
+-- hard coded scintilla range
+-- + then, the add_style function didn't check for already known ones, so again
+-- we had an overrun (with some magic that could be avoided)
+-- + then, when I messed with a new default set I realized that there is no check
+-- in initializing _TOKENSTYLES (here the inspect function helps)
+-- + of course it was mostly a side effect of passing all the used styles to the
+-- _tokenstyles instead of only the not-default ones but such a thing should not
+-- matter (read: intercepted)
+--
+-- This finally removed a headache and was revealed by lots of tracing, which I
+-- should have built in way earlier.
+
+local function add_style(lexer,token_name,style) -- changed a bit around 3.41
+ -- We don't add styles that are already defined as this can overflow the
+ -- amount possible (in old versions of scintilla).
+ if defaultstyles[token_name] then
+ if trace and detail then
+ report("default style '%s' is ignored as extra style",token_name)
+ end
+ return
+ elseif predefinedstyles[token_name] then
+ if trace and detail then
+ report("predefined style '%s' is ignored as extra style",token_name)
+ end
+ return
+ else
+ if trace and detail then
+ report("adding extra style '%s' as '%s'",token_name,style)
+ end
+ end
+ -- This is unchanged. We skip the dangerous zone.
local num_styles = lexer._numstyles
if num_styles == 32 then
num_styles = num_styles + 8
@@ -916,27 +1205,56 @@ local function add_style(lexer,token_name,style) -- unchanged (well, changed a b
lexer._TOKENSTYLES[token_name] = num_styles
lexer._EXTRASTYLES[token_name] = style
lexer._numstyles = num_styles + 1
--- end
+end
+
+local function check_styles(lexer)
+ -- Here we also use a check for the dangerous zone. That way we can have a
+ -- larger default set. The original code just assumes that #default is less
+ -- than the dangerous zone's start.
+ local numstyles = 0
+ local tokenstyles = { }
+ for i=1, #default do
+ if numstyles == 32 then
+ numstyles = numstyles + 8
+ end
+ tokenstyles[default[i]] = numstyles
+ numstyles = numstyles + 1
+ end
+ -- Unchanged.
+ for i=1, #predefined do
+ tokenstyles[predefined[i]] = i + 31
+ end
+ lexer._TOKENSTYLES = tokenstyles
+ lexer._numstyles = numstyles
+ lexer._EXTRASTYLES = { }
+ return lexer
end
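
-- A small worked example (not part of the patch) of the numbering that check_styles
-- produces when the default set grows beyond the old limit; the counts are assumed,
-- only the skip over the 32..39 zone matters:
--
--   default[1..32]   -> style numbers  0 .. 31
--   predefined[1..8] -> style numbers 32 .. 39   (the zone skipped above)
--   default[33..34]  -> style numbers 40 .. 41
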
-- At some point an 'any' append showed up in the original code ...
-- but I see no need to catch that case ... better fix the specification.
+--
+-- hm, why are many joined twice
local function join_tokens(lexer) -- slightly different from the original (no 'any' append)
- local patterns = lexer._RULES
- local order = lexer._RULEORDER
- local token_rule = patterns[order[1]] -- normally whitespace
- for i=2, #order do
- token_rule = token_rule + patterns[order[i]]
- end
- if lexer._TYPE ~= "context" then
- token_rule = token_rule + lexers.token(lexers.DEFAULT, patterns.any)
+ local patterns = lexer._RULES
+ local order = lexer._RULEORDER
+ -- report("lexer: %s, tokens: %s",lexer._NAME,table.concat(order," + "))
+ if patterns and order then
+ local token_rule = patterns[order[1]] -- normally whitespace
+ for i=2,#order do
+ token_rule = token_rule + patterns[order[i]]
+ end
+ if lexer._TYPE ~= "context" then
+ token_rule = token_rule + lexers.token(lexers.DEFAULT, patterns.any)
+ end
+ lexer._TOKENRULE = token_rule
+ return token_rule
+ else
+ return P(1)
end
- lexer._TOKENRULE = token_rule
- return token_rule
end
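
-- Illustration (not part of the patch): the rule tables walked here follow the
-- shape set up by add_rule, and the resulting token_rule is the ordered
-- alternation of the patterns; names below are borrowed from the web lexer.
--
--   _RULEORDER = { "whitespace", "keyword", "string", "rest" }  -- first match wins
--   _RULES     = { whitespace = spacing, keyword = keyword, ... }
--   token_rule = spacing + keyword + shortstring + rest
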
-local function add_lexer(grammar, lexer, token_rule) -- mostly the same as the original
+local function add_lexer(grammar, lexer) -- mostly the same as the original
local token_rule = join_tokens(lexer)
local lexer_name = lexer._NAME
local children = lexer._CHILDREN
@@ -947,20 +1265,24 @@ local function add_lexer(grammar, lexer, token_rule) -- mostly the same as the o
end
local child_name = child._NAME
local rules = child._EMBEDDEDRULES[lexer_name]
- local rules_token_rule = grammar['__'..child_name] or rules.token_rule
- grammar[child_name] = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name)
- local embedded_child = '_' .. child_name
- grammar[embedded_child] = rules.start_rule * (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1
+ local rules_token_rule = grammar["__" .. child_name] or rules.token_rule
+ local pattern = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1
+ grammar[child_name] = pattern * V(lexer_name)
+ local embedded_child = "_" .. child_name
+ grammar[embedded_child] = rules.start_rule * pattern
token_rule = V(embedded_child) + token_rule
end
- grammar['__' .. lexer_name] = token_rule
+ if trace then
+ report("adding lexer '%s' with %s children",lexer_name,#children)
+ end
+ grammar["__" .. lexer_name] = token_rule
grammar[lexer_name] = token_rule^0
end
local function build_grammar(lexer,initial_rule) -- same as the original
local children = lexer._CHILDREN
+ local lexer_name = lexer._NAME
if children then
- local lexer_name = lexer._NAME
if not initial_rule then
initial_rule = lexer_name
end
@@ -968,17 +1290,88 @@ local function build_grammar(lexer,initial_rule) -- same as the original
add_lexer(grammar, lexer)
lexer._INITIALRULE = initial_rule
lexer._GRAMMAR = Ct(P(grammar))
+ if trace then
+ report("building grammar for '%s' with whitespace '%s'and %s children",lexer_name,lexer.whitespace or "?",#children)
+ end
else
lexer._GRAMMAR = Ct(join_tokens(lexer)^0)
+ if trace then
+ report("building grammar for '%s' with whitespace '%s'",lexer_name,lexer.whitespace or "?")
+ end
end
end
--- So far. We need these local functions in the next one. We have these
--- redefinitions because we memoize the lexers ... it looks like in
--- 3.1.4 something similar now happens with 'lexers'.
+-- So far. We need these local functions in the next one.
local lineparsers = { }
+local maxmatched = 100
+
+local function collapsed(t)
+ local lasttoken = nil
+ local lastindex = nil
+ for i=1,#t,2 do
+ local token = t[i]
+ local position = t[i+1]
+ if token == lasttoken then
+ t[lastindex] = position
+ elseif lastindex then
+ lastindex = lastindex + 1
+ t[lastindex] = token
+ lastindex = lastindex + 1
+ t[lastindex] = position
+ lasttoken = token
+ else
+ lastindex = i+1
+ lasttoken = token
+ end
+ end
+ for i=#t,lastindex+1,-1 do
+ t[i] = nil
+ end
+ return t
+end
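
-- For illustration (not part of the patch): the effect of collapsed on the flat
-- token/position stream; token names and positions are made up.

local before = { "comment", 10, "comment", 25, "text", 40, "text", 60 }
local after  = { "comment", 25, "text", 60 }
-- adjacent entries carrying the same token name are merged into one range, so the
-- editor gets fewer (but equivalent) styling requests
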
+
+local function matched(lexer,grammar,text)
+ -- text = string.gsub(text,"\z","!")
+ local t = lpegmatch(grammar,text)
+ if trace then
+ if show then
+ report("output of lexer: %s (max %s entries)",lexer._NAME,maxmatched)
+ local s = lexer._TOKENSTYLES
+ local p = 1
+ for i=1,2*maxmatched,2 do
+ local n = i + 1
+ local ti = t[i]
+ local tn = t[n]
+ if ti then
+ local txt = sub(text,p,tn-1)
+ if txt then
+ txt = gsub(txt,"[%s]"," ")
+ else
+ txt = "!no text!"
+ end
+ report("%4i : %s > %s (%s) (%s)",n/2,ti,tn,s[ti] or "!unset!",txt)
+ p = tn
+ else
+ break
+ end
+ end
+ end
+ report("lexer results: %s, length: %s, ranges: %s",lexer._NAME,#text,#t/2)
+ if collapse then
+ t = collapsed(t)
+ report("lexer collapsed: %s, length: %s, ranges: %s",lexer._NAME,#text,#t/2)
+ end
+ elseif collapse then
+ t = collapsed(t)
+ end
+ return t
+end
+
+-- Todo: make nice generic lexer (extra argument with start/stop commands) for
+-- context itself.
+
function context.lex(lexer,text,init_style)
-- local lexer = global._LEXER
local grammar = lexer._GRAMMAR
@@ -1007,7 +1400,7 @@ function context.lex(lexer,text,init_style)
offset = offset + length
if noftokens > 0 and tokens[noftokens] ~= offset then
noftokens = noftokens + 1
- tokens[noftokens] = 'default'
+ tokens[noftokens] = "default"
noftokens = noftokens + 1
tokens[noftokens] = offset + 1
end
@@ -1017,10 +1410,7 @@ function context.lex(lexer,text,init_style)
end
lpegmatch(lineparser,text)
return tokens
-
elseif lexer._CHILDREN then
- -- as we cannot print, tracing is not possible ... this might change as we can as well
- -- generate them all in one go (sharing as much as possible)
local hash = lexer._HASH -- hm, was _hash
if not hash then
hash = { }
@@ -1029,12 +1419,13 @@ function context.lex(lexer,text,init_style)
grammar = hash[init_style]
if grammar then
lexer._GRAMMAR = grammar
+ -- lexer._GRAMMAR = lexer._GRAMMAR or grammar
else
for style, style_num in next, lexer._TOKENSTYLES do
if style_num == init_style then
-- the name of the lexers is filtered from the whitespace
- -- specification
- local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
+ -- specification .. weird code, should be a reverse hash
+ local lexer_name = match(style,"^(.+)_whitespace") or lexer._NAME
if lexer._INITIALRULE ~= lexer_name then
grammar = hash[lexer_name]
if not grammar then
@@ -1049,22 +1440,19 @@ function context.lex(lexer,text,init_style)
grammar = grammar or lexer._GRAMMAR
hash[init_style] = grammar
end
- return lpegmatch(grammar,text)
+ if trace then
+ report("lexing '%s' with initial style '%s' and %s children",lexer._NAME,#lexer._CHILDREN or 0,init_style)
+ end
+ return matched(lexer,grammar,text)
else
- return lpegmatch(grammar,text)
+ if trace then
+ report("lexing '%s' with initial style '%s'",lexer._NAME,init_style)
+ end
+ return matched(lexer,grammar,text)
end
end
-
--- so far
-
--- todo: keywords: one lookup and multiple matches
-
--- function context.token(name, patt)
--- return Ct(patt * Cc(name) * Cp())
--- end
---
--- -- hm, changed in 3.24 .. no longer a table
+-- hm, changed in 3.24 .. no longer a small table per token but one flat table:
function context.token(name, patt)
return patt * Cc(name) * Cp()
@@ -1091,35 +1479,6 @@ end
-- as we don't want to clash with existing files, we end up in
-- lexers not being found.
-local function check_properties()
- if not lexers.property then
- lexers.property = { }
- lexers.property_int = setmetatable({ }, {
- __index = function(t,k)
- return tostring(tonumber(lexers.property[k]) or 0)
- end,
- __newindex = function(t,k,v)
- report("properties are read-only, '%s' is not changed",k)
- end,
- })
- end
-end
-
-local function check_styles(lexer)
- local numstyles = #default
- local tokenstyles = { }
- for i=1, #default do
- tokenstyles[default[i]] = i - 1
- end
- for i=1, #predefined do
- tokenstyles[predefined[i]] = i + 31
- end
- lexer._TOKENSTYLES = tokenstyles
- lexer._numstyles = numstyles
- lexer._EXTRASTYLES = { }
- return lexer
-end
-
local whitespaces = { }
local function push_whitespace(name)
@@ -1146,13 +1505,13 @@ function context.new(name,filename)
--
name = name,
filename = filename,
- whitespace = whitespace,
}
if trace then
report("initializing lexer tagged '%s' from file '%s'",name,filename or name)
end
- check_styles(lexer)
check_whitespace(lexer)
+ check_styles(lexer)
+ check_properties(lexer)
return lexer
end
@@ -1164,66 +1523,123 @@ local function nolexer(name)
}
check_styles(lexer)
check_whitespace(lexer)
+ check_properties(lexer)
return lexer
end
-local function load_lexer(name)
- local lexer, okay = nil, false
- -- first locate the file (methods have changed over time)
- local lexer_file = package.searchpath(name,lexers.LEXERPATH)
- if not lexer_file or lexer_file == "" then
- report("lexer file '%s' can't be located",name)
- else
- if trace then
- report("loading lexer file '%s'",lexer_file)
- end
- push_whitespace(name) -- for traditional lexers .. no alt_name yet
- okay, lexer = pcall(dofile, lexer_file or '')
- pop_whitespace()
- if not okay then
- report("invalid lexer file '%s'",lexer_file)
- elseif trace then
- report("lexer file '%s' has been loaded",lexer_file)
- end
+local function load_lexer(name,namespace)
+ if trace then
+ report("loading lexer file '%s'",name)
+ end
+ push_whitespace(namespace or name) -- for traditional lexers .. no alt_name yet
+ local lexer, fullname = context.loadluafile(name)
+ pop_whitespace()
+ if not lexer then
+ report("invalid lexer file '%s'",name)
+ elseif trace then
+ report("lexer file '%s' has been loaded",fullname)
end
if type(lexer) ~= "table" then
+ if trace then
+ report("lexer file '%s' gets a dummy lexer",name)
+ end
return nolexer(name)
end
if lexer._TYPE ~= "context" then
lexer._TYPE = "native"
check_styles(lexer)
- check_whitespace(lexer,name)
+ check_whitespace(lexer,namespace or name)
+ check_properties(lexer)
end
if not lexer._NAME then
lexer._NAME = name -- so: filename
end
+ if name ~= namespace then
+ lexer._NAME = namespace
+ end
return lexer
end
+-- tracing ...
+
+local function inspect_lexer(lexer,level)
+ -- If we had the regular libs available I could use the usual
+ -- helpers.
+ local parent = lexer._lexer
+ lexer._lexer = nil -- prevent endless recursion
+ local name = lexer._NAME
+ local function showstyles_1(tag,styles)
+ local numbers = { }
+ for k, v in next, styles do
+ numbers[v] = k
+ end
+ -- sort by number and make number hash too
+ local keys = sortedkeys(numbers)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = numbers[k]
+ report("[%s %s] %s %s = %s",level,name,tag,k,v)
+ end
+ end
+ local function showstyles_2(tag,styles)
+ local keys = sortedkeys(styles)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = styles[k]
+ report("[%s %s] %s %s = %s",level,name,tag,k,v)
+ end
+ end
+ local keys = sortedkeys(lexer)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = lexer[k]
+ report("[%s %s] root key : %s = %s",level,name,k,tostring(v))
+ end
+ showstyles_1("token style",lexer._TOKENSTYLES)
+ showstyles_2("extra style",lexer._EXTRASTYLES)
+ local children = lexer._CHILDREN
+ if children then
+ for i=1,#children do
+ inspect_lexer(children[i],level+1)
+ end
+ end
+ lexer._lexer = parent
+end
+
+function context.inspect(lexer)
+ inspect_lexer(lexer,0)
+end
+
-- An optional second argument has been introduced so that one can embed a lexer
-- more than once ... maybe something to look into (as now it's done by remembering
-- the start sequence ... quite okay but maybe suboptimal ... anyway, never change
-- a working solution).
-function context.load(filename)
+-- namespace can be automatic: if parent then use name of parent (chain)
+
+function context.loadlexer(filename,namespace)
nesting = nesting + 1
- local lexer = usedlexers[filename] -- we load by filename but the internal name can be short
+ if not namespace then
+ namespace = filename
+ end
+ local lexer = usedlexers[namespace] -- we load by filename but the internal name can be short
if lexer then
if trace then
- report("reusing lexer '%s'",filename)
+ report("reusing lexer '%s'",namespace)
end
nesting = nesting - 1
return lexer
- end
- if trace then
- report("loading lexer '%s'",filename)
+ elseif trace then
+ report("loading lexer '%s'",namespace)
end
--
- check_properties()
+ if initialize then
+ initialize()
+ end
--
parent_lexer = nil
--
- lexer = load_lexer(filename) or nolexer(name)
+ lexer = load_lexer(filename,namespace) or nolexer(filename,namespace)
usedlexers[filename] = lexer
--
if not lexer._rules and not lexer._lexer then
@@ -1243,7 +1659,7 @@ function context.load(filename)
for i=1,#_r do
local rule = _r[i]
rules[#rules + 1] = {
- name .. '_' .. rule[1],
+ name .. "_" .. rule[1],
rule[2],
}
end
@@ -1254,7 +1670,7 @@ function context.load(filename)
tokenstyles[token] = style
end
end
- lexer = l
+ lexer = _l
end
--
local _r = lexer._rules
@@ -1279,7 +1695,7 @@ function context.load(filename)
local patterns = foldsymbols._patterns
if patterns then
for i = 1, #patterns do
- patterns[i] = '()(' .. patterns[i] .. ')'
+ patterns[i] = "()(" .. patterns[i] .. ")"
end
end
end
@@ -1289,6 +1705,10 @@ function context.load(filename)
--
nesting = nesting - 1
--
+ if inspect then
+ context.inspect(lexer)
+ end
+ --
return lexer
end
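
-- A minimal usage sketch (not part of the patch); the lexer name exists elsewhere
-- in this tree, the namespace argument is optional and its value here is made up.

local lexer = require("lexer")
local tex   = lexer.context.loadlexer("scite-context-lexer-tex")
-- with an explicit namespace, so the same file can be loaded under another name:
-- local lua = lexer.context.loadlexer("scite-context-lexer-lua","lua-in-tex")
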
@@ -1301,7 +1721,7 @@ function context.embed_lexer(parent, child, start_rule, end_rule) -- mostly the
if not child._RULES then
local rules = child._rules
if not rules then
- report("child lexer '%s' has no rules",chile._NAME or "unknown")
+ report("child lexer '%s' has no rules",child._NAME or "unknown")
rules = { }
child._rules = rules
end
@@ -1311,9 +1731,9 @@ function context.embed_lexer(parent, child, start_rule, end_rule) -- mostly the
end
end
embeddedrules[parent._NAME] = {
- ['start_rule'] = start_rule,
- ['token_rule'] = join_tokens(child),
- ['end_rule'] = end_rule
+ ["start_rule"] = start_rule,
+ ["token_rule"] = join_tokens(child),
+ ["end_rule"] = end_rule
}
local children = parent._CHILDREN
if not children then
@@ -1326,7 +1746,12 @@ function context.embed_lexer(parent, child, start_rule, end_rule) -- mostly the
tokenstyles = { }
parent._tokenstyles = tokenstyles
end
- tokenstyles[child._NAME..'_whitespace'] = lexers.STYLE_WHITESPACE -- check what whitespace
+ local childname = child._NAME
+ local whitespace = childname .. "_whitespace"
+ tokenstyles[whitespace] = lexers.STYLE_WHITESPACE -- all these STYLE_THINGS will go .. just a proper hash
+ if trace then
+ report("using whitespace '%s' as trigger for '%s' with property '%s'",whitespace,childname,lexers.STYLE_WHITESPACE)
+ end
local childstyles = child._tokenstyles
if childstyles then
for token, style in next, childstyles do
@@ -1340,7 +1765,9 @@ end
-- we now move the adapted code to the lexers namespace
lexers.new = context.new
-lexers.load = context.load
+lexers.load = context.loadlexer
+------.loadlexer = context.loadlexer
+lexers.loadluafile = context.loadluafile
lexers.embed_lexer = context.embed_lexer
lexers.fold = context.fold
lexers.lex = context.lex
@@ -1348,6 +1775,9 @@ lexers.token = context.token
lexers.word_match = context.word_match
lexers.exact_match = context.exact_match
lexers.just_match = context.just_match
+lexers.inspect = context.inspect
+lexers.report = context.report
+lexers.inform = context.inform
-- helper .. alas ... the lexer's lua instance is rather crippled .. not even
-- math is part of it
@@ -1472,8 +1902,8 @@ function lexers.delimited_range(chars, single_line, no_escape, balanced) -- unch
local s = sub(chars,1,1)
local e = #chars == 2 and sub(chars,2,2) or s
local range
- local b = balanced and s or ''
- local n = single_line and '\n' or ''
+ local b = balanced and s or ""
+ local n = single_line and "\n" or ""
if no_escape then
local invalid = S(e .. n .. b)
range = patterns.any - invalid
@@ -1496,20 +1926,20 @@ function lexers.starts_line(patt) -- unchanged
return index
end
local char = sub(input,index - 1,index - 1)
- if char == '\n' or char == '\r' or char == '\f' then
+ if char == "\n" or char == "\r" or char == "\f" then
return index
end
end ) * patt
end
function lexers.last_char_includes(s) -- unchanged
- s = '[' .. gsub(s,'[-%%%[]', '%%%1') .. ']'
+ s = "[" .. gsub(s,"[-%%%[]", "%%%1") .. "]"
return P ( function(input, index)
if index == 1 then
return index
end
local i = index
- while match(sub(input,i - 1,i - 1),'[ \t\r\n\f]') do
+ while match(sub(input,i - 1,i - 1),"[ \t\r\n\f]") do
i = i - 1
end
if match(sub(input,i - 1,i - 1),s) then
@@ -1527,21 +1957,21 @@ function lexers.nested_pair(start_chars, end_chars) -- unchanged
end
local function prev_line_is_comment(prefix, text, pos, line, s) -- unchanged
- local start = find(line,'%S')
+ local start = find(line,"%S")
if start < s and not find(line,prefix,start,true) then
return false
end
local p = pos - 1
- if sub(text,p,p) == '\n' then
+ if sub(text,p,p) == "\n" then
p = p - 1
- if sub(text,p,p) == '\r' then
+ if sub(text,p,p) == "\r" then
p = p - 1
end
- if sub(text,p,p) ~= '\n' then
- while p > 1 and sub(text,p - 1,p - 1) ~= '\n'
+ if sub(text,p,p) ~= "\n" then
+ while p > 1 and sub(text,p - 1,p - 1) ~= "\n"
do p = p - 1
end
- while find(sub(text,p,p),'^[\t ]$') do
+ while find(sub(text,p,p),"^[\t ]$") do
p = p + 1
end
return sub(text,p,p + #prefix - 1) == prefix
@@ -1551,10 +1981,10 @@ local function prev_line_is_comment(prefix, text, pos, line, s) -- unchanged
end
local function next_line_is_comment(prefix, text, pos, line, s)
- local p = find(text,'\n',pos + s)
+ local p = find(text,"\n",pos + s)
if p then
p = p + 1
- while find(sub(text,p,p),'^[\t ]$') do
+ while find(sub(text,p,p),"^[\t ]$") do
p = p + 1
end
return sub(text,p,p + #prefix - 1) == prefix
@@ -1565,10 +1995,10 @@ end
function lexers.fold_line_comments(prefix)
local property_int = lexers.property_int
return function(text, pos, line, s)
- if property_int['fold.line.comments'] == 0 then
+ if property_int["fold.line.comments"] == 0 then
return 0
end
- if s > 1 and match(line,'^%s*()') < s then
+ if s > 1 and match(line,"^%s*()") < s then
return 0
end
local prev_line_comment = prev_line_is_comment(prefix, text, pos, line, s)
diff --git a/context/data/scite/lexers/themes/scite-context-theme.lua b/context/data/scite/context/lexers/themes/scite-context-theme.lua
index 9b22b241c..b0c63fe39 100644
--- a/context/data/scite/lexers/themes/scite-context-theme.lua
+++ b/context/data/scite/context/lexers/themes/scite-context-theme.lua
@@ -17,12 +17,20 @@ local info = {
-- crippled. On the other hand, I don't see other schemes being used with the
-- context lexers.
--- The next kludge is no longer needed:
+-- The next kludge is no longer needed, which is good!
--
-- if GTK then -- WIN32 GTK OSX CURSES
-- font_name = '!' .. font_name
-- end
+-- I need to play with these, some work ok:
+--
+-- eolfilled noteolfilled
+-- characterset:u|l
+-- visible notvisible
+-- changeable notchangeable (this way we can protect styles, e.g. preamble?)
+-- hotspot nothotspot
+
local font_name = 'Dejavu Sans Mono'
local font_size = '14'
@@ -55,6 +63,7 @@ local styles = {
["whitespace"] = { },
["default"] = { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel },
+ ["default"] = { font = font_name, size = font_size, fore = colors.black },
["number"] = { fore = colors.cyan },
["comment"] = { fore = colors.yellow },
["keyword"] = { fore = colors.blue, bold = true },
@@ -105,14 +114,37 @@ local styles = {
-- equal to default:
["text"] = { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel },
+ ["text"] = { font = font_name, size = font_size, fore = colors.black },
}
--- I need to read the C++ code and see how I can load the themes directly
--- in which case we can start thinking of tracing.
-
-if lexer and lexer.context and lexer.context.registerstyles then
-
- lexer.context.registerstyles(styles)
+local properties = {
+ ["fold.by.parsing"] = 1,
+ ["fold.by.indentation"] = 0,
+ ["fold.by.line"] = 0,
+ ["fold.line.comments"] = 0,
+ --
+ ["lexer.context.log"] = 1, -- log errors and warnings
+ ["lexer.context.trace"] = 0, -- show loading, initializations etc
+ ["lexer.context.detail"] = 0, -- show more detail when tracing
+ ["lexer.context.show"] = 0, -- show result of lexing
+ ["lexer.context.collapse"] = 0, -- make lexing results somewhat more efficient
+ ["lexer.context.inspect"] = 0, -- show some info about lexer (styles and so)
+ --
+-- ["lexer.context.log"] = 1, -- log errors and warnings
+-- ["lexer.context.trace"] = 1, -- show loading, initializations etc
+}
+local lexer = lexer or require("lexer")
+local context = lexer.context
+
+if context then
+ context.inform("loading context (style) properties")
+ if context.registerstyles then
+ context.registerstyles(styles)
+ end
+ if context.registerproperties then
+ context.registerproperties(properties)
+ end
end
+
diff --git a/context/data/scite/scite-context-data-context.properties b/context/data/scite/context/scite-context-data-context.properties
index 3e53862f7..3e53862f7 100644
--- a/context/data/scite/scite-context-data-context.properties
+++ b/context/data/scite/context/scite-context-data-context.properties
diff --git a/context/data/scite/scite-context-data-interfaces.properties b/context/data/scite/context/scite-context-data-interfaces.properties
index 9c2ca4623..9c2ca4623 100644
--- a/context/data/scite/scite-context-data-interfaces.properties
+++ b/context/data/scite/context/scite-context-data-interfaces.properties
diff --git a/context/data/scite/scite-context-data-metafun.properties b/context/data/scite/context/scite-context-data-metafun.properties
index 9381b4f8d..9381b4f8d 100644
--- a/context/data/scite/scite-context-data-metafun.properties
+++ b/context/data/scite/context/scite-context-data-metafun.properties
diff --git a/context/data/scite/scite-context-data-metapost.properties b/context/data/scite/context/scite-context-data-metapost.properties
index 88ace57ca..88ace57ca 100644
--- a/context/data/scite/scite-context-data-metapost.properties
+++ b/context/data/scite/context/scite-context-data-metapost.properties
diff --git a/context/data/scite/scite-context-data-tex.properties b/context/data/scite/context/scite-context-data-tex.properties
index d1780794d..d1780794d 100644
--- a/context/data/scite/scite-context-data-tex.properties
+++ b/context/data/scite/context/scite-context-data-tex.properties
diff --git a/context/data/scite/scite-context-external.properties b/context/data/scite/context/scite-context-external.properties
index 11326756c..c7d0c4a17 100644
--- a/context/data/scite/scite-context-external.properties
+++ b/context/data/scite/context/scite-context-external.properties
@@ -1,29 +1,46 @@
# external lpeg lexers
-lexer.lpeg.home=$(SciteDefaultHome)/lexers
+lexer.lpeg.home=$(SciteDefaultHome)/context/lexers
lexer.lpeg.color.theme=scite-context-theme
+# lexer.lpeg.color.theme=$(SciteDefaultHome)/context/lexers/themes/scite-context-theme.lua
-# alas, only a few properties are passed (only indentation)
+# The lexer dll no longer interfaces to the following properties. It never had a full
+# interface, so maybe I'll make my own.
fold.by.parsing=1
fold.by.indentation=0
fold.by.line=0
fold.line.comments=0
+# you can put the dll/so file in the <scitehome>/context/lexers path or keep it in
+# <scitehome>/lexers
+
if PLAT_WIN
- lexerpath.*.lpeg=$(lexer.lpeg.home)/LexLPeg.dll
+ lexerpath.*.lpeg=$(lexer.lpeg.home)/../../lexers/lexlpeg.dll
+# lexerpath.*.lpeg=$(lexer.lpeg.home)/lexers/lexlpeg.dll
if PLAT_GTK
- lexerpath.*.lpeg=$(lexer.lpeg.home)/liblexlpeg.so
+ lexerpath.*.lpeg=$(lexer.lpeg.home)/../../lexers/liblexlpeg.so
+# lexerpath.*.lpeg=$(lexer.lpeg.home)/lexers/liblexlpeg.so
+
+# the variable lexer.name is automatically set but I'm not sure what the following
+# one is supposed to do, so we keep it around (same as in lpeg.properties, which we
+# don't load)
-# comment this one if you get crashes or side effects
+lexer.*.lpeg=lpeg
+
+# in principle you can do the following, as we're mostly compatible with the
+# default lexers, but for a regular context setup the lexers built into scite are
+# just fine, so in principle we only need the dll/so
#
# import lexers/lpeg
-lexer.*.lpeg=lpeg
+# patterns should be original (not clash with built in)
-file.patterns.cweb=*.h;*.c;*.w;*.hh;*.cc;*.ww;*.hpp;*.cpp;*.hxx;*.cxx;
+file.patterns.cweb=*.w;*.ww;
+file.patterns.cpp=*.h;*.c;*.hh;*.cc;*.hpp;*.cpp;*.hxx;*.cxx;
+file.patterns.bib=*.bib
lexer.$(file.patterns.metapost)=lpeg_scite-context-lexer-mps
lexer.$(file.patterns.metafun)=lpeg_scite-context-lexer-mps
@@ -33,11 +50,12 @@ lexer.$(file.patterns.example)=lpeg_scite-context-lexer-xml
lexer.$(file.patterns.text)=lpeg_scite-context-lexer-txt
lexer.$(file.patterns.pdf)=lpeg_scite-context-lexer-pdf
lexer.$(file.patterns.cweb)=lpeg_scite-context-lexer-web
+lexer.$(file.patterns.cpp)=lpeg_scite-context-lexer-cpp
+lexer.$(file.patterns.bib)=lpeg_scite-context-lexer-bibtex
lexer.$(file.patterns.tex)=lpeg_scite-context-lexer-tex
lexer.$(file.patterns.xml)=lpeg_scite-context-lexer-xml
lexer.$(file.patterns.html)=lpeg_scite-context-lexer-xml
-lexer.$(file.patterns.cpp)=lpeg_scite-context-lexer-web
# It's a real pity that we cannot overload the errorlist lexer. That would
# make scite even more interesting. Add to that including lpeg and the lpeg
diff --git a/context/data/scite/scite-context-internal.properties b/context/data/scite/context/scite-context-internal.properties
index 130e64f1e..038381dc7 100644
--- a/context/data/scite/scite-context-internal.properties
+++ b/context/data/scite/context/scite-context-internal.properties
@@ -8,8 +8,8 @@
#
# % interface=none|metapost|mp|metafun
-import scite-context-data-metapost
-import scite-context-data-metafun
+import context/scite-context-data-metapost
+import context/scite-context-data-metafun
keywordclass.metapost.all=$(keywordclass.metapost.tex) $(keywordclass.metapost.plain) $(keywordclass.metapost.primitives)
keywordclass.metafun.all=$(keywordclass.metafun.constants) $(keywordclass.metafun.helpers)
@@ -44,9 +44,9 @@ comment.block.at.line.start.metapost=1
#
# % interface=all|nl|en|de|cz|it|ro|latex
-import scite-context-data-tex
-import scite-context-data-context
-import scite-context-data-interfaces
+import context/scite-context-data-tex
+import context/scite-context-data-context
+import context/scite-context-data-interfaces
word.characters.$(file.patterns.context)=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@!?_\\
diff --git a/context/data/scite/context/scite-context-user.properties b/context/data/scite/context/scite-context-user.properties
new file mode 100644
index 000000000..b6fc34282
--- /dev/null
+++ b/context/data/scite/context/scite-context-user.properties
@@ -0,0 +1,15 @@
+# this loads the basics
+
+import context/scite-context
+
+# internal lexing
+
+import context/scite-context-internal
+
+# external lexing (tex, mps, cld/lua, xml)
+
+import context/scite-context-external
+
+# this does some tuning
+
+import context/scite-pragma
diff --git a/context/data/scite/scite-context.properties b/context/data/scite/context/scite-context.properties
index 59f9ca65d..78850ef0d 100644
--- a/context/data/scite/scite-context.properties
+++ b/context/data/scite/context/scite-context.properties
@@ -160,7 +160,7 @@ xml.auto.close.tags=1
# extensions
-import scite-ctx
+import context/scite-ctx
# hard coded compile / build / go
@@ -229,14 +229,14 @@ command.groupundo.29.*=yes
command.save.before.29.*=2
command.shortcut.29.*=Alt+F12
-command.name.30.*=Run with jit
-command.subsystem.30.*=1
-command.30.$(file.patterns.context)=$(name.context.runjit) $(FileNameExt)
-command.30.$(file.patterns.metafun)=$(name.context.runjit) $(FileNameExt) --metapost
-command.30.$(file.patterns.exmaple)=$(name.context.runjit) $(FileNameExt) --xml
-command.groupundo.30.*=yes
-command.save.before.30.*=2
-command.shortcut.30.*=Alt+F7
+#~ command.name.30.*=Run with jit
+#~ command.subsystem.30.*=1
+#~ command.30.$(file.patterns.context)=$(name.context.runjit) $(FileNameExt)
+#~ command.30.$(file.patterns.metafun)=$(name.context.runjit) $(FileNameExt) --metapost
+#~ command.30.$(file.patterns.exmaple)=$(name.context.runjit) $(FileNameExt) --xml
+#~ command.groupundo.30.*=yes
+#~ command.save.before.30.*=2
+#~ command.shortcut.30.*=Alt+F7
# 2 : pdf viewing
diff --git a/context/data/scite/scite-ctx-context.properties b/context/data/scite/context/scite-ctx-context.properties
index a1d5800e6..a1d5800e6 100644
--- a/context/data/scite/scite-ctx-context.properties
+++ b/context/data/scite/context/scite-ctx-context.properties
diff --git a/context/data/scite/scite-ctx-example.properties b/context/data/scite/context/scite-ctx-example.properties
index 78b2f2859..78b2f2859 100644
--- a/context/data/scite/scite-ctx-example.properties
+++ b/context/data/scite/context/scite-ctx-example.properties
diff --git a/context/data/scite/scite-ctx.lua b/context/data/scite/context/scite-ctx.lua
index 421e9cd89..24f5b34b8 100644
--- a/context/data/scite/scite-ctx.lua
+++ b/context/data/scite/context/scite-ctx.lua
@@ -1383,3 +1383,13 @@ function toggle_strip(name)
OnStrip = ignore_strip
end
end
+
+-- this way we get proper lexing for lexers that do more extensive
+-- parsing
+
+function OnOpen(filename)
+ -- print("opening: " .. filename .. " (size: " .. editor.TextLength .. ")")
+ editor:Colourise(1,editor.TextLength)
+end
+
+-- output.LexerLanguage = ""
diff --git a/context/data/scite/scite-ctx.properties b/context/data/scite/context/scite-ctx.properties
index acbb33c0b..874a381e3 100644
--- a/context/data/scite/scite-ctx.properties
+++ b/context/data/scite/context/scite-ctx.properties
@@ -12,7 +12,7 @@
# <?xml version='1.0' language='uk' ?>
ext.lua.auto.reload=1
-ext.lua.startup.script=$(SciteDefaultHome)/scite-ctx.lua
+ext.lua.startup.script=$(SciteDefaultHome)/context/scite-ctx.lua
#~ extension.$(file.patterns.context)=scite-ctx.lua
#~ extension.$(file.patterns.example)=scite-ctx.lua
@@ -150,8 +150,8 @@ command.save.before.26.*=2
command.groupundo.26.*=yes
command.shortcut.26.*=Ctrl+E
-import scite-ctx-context
-import scite-ctx-example
+import context/scite-ctx-context
+import context/scite-ctx-example
ctx.template.scan=yes
ctx.template.rescan=no
diff --git a/context/data/scite/scite-metapost.properties b/context/data/scite/context/scite-metapost.properties
index e3ac25244..fc06dcaa2 100644
--- a/context/data/scite/scite-metapost.properties
+++ b/context/data/scite/context/scite-metapost.properties
@@ -69,7 +69,7 @@ lexer.metapost.comment.process=0
# Metapost: keywords
-import scite-context-data-metapost.properties
+import context/scite-context-data-metapost.properties
keywords.$(file.patterns.metapost)=$(keywordclass.metapost.all)
diff --git a/context/data/scite/scite-pragma.properties b/context/data/scite/context/scite-pragma.properties
index 7308f1fb6..2dea18bad 100644
--- a/context/data/scite/scite-pragma.properties
+++ b/context/data/scite/context/scite-pragma.properties
@@ -25,7 +25,9 @@ $(filter.metafun)\
$(filter.example)\
$(filter.lua)\
$(filter.text)\
-$(filter.pdf)
+$(filter.pdf)\
+$(filter.cweb)\
+$(filter.txt)
# Editor: menus
@@ -36,5 +38,4 @@ XML|xml||\
Lua|lua||\
Text|txt||\
PDF|pdf||\
-CWeb|web||\
-Text|txt||
+CWeb|cweb||
diff --git a/context/data/scite/scite-tex.properties b/context/data/scite/context/scite-tex.properties
index 6933971e2..7d271eaf1 100644
--- a/context/data/scite/scite-tex.properties
+++ b/context/data/scite/context/scite-tex.properties
@@ -89,7 +89,7 @@ lexer.tex.auto.if=1
# only the macros that make sense:
-import scite-context-data-tex.properties
+import context/scite-context-data-tex.properties
# collections
diff --git a/context/data/scite/lexers/archive/data-pre-303.zip b/context/data/scite/lexers/archive/data-pre-303.zip
deleted file mode 100644
index 4e8116d8a..000000000
--- a/context/data/scite/lexers/archive/data-pre-303.zip
+++ /dev/null
Binary files differ
diff --git a/context/data/scite/lexers/archive/data-pre-331.zip b/context/data/scite/lexers/archive/data-pre-331.zip
deleted file mode 100644
index f99c35147..000000000
--- a/context/data/scite/lexers/archive/data-pre-331.zip
+++ /dev/null
Binary files differ
diff --git a/context/data/scite/lexers/archive/data-pre-341.zip b/context/data/scite/lexers/archive/data-pre-341.zip
deleted file mode 100644
index 002855186..000000000
--- a/context/data/scite/lexers/archive/data-pre-341.zip
+++ /dev/null
Binary files differ
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf-object.lua b/context/data/scite/lexers/scite-context-lexer-pdf-object.lua
deleted file mode 100644
index dffd492ae..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf-object.lua
+++ /dev/null
@@ -1,119 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local P, R, S, C, V = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.V
-
-local lexer = require("lexer")
-local context = lexer.context
-local patterns = context.patterns
-
-local token = lexer.token
-
-local pdfobjectlexer = lexer.new("pdf-object","scite-context-lexer-pdf-object")
-local whitespace = pdfobjectlexer.whitespace
-
-local space = patterns.space
-local somespace = space^1
-
-local newline = paterns.eol
-local real = patterns.real
-local cardinal = patterns.cardinal
-
-local lparent = P("(")
-local rparent = P(")")
-local langle = P("<")
-local rangle = P(">")
-local escape = P("\\")
-local anything = P(1)
-local unicodetrigger = P("feff")
-
-local nametoken = 1 - space - S("<>/[]()")
-local name = P("/") * nametoken^1
-
-local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
-
-local t_spacing = token(whitespace, space^1)
-local t_spaces = token(whitespace, space^1)^0
-
-local p_stream = P("stream")
-local p_endstream = P("endstream")
------ p_obj = P("obj")
-local p_endobj = P("endobj")
-local p_reference = P("R")
-
-local p_objectnumber = patterns.cardinal
-local p_comment = P('%') * (1-S("\n\r"))^0
-
-local string = token("quote", lparent)
- * token("string", p_string)
- * token("quote", rparent)
-local unicode = token("quote", langle)
- * token("plain", unicodetrigger)
- * token("string", (1-rangle)^1)
- * token("quote", rangle)
-local whatsit = token("quote", langle)
- * token("string", (1-rangle)^1)
- * token("quote", rangle)
-local keyword = token("command", name)
-local constant = token("constant", name)
-local number = token('number', real)
--- local reference = token("number", cardinal)
--- * t_spacing
--- * token("number", cardinal)
-local reserved = token("number", P("true") + P("false") + P("NULL"))
-local reference = token("warning", cardinal)
- * t_spacing
- * token("warning", cardinal)
- * t_spacing
- * token("keyword", p_reference)
-local t_comment = token("comment", p_comment)
-
--- t_openobject = token("number", p_objectnumber)
--- * t_spacing
--- * token("number", p_objectnumber)
--- * t_spacing
--- * token("keyword", p_obj)
-local t_closeobject = token("keyword", p_endobj)
-
-local t_opendictionary = token("grouping", P("<<"))
-local t_closedictionary = token("grouping", P(">>"))
-
-local t_openarray = token("grouping", P("["))
-local t_closearray = token("grouping", P("]"))
-
-local t_stream = token("keyword", p_stream)
--- * token("default", newline * (1-newline*p_endstream*newline)^1 * newline)
- * token("default", (1 - p_endstream)^1)
- * token("keyword", p_endstream)
-
-local t_dictionary = { "dictionary",
- dictionary = t_opendictionary * (t_spaces * keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
- whatever = V("dictionary") + V("array") + constant + reference + string + unicode + number + whatsit,
- }
-
-local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
- object = t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
- dictionary = t_opendictionary * (t_spaces * keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
- number = number,
- whatever = V("dictionary") + V("array") + constant + reference + string + unicode + number + reserved + whatsit,
- }
-
-pdfobjectlexer._shared = {
- dictionary = t_dictionary,
-}
-
-pdfobjectlexer._rules = {
- { 'whitespace', t_spacing },
- { 'object', t_object },
-}
-
-pdfobjectlexer._tokenstyles = context.styleset
-
-return pdfobjectlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua b/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua
deleted file mode 100644
index 139f06299..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua
+++ /dev/null
@@ -1,53 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf xref",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- xref
--- cardinal cardinal [character]
--- ..
--- %%EOF | startxref | trailer
-
-local P, R = lpeg.P, lpeg.R
-
-local lexer = require("lexer")
-local context = lexer.context
-local patterns = context.patterns
-
-local token = lexer.token
-
-local pdfxreflexer = lexer.new("pdf-xref","scite-context-lexer-pdf-xref")
-local whitespace = pdfxreflexer.whitespace
-
-local pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
-
-local spacing = patterns.spacing
-
-local t_spacing = token(whitespace, spacing)
-
-local p_trailer = P("trailer")
-
-local t_number = token("number", R("09")^1)
- * t_spacing
- * token("number", R("09")^1)
- * t_spacing
- * (token("keyword", R("az","AZ")) * t_spacing)^-1
-
-local t_xref = t_number^1
-
--- t_xref = token("default", (1-p_trailer)^1)
--- * token("keyword", p_trailer)
--- * t_spacing
--- * pdfobjectlexer._shared.dictionary
-
-pdfxreflexer._rules = {
- { 'whitespace', t_spacing },
- { 'xref', t_xref },
-}
-
-pdfxreflexer._tokenstyles = context.styleset
-
-return pdfxreflexer
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf.lua b/context/data/scite/lexers/scite-context-lexer-pdf.lua
deleted file mode 100644
index 91abff781..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf.lua
+++ /dev/null
@@ -1,79 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local P, R, S = lpeg.P, lpeg.R, lpeg.S
-
-local lexer = require("lexer")
-local context = lexer.context
-local patterns = context.patterns
-
-local token = lexer.token
-
-local pdflexer = lexer.new("pdf","scite-context-lexer-pdf")
-local whitespace = pdflexer.whitespace
-
-local pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
-local pdfxreflexer = lexer.load("scite-context-lexer-pdf-xref")
-
-local space = patterns.space
-local spacing = patterns.spacing
-local nospacing = patterns.nospacing
-local anything = patterns.anything
-local restofline = patterns.restofline
-
-local t_spacing = token(whitespace, spacing)
-local t_rest = token("default", nospacing) -- anything
-
-local p_obj = P("obj")
-local p_endobj = P("endobj")
-local p_xref = P("xref")
-local p_startxref = P("startxref")
-local p_eof = P("%%EOF")
-local p_trailer = P("trailer")
-
-local p_objectnumber = patterns.cardinal
-local p_comment = P('%') * restofline
-
-local t_comment = token("comment", p_comment)
-local t_openobject = token("warning", p_objectnumber)
- * t_spacing
- * token("warning", p_objectnumber)
- * t_spacing
- * token("keyword", p_obj)
- * t_spacing^0
-local t_closeobject = token("keyword", p_endobj)
-
--- We could do clever xref parsing but why should we (i.e. we should check for
--- the xref body. As a pdf file is not edited, we could do without a nested
--- lexer anyway.
-
-local t_trailer = token("keyword", p_trailer)
- * t_spacing
- * pdfobjectlexer._shared.dictionary
-
-local t_openxref = token("plain", p_xref)
-local t_closexref = token("plain", p_startxref)
- + token("comment", p_eof)
- + t_trailer
-local t_startxref = token("plain", p_startxref)
- * t_spacing
- * token("number", R("09")^1)
-
-lexer.embed_lexer(pdflexer, pdfobjectlexer, t_openobject, t_closeobject)
-lexer.embed_lexer(pdflexer, pdfxreflexer, t_openxref, t_closexref)
-
-pdflexer._rules = {
- { 'whitespace', t_spacing },
- { 'comment', t_comment },
- { 'xref', t_startxref },
- { 'rest', t_rest },
-}
-
-pdflexer._tokenstyles = context.styleset
-
-return pdflexer
diff --git a/context/data/scite/lexers/scite-context-lexer-web.lua b/context/data/scite/lexers/scite-context-lexer-web.lua
deleted file mode 100644
index 05c0ce1f3..000000000
--- a/context/data/scite/lexers/scite-context-lexer-web.lua
+++ /dev/null
@@ -1,159 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for w",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- this will be extended
-
-local P, R, S, C, Cg, Cb, Cs, Cmt, lpegmatch = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt, lpeg.match
-local setmetatable = setmetatable
-
-local lexer = require("lexer")
-local context = lexer.context
-local patterns = context.patterns
-
-local token = lexer.token
-local exact_match = lexer.exact_match
-
-local weblexer = lexer.new("web","scite-context-lexer-web")
-local whitespace = weblexer.whitespace
-
-local keywords = { -- copied from cpp.lua
- -- c
- 'asm', 'auto', 'break', 'case', 'const', 'continue', 'default', 'do', 'else',
- 'extern', 'false', 'for', 'goto', 'if', 'inline', 'register', 'return',
- 'sizeof', 'static', 'switch', 'true', 'typedef', 'volatile', 'while',
- 'restrict',
- -- hm
- '_Bool', '_Complex', '_Pragma', '_Imaginary',
- -- c++.
- 'catch', 'class', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
- 'export', 'friend', 'mutable', 'namespace', 'new', 'operator', 'private',
- 'protected', 'public', 'signals', 'slots', 'reinterpret_cast',
- 'static_assert', 'static_cast', 'template', 'this', 'throw', 'try', 'typeid',
- 'typename', 'using', 'virtual'
-}
-
-local datatypes = { -- copied from cpp.lua
- 'bool', 'char', 'double', 'enum', 'float', 'int', 'long', 'short', 'signed',
- 'struct', 'union', 'unsigned', 'void'
-}
-
-local macros = { -- copied from cpp.lua
- 'define', 'elif', 'else', 'endif', 'error', 'if', 'ifdef', 'ifndef', 'import',
- 'include', 'line', 'pragma', 'undef', 'using', 'warning'
-}
-
-local space = patterns.space -- S(" \n\r\t\f\v")
-local any = patterns.any
-local restofline = patterns.restofline
-local startofline = patterns.startofline
-
-local squote = P("'")
-local dquote = P('"')
-local escaped = P("\\") * P(1)
-local slashes = P('//')
-local begincomment = P("/*")
-local endcomment = P("*/")
-local percent = P("%")
-
-local hexadecimal = patterns.hexadecimal
-local decimal = patterns.decimal
-local float = patterns.float
-local integer = P("-")^-1 * (hexadecimal + decimal) -- also in patterns ?
-
-local spacing = token(whitespace, space^1)
-local rest = token("default", any)
-
-local shortcomment = token("comment", slashes * restofline^0)
-local longcomment = token("comment", begincomment * (1-endcomment)^0 * endcomment^-1)
-local texcomment = token("comment", percent * restofline^0)
-
-local shortstring = token("quote", dquote) -- can be shared
- * token("string", (escaped + (1-dquote))^0)
- * token("quote", dquote)
- + token("quote", squote)
- * token("string", (escaped + (1-squote))^0)
- * token("quote", squote)
-
-local number = token("number", float + integer)
-
-local validword = R("AZ","az","__") * R("AZ","az","__","09")^0
-local identifier = token("default",validword)
-
-local operator = token("special", S('+-*/%^!=<>;:{}[]().&|?~'))
-
------ optionalspace = spacing^0
-
-local p_keywords = exact_match(keywords )
-local p_datatypes = exact_match(datatypes)
-local p_macros = exact_match(macros)
-
-local keyword = token("keyword", p_keywords)
-local datatype = token("keyword", p_datatypes)
-local identifier = token("default", validword)
-
-local macro = token("data", #P('#') * startofline * P('#') * S('\t ')^0 * p_macros)
-
-local beginweb = P("@")
-local endweb = P("@c")
-
-local webcomment = token("comment", #beginweb * startofline * beginweb * (1-endweb)^0 * endweb)
-
--- local texlexer = lexer.load('scite-context-lexer-tex')
-
--- lexer.embed_lexer(weblexer, texlexer, #beginweb * startofline * token("comment",beginweb), token("comment",endweb))
-
-weblexer._rules = {
- { 'whitespace', spacing },
- { 'keyword', keyword },
- { 'type', datatype },
- { 'identifier', identifier },
- { 'string', shortstring },
- -- { 'webcomment', webcomment },
- { 'texcomment', texcomment },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment },
- { 'number', number },
- { 'macro', macro },
- { 'operator', operator },
- { 'rest', rest },
-}
-
-weblexer._tokenstyles = context.styleset
-
-weblexer._foldpattern = P("/*") + P("*/") + S("{}") -- separate entry else interference
-
-weblexer._foldsymbols = {
- _patterns = {
- '[{}]',
- '/%*',
- '%*/',
- },
- -- ["data"] = { -- macro
- -- ['region'] = 1,
- -- ['endregion'] = -1,
- -- ['if'] = 1,
- -- ['ifdef'] = 1,
- -- ['ifndef'] = 1,
- -- ['endif'] = -1,
- -- },
- ["special"] = { -- operator
- ['{'] = 1,
- ['}'] = -1,
- },
- ["comment"] = {
- ['/*'] = 1,
- ['*/'] = -1,
- }
-}
-
--- -- by indentation:
-
-weblexer._foldpatterns = nil
-weblexer._foldsymbols = nil
-
-return weblexer
diff --git a/context/data/scite/lexers/themes/scite-context-theme-keep.lua b/context/data/scite/lexers/themes/scite-context-theme-keep.lua
deleted file mode 100644
index 7f9423d9a..000000000
--- a/context/data/scite/lexers/themes/scite-context-theme-keep.lua
+++ /dev/null
@@ -1,233 +0,0 @@
-local info = {
- version = 1.002,
- comment = "theme for scintilla lpeg lexer for context/metafun",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
--- global.trace("OEPS") -- how do we get access to the regular lua extensions
-
--- The regular styles set the main lexer styles table but we avoid that in order not
--- to end up with updating issues. We just use another table.
-
--- if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base
-local font_name = 'Dejavu Sans Mono'
-local font_size = 14
-
-if not WIN32 then
- font_name = '!' .. font_name
-end
-
-local color = lexer.color
-local style = lexer.style
-
-lexer.context = lexer.context or { }
-local context = lexer.context
-
-context.path = context_path
-
-colors = {
- red = color('7F', '00', '00'),
- green = color('00', '7F', '00'),
- blue = color('00', '00', '7F'),
- cyan = color('00', '7F', '7F'),
- magenta = color('7F', '00', '7F'),
- yellow = color('7F', '7F', '00'),
- orange = color('B0', '7F', '00'),
- --
- white = color('FF', 'FF', 'FF'),
- light = color('CF', 'CF', 'CF'),
- grey = color('80', '80', '80'),
- dark = color('4F', '4F', '4F'),
- black = color('00', '00', '00'),
- --
- selection = color('F7', 'F7', 'F7'),
- logpanel = color('E7', 'E7', 'E7'),
- textpanel = color('CF', 'CF', 'CF'),
- linepanel = color('A7', 'A7', 'A7'),
- tippanel = color('44', '44', '44'),
- --
- right = color('00', '00', 'FF'),
- wrong = color('FF', '00', '00'),
-}
-
-colors.teal = colors.cyan
-colors.purple = colors.magenta
-
--- to be set:
---
--- style_nothing
--- style_class
--- style_comment
--- style_constant
--- style_definition
--- style_error
--- style_function
--- style_keyword
--- style_number
--- style_operator
--- style_string
--- style_preproc
--- style_tag
--- style_type
--- style_variable
--- style_embedded
--- style_label
--- style_regex
--- style_identifier
---
--- style_line_number
--- style_bracelight
--- style_bracebad
--- style_controlchar
--- style_indentguide
--- style_calltip
-
-style_default = style {
- font = font_name,
- size = font_size,
- fore = colors.black,
- back = colors.textpanel,
-}
-
-style_nothing = style {
- -- empty
-}
-
-style_number = style { fore = colors.cyan }
-style_comment = style { fore = colors.yellow }
-style_string = style { fore = colors.magenta }
-style_keyword = style { fore = colors.blue, bold = true }
-
-style_quote = style { fore = colors.blue, bold = true }
-style_special = style { fore = colors.blue }
-style_extra = style { fore = colors.yellow }
-
-style_embedded = style { fore = colors.black, bold = true }
-
-style_char = style { fore = colors.magenta }
-style_reserved = style { fore = colors.magenta, bold = true }
-style_class = style { fore = colors.black, bold = true }
-style_constant = style { fore = colors.cyan, bold = true }
-style_definition = style { fore = colors.black, bold = true }
-style_okay = style { fore = colors.dark }
-style_error = style { fore = colors.red }
-style_warning = style { fore = colors.orange }
-style_invisible = style { back = colors.orange }
-style_function = style { fore = colors.black, bold = true }
-style_operator = style { fore = colors.blue }
-style_preproc = style { fore = colors.yellow, bold = true }
-style_tag = style { fore = colors.cyan }
-style_type = style { fore = colors.blue }
-style_variable = style { fore = colors.black }
-style_identifier = style_nothing
-
-style_standout = style { fore = colors.orange, bold = true }
-
-style_line_number = style { back = colors.linepanel }
-style_bracelight = style_standout
-style_bracebad = style_standout
-style_indentguide = style { fore = colors.linepanel, back = colors.white }
-style_calltip = style { fore = colors.white, back = colors.tippanel }
-style_controlchar = style_nothing
-
-style_label = style { fore = colors.red, bold = true } -- style { fore = colors.cyan, bold = true }
-style_regex = style_string
-
-style_command = style { fore = colors.green, bold = true }
-
--- only bold seems to work
-
-lexer.style_nothing = style_nothing
-lexer.style_class = style_class
-lexer.style_comment = style_comment
-lexer.style_constant = style_constant
-lexer.style_definition = style_definition
-lexer.style_error = style_error
-lexer.style_function = style_function
-lexer.style_keyword = style_keyword
-lexer.style_number = style_number
-lexer.style_operator = style_operator
-lexer.style_string = style_string
-lexer.style_preproc = style_preproc
-lexer.style_tag = style_tag
-lexer.style_type = style_type
-lexer.style_variable = style_variable
-lexer.style_embedded = style_embedded
-lexer.style_label = style_label
-lexer.style_regex = style_regex
-lexer.style_identifier = style_nothing
-
-local styles = { -- as we have globals we could do with less
-
- -- ["whitespace"] = style_whitespace, -- not to be set!
-
-["default"] = style_nothing,
-["number"] = style_number,
-["comment"] = style_comment,
-["keyword"] = style_keyword,
-["string"] = style_string,
-["preproc"] = style_preproc,
-
- ["reserved"] = style_reserved,
- ["internal"] = style_standout,
-
- ["command"] = style_command,
- ["preamble"] = style_comment,
- ["embedded"] = style_embedded,
- ["grouping"] = style { fore = colors.red },
-["label"] = style_label,
- ["primitive"] = style_keyword,
- ["plain"] = style { fore = colors.dark, bold = true },
- ["user"] = style { fore = colors.green },
- ["data"] = style_constant,
- ["special"] = style_special,
- ["extra"] = style_extra,
- ["quote"] = style_quote,
-
- ["okay"] = style_okay,
- ["warning"] = style_warning,
- ["invisible"] = style_invisible,
-["error"] = style_error,
-
-}
-
--- Old method (still available):
-
-local styleset = { }
-
-for k, v in next, styles do
- styleset[#styleset+1] = { k, v }
-end
-
-context.styles = styles
-context.styleset = styleset
-
--- We need to be sparse due to some limitation (and the number of built in styles
--- growing).
-
--- function context.newstyleset(list)
--- local t = { }
--- if list then
--- for i=1,#list do
--- t[list[i]] = true
--- end
--- end
--- return t
--- end
-
--- function context.usestyle(set,name)
--- set[name] = true
--- return name
--- end
-
--- function context.usestyleset(set)
--- local t = { }
--- for k, _ in next, set do
--- t[#t+1] = { k, styles[k] or styles.default }
--- end
--- end
diff --git a/context/data/scite/metapost.properties b/context/data/scite/metapost.properties
deleted file mode 100644
index fe89b65eb..000000000
--- a/context/data/scite/metapost.properties
+++ /dev/null
@@ -1 +0,0 @@
-import scite-metapost
diff --git a/context/data/scite/scite-context-user.properties b/context/data/scite/scite-context-user.properties
deleted file mode 100644
index 88e803031..000000000
--- a/context/data/scite/scite-context-user.properties
+++ /dev/null
@@ -1,15 +0,0 @@
-# this loades the basics
-
-import scite-context
-
-# internal lexing
-
-import scite-context-internal
-
-# external lexing (tex, mps, cld/lua, xml)
-
-import scite-context-external
-
-# this does some tuning
-
-import scite-pragma
diff --git a/context/data/scite/scite-context-visual.tex b/context/data/scite/scite-context-visual.tex
deleted file mode 100644
index 0a1b8bb71..000000000
--- a/context/data/scite/scite-context-visual.tex
+++ /dev/null
@@ -1,52 +0,0 @@
-% language=uk
-
-\usemodule[art-01]
-
-\defineframedtext
- [entry]
-
-\starttext
-
-\startchapter[title=Some fancy title]
-
- \startluacode
- local entries = { -- there can be more
- { text = "The third entry!" },
- { text = "The fourth entry!" },
- }
-
- for i=1,#entries do
- context.startentry()
- context(entries[i].text)
- context.stopentry()
- end
- \stopluacode
-
- This is just some text to demonstrate the realtime spellchecker
- in combination with the embedded lua and metapost lexers and
- inline as well as display \ctxlua{context("lua code")}.
-
- Non breakable spaces in for instance 10 mm and quads like here
- are shown as well.
-
- \startlinecorrection
- \startMPcode
- for i=1 upto 100 :
- draw fullcircle scaled (i*mm) ;
- endfor ;
- \stopMPcode
- \stoplinecorrection
-
- \iftrue
- \def\crap{some text} % who cares
- \else
- \def\crap{some crap} % about this
- \fi
-
- \blank[2*big]
-
- \crap
-
-\stopchapter
-
-\stoptext
diff --git a/context/data/scite/tex.properties b/context/data/scite/tex.properties
deleted file mode 100644
index 3fbad41cb..000000000
--- a/context/data/scite/tex.properties
+++ /dev/null
@@ -1 +0,0 @@
-import scite-tex
diff --git a/doc/context/scripts/mkiv/mtx-scite.html b/doc/context/scripts/mkiv/mtx-scite.html
index c4dd157e0..24229db73 100644
--- a/doc/context/scripts/mkiv/mtx-scite.html
+++ b/doc/context/scripts/mkiv/mtx-scite.html
@@ -40,6 +40,8 @@
<tr><th style="width: 10em">flag</th><th style="width: 8em">value</th><th>description</th></tr>
<tr><th/><td/><td/></tr>
<tr><th>--words</th><td></td><td>convert spell-*.txt into spell-*.lua</td></tr>
+ <tr><th>--tree</th><td></td><td>converts a tree into an html tree (--source --target --numbers)</td></tr>
+ <tr><th>--file</th><td></td><td>converts a file into an html tree (--source --target --numbers --lexer)</td></tr>
</table>
<br/>
</div>
diff --git a/doc/context/scripts/mkiv/mtx-scite.man b/doc/context/scripts/mkiv/mtx-scite.man
index ece69a9a6..8f268c554 100644
--- a/doc/context/scripts/mkiv/mtx-scite.man
+++ b/doc/context/scripts/mkiv/mtx-scite.man
@@ -13,6 +13,12 @@
.TP
.B --words
convert spell-*.txt into spell-*.lua
+.TP
+.B --tree
+converts a tree into an html tree (--source --target --numbers)
+.TP
+.B --file
+converts a file into an html tree (--source --target --numbers --lexer)
.SH AUTHOR
More information about ConTeXt and the tools that come with it can be found at:
diff --git a/doc/context/scripts/mkiv/mtx-scite.xml b/doc/context/scripts/mkiv/mtx-scite.xml
index 87fe506dc..65ad8736a 100644
--- a/doc/context/scripts/mkiv/mtx-scite.xml
+++ b/doc/context/scripts/mkiv/mtx-scite.xml
@@ -9,6 +9,8 @@
<category name="basic">
<subcategory>
<flag name="words"><short>convert spell-*.txt into spell-*.lua</short></flag>
+ <flag name="tree"><short>converts a tree into an html tree (--source --target --numbers)</short></flag>
+ <flag name="file"><short>converts a file into an html tree (--source --target --numbers --lexer)</short></flag>
</subcategory>
</category>
</flags>
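For reference, the two new flags documented above correspond to the entry points added to mtx-scite.lua below. A couple of invocation sketches (the --tree paths repeat the comment in the script itself; the --file names are placeholders, not part of the patch):

  mtxrun --script scite --tree --source=t:/texmf/tex/context --target=e:/tmp/context --numbers
  mtxrun --script scite --file --source=somefile.mkiv --target=somefile.html --numbers --lexer=tex
  mtxrun --script scite --file somefile.mkiv

The last form relies on the fallback in scripts.scite.file that lexes the plain file arguments and derives the lexer from the file suffix.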
diff --git a/scripts/context/lua/mtx-scite.lua b/scripts/context/lua/mtx-scite.lua
index 972edbfe6..ae8c67387 100644
--- a/scripts/context/lua/mtx-scite.lua
+++ b/scripts/context/lua/mtx-scite.lua
@@ -6,6 +6,8 @@ if not modules then modules = { } end modules ['mtx-scite'] = {
license = "see context related readme files"
}
+-- mtxrun --script scite --tree --source=t:/texmf/tex/context --target=e:/tmp/context --numbers
+
local P, R, S, C, Ct, Cf, Cc, Cg = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Ct, lpeg.Cf, lpeg.Cc, lpeg.Cg
local lpegmatch = lpeg.match
local format, lower, gmatch = string.format, string.lower, string.gmatch
@@ -22,6 +24,8 @@ local helpinfo = [[
<category name="basic">
<subcategory>
<flag name="words"><short>convert spell-*.txt into spell-*.lua</short></flag>
+ <flag name="tree"><short>converts a tree into an html tree (--source --target --numbers)</short></flag>
+ <flag name="file"><short>converts a file into an html tree (--source --target --numbers --lexer)</short></flag>
</subcategory>
</category>
</flags>
@@ -36,6 +40,8 @@ local application = logs.application {
local report = application.report
+local scite = require("util-sci")
+
scripts = scripts or { }
scripts.scite = scripts.scite or { }
@@ -241,6 +247,51 @@ function scripts.scite.words()
report("you need to move the lua files to lexers/data")
end
+function scripts.scite.tree()
+ local source = environment.argument("source")
+ local target = environment.argument("target")
+ local numbers = environment.argument("numbers")
+ if not lfs.isdir(source) then
+ report("you need to pass a valid source path with --source")
+ return
+ end
+ if not lfs.isdir(target) then
+ report("you need to pass a valid target path with --target")
+ return
+ end
+ if source == target then
+ report("source and target paths must be different")
+ return
+ end
+ scite.converttree(source,target,numbers)
+end
+
+function scripts.scite.file()
+ local source = environment.argument("source")
+ local target = environment.argument("target")
+ local lexer = environment.argument("lexer")
+ local numbers = environment.argument("numbers")
+ if source then
+ local target = target or file.replacesuffix(source,"html")
+ if source == target then
+ report("the source file cannot be the same as the target")
+ else
+ scite.filetohtml(source,lexer,target,numbers)
+ end
+
+ else
+ for i=1,#environment.files do
+ local source = environment.files[i]
+ local target = file.replacesuffix(source,"html")
+ if source == target then
+ report("the source file cannot be the same as the target")
+ else
+ scite.filetohtml(source,nil,target,numbers)
+ end
+ end
+ end
+end
+
-- if environment.argument("start") then
-- scripts.scite.start(true)
-- elseif environment.argument("test") then
@@ -251,6 +302,10 @@ end
if environment.argument("words") then
scripts.scite.words()
+elseif environment.argument("tree") then
+ scripts.scite.tree()
+elseif environment.argument("file") then
+ scripts.scite.file()
elseif environment.argument("exporthelp") then
application.export(environment.argument("exporthelp"),environment.files[1])
else
diff --git a/scripts/context/lua/mtxrun.lua b/scripts/context/lua/mtxrun.lua
index 7bd96b247..8679aefb1 100644
--- a/scripts/context/lua/mtxrun.lua
+++ b/scripts/context/lua/mtxrun.lua
@@ -7180,7 +7180,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["trac-log"] = package.loaded["trac-log"] or true
--- original size: 25455, stripped down to: 16617
+-- original size: 25607, stripped down to: 16617
if not modules then modules={} end modules ['trac-log']={
version=1.001,
@@ -16875,8 +16875,8 @@ end -- of closure
-- used libraries : l-lua.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-sto.lua util-prs.lua util-fmt.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-mrg.lua util-tpl.lua util-env.lua luat-env.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua util-lib.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 694406
--- stripped bytes : 246345
+-- original bytes : 694558
+-- stripped bytes : 246497
-- end library merge
diff --git a/scripts/context/stubs/mswin/mtxrun.lua b/scripts/context/stubs/mswin/mtxrun.lua
index 7bd96b247..8679aefb1 100644
--- a/scripts/context/stubs/mswin/mtxrun.lua
+++ b/scripts/context/stubs/mswin/mtxrun.lua
@@ -7180,7 +7180,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["trac-log"] = package.loaded["trac-log"] or true
--- original size: 25455, stripped down to: 16617
+-- original size: 25607, stripped down to: 16617
if not modules then modules={} end modules ['trac-log']={
version=1.001,
@@ -16875,8 +16875,8 @@ end -- of closure
-- used libraries : l-lua.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-sto.lua util-prs.lua util-fmt.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-mrg.lua util-tpl.lua util-env.lua luat-env.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua util-lib.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 694406
--- stripped bytes : 246345
+-- original bytes : 694558
+-- stripped bytes : 246497
-- end library merge
diff --git a/scripts/context/stubs/unix/mtxrun b/scripts/context/stubs/unix/mtxrun
index 7bd96b247..8679aefb1 100755
--- a/scripts/context/stubs/unix/mtxrun
+++ b/scripts/context/stubs/unix/mtxrun
@@ -7180,7 +7180,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["trac-log"] = package.loaded["trac-log"] or true
--- original size: 25455, stripped down to: 16617
+-- original size: 25607, stripped down to: 16617
if not modules then modules={} end modules ['trac-log']={
version=1.001,
@@ -16875,8 +16875,8 @@ end -- of closure
-- used libraries : l-lua.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-sto.lua util-prs.lua util-fmt.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-mrg.lua util-tpl.lua util-env.lua luat-env.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua util-lib.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 694406
--- stripped bytes : 246345
+-- original bytes : 694558
+-- stripped bytes : 246497
-- end library merge
diff --git a/scripts/context/stubs/win64/mtxrun.lua b/scripts/context/stubs/win64/mtxrun.lua
index 7bd96b247..8679aefb1 100644
--- a/scripts/context/stubs/win64/mtxrun.lua
+++ b/scripts/context/stubs/win64/mtxrun.lua
@@ -7180,7 +7180,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["trac-log"] = package.loaded["trac-log"] or true
--- original size: 25455, stripped down to: 16617
+-- original size: 25607, stripped down to: 16617
if not modules then modules={} end modules ['trac-log']={
version=1.001,
@@ -16875,8 +16875,8 @@ end -- of closure
-- used libraries : l-lua.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-sto.lua util-prs.lua util-fmt.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-mrg.lua util-tpl.lua util-env.lua luat-env.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua util-lib.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 694406
--- stripped bytes : 246345
+-- original bytes : 694558
+-- stripped bytes : 246497
-- end library merge
diff --git a/tex/context/base/bibl-bib.lua b/tex/context/base/bibl-bib.lua
index 65ca1f9e1..baeb3d2f9 100644
--- a/tex/context/base/bibl-bib.lua
+++ b/tex/context/base/bibl-bib.lua
@@ -105,7 +105,7 @@ local spacing = space^0
local equal = P("=")
local collapsed = (space^1)/ " "
-local function add(a,b) if b then return a..b else return a end end
+----- function add(a,b) if b then return a..b else return a end end
local keyword = C((R("az","AZ","09") + S("@_:-"))^1) -- C((1-space)^1)
local s_quoted = ((escape*single) + collapsed + (1-single))^0
diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv
index 30778c50c..e3df6f7bf 100644
--- a/tex/context/base/cont-new.mkiv
+++ b/tex/context/base/cont-new.mkiv
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2014.04.25 00:45}
+\newcontextversion{2014.04.28 23:24}
%D This file is loaded at runtime, thereby providing an excellent place for
%D hacks, patches, extensions and new features.
diff --git a/tex/context/base/context-version.pdf b/tex/context/base/context-version.pdf
index d778e2a07..6450c43f1 100644
--- a/tex/context/base/context-version.pdf
+++ b/tex/context/base/context-version.pdf
Binary files differ
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index 7ac90cc8d..e1ade2ba1 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -28,7 +28,7 @@
%D up and the dependencies are more consistent.
\edef\contextformat {\jobname}
-\edef\contextversion{2014.04.25 00:45}
+\edef\contextversion{2014.04.28 23:24}
\edef\contextkind {beta}
%D For those who want to use this:
diff --git a/tex/context/base/m-scite.mkiv b/tex/context/base/m-scite.mkiv
new file mode 100644
index 000000000..aed2c2631
--- /dev/null
+++ b/tex/context/base/m-scite.mkiv
@@ -0,0 +1,269 @@
+%D \module
+%D [ file=m-scite,
+%D version=2014.04.28,
+%D title=\CONTEXT\ Extra Modules,
+%D subtitle=\SCITE\ lexers,
+%D author=Hans Hagen,
+%D date=\currentdate,
+%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
+%C
+%C This module is part of the \CONTEXT\ macro||package and is
+%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
+%C details.
+
+% We can simplify the scite lexers, as long as we're able to return the
+% lexed result table and provide a lexer module with the functions that
+% the lexer expects (so I need to decipher the cxx file).
+%
+% lexer._TOKENSTYLES : table
+% lexer._CHILDREN : flag
+% lexer._EXTRASTYLES : table
+% lexer._GRAMMAR : flag
+%
+% lexers.load : function
+% lexers.lex : function
+%
+% And some properties that map styles onto scintilla styling. I get the
+% impression that we end up with something simpler, a hybrid between the
+% scite lexing and the current context way, so we get an intermediate
+% step, with some penalty for context, but at least I don't have to
+% maintain two sets (three sets as we also have a line based series).
+
+% TODO: as these files are in tds we can locate them and set the lexer root
+% to that one. Currently we're on context/documents.
+
+% This is an experiment: eventually we need to hook it into the verbatim code
+% and deal with widow lines and so on.
+
+\startluacode
+
+-- todo: merge with collapse
+-- todo: prehash whitespaces
+
+-- todo: hook into the pretty print code
+-- todo: a simple catcode regime with only \ { }
+
+local gsub, sub, find = string.gsub, string.sub, string.find
+local concat = table.concat
+local formatters = string.formatters
+local lpegmatch = lpeg.match
+local setmetatableindex = table.setmetatableindex
+
+local scite = require("util-sci")
+buffers.scite = scite
+
+-- context output:
+
+local f_def_color = formatters["\\definecolor[slxc%s][h=%s%s%s]%%"]
+local f_fore_none = formatters["\\def\\slx%s#1{{\\slxc%s#1}}%%"]
+local f_fore_bold = formatters["\\def\\slx%s#1{{\\slxc%s\\bf#1}}%%"]
+local f_none_bold = formatters["\\def\\slx%s#1{{\\bf#1}}%%"]
+local f_none_none = formatters["\\def\\slx%s#1{{#1}}%%"]
+local f_texstyled = formatters["\\slx%s{%s}"]
+
+local f_mapping = [[
+\let\string\slxL\string\letterleftbrace
+\let\string\slxR\string\letterrightbrace
+\let\string\slxM\string\letterdollar
+\let\string\slxV\string\letterbar
+\let\string\slxH\string\letterhash
+\let\string\slxB\string\letterbackslash
+\let\string\slxP\string\letterpercent
+\let\string\slxS\string\fixedspace
+%]]
+
+local replacer = lpeg.replacer {
+ ["{"] = "\\slxL ",
+ ["}"] = "\\slxR ",
+ ["$"] = "\\slxM ",
+ ["|"] = "\\slxV ",
+ ["#"] = "\\slxH ",
+ ["\\"] = "\\slxB ",
+ ["%"] = "\\slxP ",
+ [" "] = "\\slxS ",
+}
+
+local colors = nil
+
+local function exportcolors()
+ if not colors then
+ scite.loadscitelexer()
+ local function black(f)
+ return (f[1] == f[2]) and (f[2] == f[3]) and (f[3] == '00')
+ end
+ local result, r = { f_mapping }, 1
+ for k, v in table.sortedhash(lexer.context.styles) do
+ local fore = v.fore
+ if fore and not black(fore) then
+ r = r + 1
+ result[r] = f_def_color(k,fore[1],fore[2],fore[3])
+ end
+ end
+ r = r + 1
+ result[r] = "%"
+ for k, v in table.sortedhash(lexer.context.styles) do
+ local bold = v.bold
+ local fore = v.fore
+ r = r + 1
+ if fore and not black(fore) then
+ if bold then
+ result[r] = f_fore_bold(k,k)
+ else
+ result[r] = f_fore_none(k,k)
+ end
+ else
+ if bold then
+ result[r] = f_none_bold(k)
+ else
+ result[r] = f_none_none(k)
+ end
+ end
+ end
+ colors = concat(result,"\n")
+ end
+ return colors
+end
+
+local function exportwhites()
+ return setmetatableindex(function(t,k)
+ local v = find(k,"white") and true or false
+ t[k] = v
+ return v
+ end)
+end
+
+local function exportstyled(lexer,text)
+ local result = lexer.lex(lexer,text,0)
+ local start = 1
+ local whites = exportwhites()
+ local buffer = { }
+ for i=1,#result,2 do
+ local style = result[i]
+ local position = result[i+1]
+ local txt = sub(text,start,position-1)
+ txt = lpegmatch(replacer,txt)
+ if whites[style] then
+ buffer[#buffer+1] = txt
+ else
+ buffer[#buffer+1] = f_texstyled(style,txt)
+ end
+ start = position
+ end
+ buffer = concat(buffer)
+ return buffer
+end
+
+function scite.installcommands()
+ context(exportcolors())
+end
+
+local function lexdata(data,lexname)
+ buffers.assign("lex",exportstyled(scite.loadedlexers[lexname],data or ""))
+end
+
+scite.lexdata = lexdata
+
+function scite.lexbuffer(name,lexname)
+ lexdata(buffers.getcontent(name) or "",lexname or "tex")
+end
+
+function scite.lexfile(filename,lexname)
+ lexdata(io.loaddata(filename) or "",lexname or file.suffix(filename))
+end
+
+-- html output
+
+\stopluacode
+
+% This is a preliminary interface.
+
+\unprotect
+
+\unexpanded\def\installscitecommands
+ {\ctxlua{buffers.scite.installcommands()}%
+ \let\installscitecommands\relax}
+
+\unexpanded\def\startscite{\startlines}
+\unexpanded\def\stopscite {\stoplines}
+
+\unexpanded\def\scitefile
+ {\dosingleargument\module_scite_file}
+
+\unexpanded\def\module_scite_file[#1]%
+ {\start
+ \ctxlua{buffers.scite.lexfile("#1")}%
+ \installscitecommands
+ \tt
+ \dontcomplain
+ \startscite
+ \getbuffer[lex]%
+ \stopscite
+ \stop}
+
+\unexpanded\def\scitebuffer
+ {\dodoubleargument\module_scite_buffer}
+
+\unexpanded\def\module_scite_buffer[#1][#2]%
+ {\start
+ \ifsecondargument
+ \ctxlua{buffers.scite.lexbuffer("#2","#1")}%
+ \else
+ \ctxlua{buffers.scite.lexbuffer("#1","tex")}%
+ \fi
+ \installscitecommands
+ \tt
+ \dontcomplain
+ \startscite
+ \getbuffer[lex]%
+ \stopscite
+ \stop}
+
+\protect
+
+\continueifinputfile{m-scite.mkiv}
+
+\setupbodyfont[dejavu,8pt]
+
+\setuplayout
+ [width=middle,
+ height=middle,
+ header=1cm,
+ footer=1cm,
+ topspace=1cm,
+ bottomspace=1cm,
+ backspace=1cm]
+
+\startbuffer[demo]
+\startsubsubject[title={oeps}]
+
+\startMPcode
+ draw fullcircle
+ scaled 2cm
+ withpen pencircle scaled 1mm
+ withcolor .5green;
+ draw textext (
+ lua (
+ "local function f(s) return string.upper(s) end mp.quoted(f('foo'))"
+ )
+ ) withcolor .5red ;
+\stopMPcode
+
+\startluacode
+ context("foo")
+\stopluacode
+
+\stopsubsubject
+\stopbuffer
+
+\starttext
+
+% \scitefile[../lexers/scite-context-lexer.lua] \page
+% \scitefile[t:/manuals/about/about-metafun.tex] \page
+% \scitefile[t:/sources/strc-sec.mkiv] \page
+% \scitefile[e:/tmp/mp.w] \page
+% \scitefile[t:/manuals/hybrid/tugboat.bib] \page
+\scitefile[e:/tmp/test.bib] \page
+
+% \getbuffer[demo] \scitebuffer[demo]
+
+\stoptext
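The comments at the top of m-scite.mkiv describe the lexer interface that this module and util-sci.lua rely on: lexer.lex returns a flat array of alternating style names and positions (one past the end of each token), which exportstyled walks in steps of two. A minimal sketch of that consumption loop, assuming it runs under mtxrun so that require finds util-sci.lua (the sample text and the printed style names are illustrative only):

  local scite    = require("util-sci")
  local texlexer = scite.loadedlexers["tex"]        -- loads scite-context-lexer-tex on demand
  local text     = [[\starttext Hello \stoptext]]
  local result   = texlexer.lex(texlexer, text, 0)  -- { style1, pos1, style2, pos2, ... }
  local start    = 1
  for i = 1, #result, 2 do
      local style    = result[i]
      local position = result[i+1]
      print(style, string.sub(text, start, position - 1))
      start = position
  end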
diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf
index 88d9476f1..f7a228bfc 100644
--- a/tex/context/base/status-files.pdf
+++ b/tex/context/base/status-files.pdf
Binary files differ
diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf
index 08433c7cd..547c0e785 100644
--- a/tex/context/base/status-lua.pdf
+++ b/tex/context/base/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/trac-deb.lua b/tex/context/base/trac-deb.lua
index 059bf5c0d..af4f7c643 100644
--- a/tex/context/base/trac-deb.lua
+++ b/tex/context/base/trac-deb.lua
@@ -159,12 +159,24 @@ end
-- this will work ok in >=0.79
+-- todo: last tex error has ! prepended
+-- todo: some nested errors have two line numbers
+-- todo: collect errorcontext in string (after code cleanup)
+-- todo: have a separate status.lualinenumber
+
+-- todo: \starttext bla \blank[foo] bla \stoptext
+
local function processerror(offset)
local inputstack = resolvers.inputstack
local filename = inputstack[#inputstack] or status.filename
local linenumber = tonumber(status.linenumber) or 0
- -- print(status.lasterrorstring)
- -- print(status.lastluaerrorstring)
+ --
+ -- print("[[ last tex error: " .. tostring(status.lasterrorstring) .. " ]]")
+ -- print("[[ last lua error: " .. tostring(status.lastluaerrorstring) .. " ]]")
+ -- print("[[ start errorcontext ]]")
+ -- tex.show_context()
+ -- print("\n[[ stop errorcontext ]]")
+ --
local lasttexerror = status.lasterrorstring or "?"
local lastluaerror = status.lastluaerrorstring or lasttexerror
local luaerrorline = match(lastluaerror,[[lua%]?:.-(%d+)]]) or (lastluaerror and find(lastluaerror,"?:0:",1,true) and 0)
diff --git a/tex/context/base/trac-log.lua b/tex/context/base/trac-log.lua
index 9435fef9b..45cc550d4 100644
--- a/tex/context/base/trac-log.lua
+++ b/tex/context/base/trac-log.lua
@@ -6,6 +6,9 @@ if not modules then modules = { } end modules ['trac-log'] = {
license = "see context related readme files"
}
+-- In fact all writes could go through lua and we could write the console and
+-- terminal handler in lua then. Ok, maybe it's slower then, so a no-go.
+
-- if tex and (tex.jobname or tex.formatname) then
--
-- -- quick hack, awaiting speedup in engine (8 -> 6.4 sec for --make with console2)
diff --git a/tex/context/base/util-sci.lua b/tex/context/base/util-sci.lua
new file mode 100644
index 000000000..98b05fe75
--- /dev/null
+++ b/tex/context/base/util-sci.lua
@@ -0,0 +1,262 @@
+local gsub, sub, find = string.gsub, string.sub, string.find
+local concat = table.concat
+local formatters = string.formatters
+local lpegmatch = lpeg.match
+local setmetatableindex = table.setmetatableindex
+
+local scite = scite or { }
+utilities.scite = scite
+
+local report = logs.reporter("scite")
+
+local lexerroot = file.dirname(resolvers.find_file("scite-context-lexer.lua"))
+
+local knownlexers = {
+ tex = "tex", mkiv = "tex", mkvi = "tex", mkxi = "tex", mkix = "tex", mkii = "tex", cld = "tex",
+ lua = "lua", lfg = "lua", lus = "lua",
+ w = "web", ww = "web",
+ c = "cpp", h = "cpp", cpp = "cpp", hpp = "cpp", cxx = "cpp", hxx = "cpp",
+ xml = "xml", lmx = "xml", ctx = "xml", xsl = "xml", xsd = "xml", rlx = "xml", css = "xml", dtd = "xml",
+ bib = "bibtex",
+ rme = "txt",
+ -- todo: pat/hyp ori
+}
+
+lexer = nil -- main lexer, global (for the moment needed for themes)
+
+local function loadscitelexer()
+ if not lexer then
+ dir.push(lexerroot)
+ lexer = dofile("scite-context-lexer.lua")
+ dofile("themes/scite-context-theme.lua")
+ dir.pop()
+ end
+ return lexer
+end
+
+local loadedlexers = setmetatableindex(function(t,k)
+ local l = knownlexers[k] or k
+ dir.push(lexerroot)
+ loadscitelexer()
+ local v = lexer.load(formatters["scite-context-lexer-%s"](l))
+ dir.pop()
+ t[l] = v
+ t[k] = v
+ return v
+end)
+
+scite.loadedlexers = loadedlexers
+scite.knownlexers = knownlexers
+scite.loadscitelexer = loadscitelexer
+
+local f_fore_bold = formatters['.%s { display: inline ; font-weight: bold ; color: #%s%s%s ; }']
+local f_fore_none = formatters['.%s { display: inline ; font-weight: normal ; color: #%s%s%s ; }']
+local f_none_bold = formatters['.%s { display: inline ; font-weight: bold ; }']
+local f_none_none = formatters['.%s { display: inline ; font-weight: normal ; }']
+local f_div_class = formatters['<div class="%s">%s</div>']
+local f_linenumber = formatters['\n<div class="linenumber">%s</div>']
+local f_div_number = formatters['.linenumber { display: inline-block ; font-weight: normal ; width: %sem ; margin-right: 2em ; padding-right: .25em ; text-align: right ; background-color: #C7C7C7 ; }']
+
+local replacer_regular = lpeg.replacer {
+ ["<"] = "&lt;",
+ [">"] = "&gt;",
+ ["&"] = "&amp;",
+}
+
+local linenumber = 0
+
+local replacer_numbered = lpeg.replacer {
+ ["<"] = "&lt;",
+ [">"] = "&gt;",
+ ["&"] = "&amp;",
+ [lpeg.patterns.newline] = function() linenumber = linenumber + 1 return f_linenumber(linenumber) end,
+}
+
+local css = nil
+
+local function exportcsslexing()
+ if not css then
+ loadscitelexer()
+ local function black(f)
+ return (f[1] == f[2]) and (f[2] == f[3]) and (f[3] == '00')
+ end
+ local result, r = { }, 0
+ for k, v in table.sortedhash(lexer.context.styles) do
+ local bold = v.bold
+ local fore = v.fore
+ r = r + 1
+ if fore and not black(fore) then
+ if bold then
+ result[r] = f_fore_bold(k,fore[1],fore[2],fore[3])
+ else
+ result[r] = f_fore_none(k,fore[1],fore[2],fore[3])
+ end
+ else
+ if bold then
+ result[r] = f_none_bold(k)
+ else
+ result[r] = f_none_none(k)
+ end
+ end
+ end
+ css = concat(result,"\n")
+ end
+ return css
+end
+
+local function exportwhites()
+ return setmetatableindex(function(t,k)
+ local v = find(k,"white") and true or false
+ t[k] = v
+ return v
+ end)
+end
+
+local function exportstyled(lexer,text,numbered)
+ local result = lexer.lex(lexer,text,0)
+ local start = 1
+ local whites = exportwhites()
+ local buffer, b = { "<pre>" }, 1
+ linenumber = 1
+ local replacer = numbered and replacer_numbered or replacer_regular
+ if numbered then
+ b = b + 1
+ buffer[b] = f_linenumber(1)
+ end
+ local n = #result
+ for i=1,n,2 do
+ local ii = i + 1
+ local style = result[i]
+ local position = result[ii]
+ local txt = sub(text,start,position-1)
+ if ii == n then
+ txt = gsub(txt,"[%s]+$","")
+ end
+ txt = lpegmatch(replacer,txt)
+ b = b + 1
+ if whites[style] then
+ buffer[b] = txt
+ else
+ buffer[b] = f_div_class(style,txt)
+ end
+ start = position
+ end
+ buffer[b+1] = "</pre>"
+ buffer = concat(buffer)
+ return buffer
+end
+
+local function exportcsslinenumber()
+ return f_div_number(#tostring(linenumber)/2+1)
+end
+
+local htmlfile = utilities.templates.replacer([[
+<?xml version="1.0"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+ <html xmlns="http://www.w3.org/1999/xhtml">
+ <title>context util-sci web page: text</title>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
+ <style type="text/css"><!--
+%lexingstyles%
+%numberstyles%
+ --></style>
+ <body>
+%lexedcontent%
+ </body>
+</html>
+]])
+
+function scite.tohtml(data,lexname,numbered)
+ return htmlfile {
+ lexedcontent = exportstyled(loadedlexers[lexname],data or "",numbered), -- before numberstyles
+ lexingstyles = exportcsslexing(),
+ numberstyles = exportcsslinenumber(),
+ }
+end
+
+function scite.filetohtml(filename,lexname,targetname,numbered)
+ io.savedata(targetname or "util-sci.html",scite.tohtml(io.loaddata(filename),lexname or file.suffix(filename),numbered))
+end
+
+function scite.css()
+ return exportcsslexing() .. "\n" .. exportcsslinenumber()
+end
+
+function scite.html(data,lexname,numbered)
+ return exportstyled(loadedlexers[lexname],data or "",numbered)
+end
+
+local f_tree_entry = formatters['<a href="%s" class="dir-entry">%s</a>']
+
+local htmlfile = utilities.templates.replacer([[
+<?xml version="1.0"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+ <html xmlns="http://www.w3.org/1999/xhtml">
+ <title>context util-sci web page: text</title>
+ <meta http-equiv="content-type" content="text/html; charset=UTF-8"/>
+ <style type="text/css"><!--
+%styles%
+ --></style>
+ <body>
+ <pre>
+%dirlist%
+ </pre>
+ </body>
+</html>
+]])
+
+function scite.converttree(sourceroot,targetroot,numbered)
+ if lfs.isdir(sourceroot) then
+ statistics.starttiming()
+ local skipped = { }
+ local noffiles = 0
+ dir.makedirs(targetroot)
+ local function scan(sourceroot,targetroot)
+ local tree = { }
+ for name in lfs.dir(sourceroot) do
+ if name ~= "." and name ~= ".." then
+ local sourcename = file.join(sourceroot,name)
+ local targetname = file.join(targetroot,name)
+ local mode = lfs.attributes(sourcename,'mode')
+ if mode == 'file' then
+ local filetype = file.suffix(sourcename)
+ local basename = file.basename(name)
+ local targetname = file.replacesuffix(targetname,"html")
+ if knownlexers[filetype] then
+ report("converting file %a to %a",sourcename,targetname)
+ scite.filetohtml(sourcename,nil,targetname,numbered)
+ noffiles = noffiles + 1
+ tree[#tree+1] = f_tree_entry(file.basename(targetname),basename)
+ else
+ skipped[filetype] = true
+ report("no lexer for %a",sourcename)
+ end
+ else
+ dir.makedirs(targetname)
+ scan(sourcename,targetname)
+ tree[#tree+1] = f_tree_entry(file.join(name,"files.html"),name)
+ end
+ end
+ end
+ report("saving tree in %a",treename)
+ local htmldata = htmlfile {
+ dirlist = concat(tree,"\n"),
+ styles = "",
+ }
+ io.savedata(file.join(targetroot,"files.html"),htmldata)
+ end
+ scan(sourceroot,targetroot)
+ if next(skipped) then
+ report("skipped filetypes: %a",table.concat(table.sortedkeys(skipped)," "))
+ end
+ statistics.stoptiming()
+ report("conversion time for %s files: %s",noffiles,statistics.elapsedtime())
+ end
+end
+
+-- scite.filetohtml("strc-sec.mkiv",nil,"e:/tmp/util-sci.html",true)
+-- scite.filetohtml("syst-aux.mkiv",nil,"e:/tmp/util-sci.html",true)
+
+-- scite.converttree("t:/texmf/tex/context","e:/tmp/html/context",true)
+
+return scite
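A minimal usage sketch of the helpers exported by util-sci.lua, again assuming an mtxrun environment (the output paths and the sample file are placeholders):

  local scite = require("util-sci")

  -- lex a string and get a complete html page back
  local html = scite.tohtml("\\starttext Hello \\stoptext", "tex", true)
  io.savedata("e:/tmp/hello.html", html)

  -- convert a file directly; with a nil lexer name the file suffix decides
  scite.filetohtml("strc-sec.mkiv", nil, "e:/tmp/strc-sec.html", true)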
diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua
index 1ac353e30..dd9868626 100644
--- a/tex/generic/context/luatex/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : luatex-fonts-merged.lua
-- parent file : luatex-fonts.lua
--- merge date : 04/25/14 00:45:16
+-- merge date : 04/28/14 23:24:10
do -- begin closure to overcome local limits and interference