author     Context Git Mirror Bot <phg42.2a@gmail.com>  2014-05-03 13:55:33 +0200
committer  Context Git Mirror Bot <phg42.2a@gmail.com>  2014-05-03 13:55:33 +0200
commit     4fac60d85ade0f051d411be40d5502f360a28402 (patch)
tree       3b6b52514c9c2cb779ed659f2daec23d6c6e2b79 /context/data
parent     088de88944c1f2254250bb448c7371a87ff7ee39 (diff)
download   context-4fac60d85ade0f051d411be40d5502f360a28402.tar.gz

    2014-04-28 23:25:00
Diffstat (limited to 'context/data')
-rw-r--r--  context/data/scite/context/documents/scite-context-readme.pdf (renamed from context/data/scite/scite-context-readme.pdf)  | bin 210958 -> 221437 bytes
-rw-r--r--  context/data/scite/context/documents/scite-context-readme.tex (renamed from context/data/scite/scite-context-readme.tex)  | 236
-rw-r--r--  context/data/scite/context/documents/scite-context-visual.pdf (renamed from context/data/scite/scite-context-visual.pdf)  | 0
-rw-r--r--  context/data/scite/context/documents/scite-context-visual.png (renamed from context/data/scite/scite-context-visual.png)  | bin 77849 -> 77849 bytes
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-context.lua  | 4
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-interfaces.lua (renamed from context/data/scite/lexers/data/scite-context-data-interfaces.lua)  | 0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-metafun.lua (renamed from context/data/scite/lexers/data/scite-context-data-metafun.lua)  | 0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-metapost.lua (renamed from context/data/scite/lexers/data/scite-context-data-metapost.lua)  | 0
-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-tex.lua (renamed from context/data/scite/lexers/data/scite-context-data-tex.lua)  | 4
-rw-r--r--  context/data/scite/context/lexers/lexer.lua  | 3
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-bibtex.lua  | 176
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cld.lua (renamed from context/data/scite/lexers/scite-context-lexer-cld.lua)  | 11
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua  | 23
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-cpp.lua  | 188
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua (renamed from context/data/scite/lexers/scite-context-lexer-lua-longstring.lua)  | 19
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-lua.lua (renamed from context/data/scite/lexers/scite-context-lexer-lua.lua)  | 241
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-mps.lua  | 177
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua  | 136
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua  | 43
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-pdf.lua  | 204
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-tex-web.lua  | 23
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-tex.lua (renamed from context/data/scite/lexers/scite-context-lexer-tex.lua)  | 157
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-txt.lua (renamed from context/data/scite/lexers/scite-context-lexer-txt.lua)  | 28
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua  | 133
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-web.lua  | 67
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml-cdata.lua)  | 21
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua  | 33
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml-script.lua  | 33
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer-xml.lua (renamed from context/data/scite/lexers/scite-context-lexer-xml.lua)  | 213
-rw-r--r--  context/data/scite/context/lexers/scite-context-lexer.lua  | 2018
-rw-r--r--  context/data/scite/context/lexers/themes/scite-context-theme.lua  | 150
-rw-r--r--  context/data/scite/context/scite-context-data-context.properties  | 193
-rw-r--r--  context/data/scite/context/scite-context-data-interfaces.properties (renamed from context/data/scite/scite-context-data-interfaces.properties)  | 0
-rw-r--r--  context/data/scite/context/scite-context-data-metafun.properties (renamed from context/data/scite/scite-context-data-metafun.properties)  | 0
-rw-r--r--  context/data/scite/context/scite-context-data-metapost.properties (renamed from context/data/scite/scite-context-data-metapost.properties)  | 0
-rw-r--r--  context/data/scite/context/scite-context-data-tex.properties (renamed from context/data/scite/scite-context-data-tex.properties)  | 223
-rw-r--r--  context/data/scite/context/scite-context-external.properties (renamed from context/data/scite/scite-context-external.properties)  | 53
-rw-r--r--  context/data/scite/context/scite-context-internal.properties (renamed from context/data/scite/scite-context-internal.properties)  | 10
-rw-r--r--  context/data/scite/context/scite-context-user.properties  | 15
-rw-r--r--  context/data/scite/context/scite-context.properties (renamed from context/data/scite/scite-context.properties)  | 20
-rw-r--r--  context/data/scite/context/scite-ctx-context.properties (renamed from context/data/scite/scite-ctx-context.properties)  | 0
-rw-r--r--  context/data/scite/context/scite-ctx-example.properties (renamed from context/data/scite/scite-ctx-example.properties)  | 0
-rw-r--r--  context/data/scite/context/scite-ctx.lua (renamed from context/data/scite/scite-ctx.lua)  | 10
-rw-r--r--  context/data/scite/context/scite-ctx.properties (renamed from context/data/scite/scite-ctx.properties)  | 6
-rw-r--r--  context/data/scite/context/scite-metapost.properties (renamed from context/data/scite/scite-metapost.properties)  | 2
-rw-r--r--  context/data/scite/context/scite-pragma.properties (renamed from context/data/scite/scite-pragma.properties)  | 7
-rw-r--r--  context/data/scite/context/scite-tex.properties (renamed from context/data/scite/scite-tex.properties)  | 2
-rw-r--r--  context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua  | 1100
-rw-r--r--  context/data/scite/lexers/data/scite-context-data-context.lua  | 4
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-mps.lua  | 155
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf-object.lua  | 117
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf-xref.lua  | 51
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-pdf.lua  | 80
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-web.lua  | 155
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-xml-comment.lua  | 42
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-xml-script.lua  | 30
-rw-r--r--  context/data/scite/lexers/scite-context-lexer.lua  | 876
-rw-r--r--  context/data/scite/lexers/themes/scite-context-theme-keep.lua  | 233
-rw-r--r--  context/data/scite/lexers/themes/scite-context-theme.lua  | 226
-rw-r--r--  context/data/scite/metapost.properties  | 1
-rw-r--r--  context/data/scite/scite-context-data-context.properties  | 191
-rw-r--r--  context/data/scite/scite-context-user.properties  | 15
-rw-r--r--  context/data/scite/scite-context-visual.tex  | 52
-rw-r--r--  context/data/scite/tex.properties  | 1
64 files changed, 4297 insertions, 3914 deletions
diff --git a/context/data/scite/scite-context-readme.pdf b/context/data/scite/context/documents/scite-context-readme.pdf
index 99f05a2a5..2bd7d4216 100644
--- a/context/data/scite/scite-context-readme.pdf
+++ b/context/data/scite/context/documents/scite-context-readme.pdf
Binary files differ
diff --git a/context/data/scite/scite-context-readme.tex b/context/data/scite/context/documents/scite-context-readme.tex
index 42f5e0a98..cbfc00a33 100644
--- a/context/data/scite/scite-context-readme.tex
+++ b/context/data/scite/context/documents/scite-context-readme.tex
@@ -191,60 +191,115 @@ You need to add this path to your local path definition. Installing \SCITE\ to
some known place has the advantage that you can move it around. There are no
special dependencies on the operating system.
+On \MSWINDOWS\ you can for instance install \SCITE\ in:
+
+\starttyping
+c:\data\system\scite
+\stoptyping
+
+and then end up with:
+
+\starttyping
+c:\data\system\scite\wscite
+\stoptyping
+
+and that is the path you need to add to your environment \type {PATH} variable.
+
+On \LINUX\ the files end up in:
+
+\starttyping
+/usr/bin
+/usr/share/scite
+\stoptyping
+
+The second path is where we will put more files.
+
+\subject{Installing \type {scintillua}}
+
Next you need to install the lpeg lexers. \footnote {Versions later than 2.11
will not run on \MSWINDOWS\ 2K. In that case you need to comment the external
-lexer import.} These can be fetched from:
+lexer import.} The library is part of the \type {textadept} editor by Mitchell
+(\hyphenatedurl {mitchell.att.foicica.com}), which is also based on scintilla.
+The archive can be fetched from:
\starttyping
http://foicica.com/scintillua/
\stoptyping
-On \MSWINDOWS\ you need to copy the \type {lexers} subfolder to the \type
-{wscite} folder. For \LINUX\ the place depends on the distribution and I just
-copy them in the same path as where the regular properties files live. \footnote
-{If you update, don't do so without testing first. Sometimes there are changes in
-\SCITE\ that influence the lexers in which case you have to wait till we have
-update them to suit those changes.}
+On \MSWINDOWS\ you need to copy the files to the \type {wscite} folder (so we end
+up with a \type {lexers} subfolder there). For \LINUX\ the place depends on the
+distribution, for instance \type {/usr/share/scite}; this is the place where the
+regular properties files live. \footnote {If you update, don't do so without
+testing first. Sometimes there are changes in \SCITE\ that influence the lexers
+in which case you have to wait until we have updated them to suit those changes.}
-For \UNIX, one can take a precompiled version as well. Here we might need to split
-the set of files into:
+So, on \MSWINDOWS\ you end up with:
\starttyping
-/usr/bin
-/usr/share/scite
+c:\data\system\scite\wscite\lexers
\stoptyping
-The second path is hard coded in the binary and moving all files there probably works
-okay. Beware: if you're on a 64 bit system, you need to rename the 64 bit \type {so}
-library.
+And on \LINUX:
+
+\starttyping
+/usr/share/scite/lexers
+\stoptyping
-If you want to use \CONTEXT, you need to copy the relevant files from
+Beware: if you're on a 64 bit system, you need to rename the 64 bit \type {so}
+library into one without a number. Unfortunately the 64 bit library is not always
+available, which can give surprises when the operating system gets updates. In such
+a case you should downgrade or use \type {wine} with the \MSWINDOWS\ binaries
+instead. After installation you need to restart \SCITE\ in order to see if things
+work out as expected.
+
+\subject{Installing the \CONTEXT\ lexers}
+
+When we started using this nice extension, we ran into issues and as a
+consequence shipped patched \LUA\ code. We also needed some more control, as we
+wanted to provide more features and complex nested lexers. Because the library
+\API\ changed a couple of times, we now have our own variant which will be
+cleaned up over time to be more consistent with our other \LUA\ code (so that we
+can also use it in \CONTEXT\ as a variant verbatim lexer). We hope to be able to
+use the \type {scintillua} library as it does the job.
+
+Anyway, if you want to use \CONTEXT, you need to copy the relevant files from
\starttyping
<texroot>/tex/texmf-context/context/data/scite
\stoptyping
-to the path were \SCITE\ keeps its property files (\type {*.properties}). There
-is a file called \type {SciteGlobal.properties}. At the end of that file (on
-\MSWINDOWS\ it is in the path where the Scite binary) you then add a line to the
-end:
+to the path where \SCITE\ keeps its property files (\type {*.properties}). This is
+the path we already mentioned. There should be a file there called \type
+{SciteGlobal.properties}.
+
+So, in the end, on \MSWINDOWS\ you get new files in:
\starttyping
-import scite-context-user
+c:\data\system\scite\wscite
+c:\data\system\scite\wscite\context
+c:\data\system\scite\wscite\context\lexers
+c:\data\system\scite\wscite\context\lexers\themes
+c:\data\system\scite\wscite\context\lexers\data
+c:\data\system\scite\wscite\context\documents
\stoptyping
-You need to restart \SCITE\ in order to see if things work out as expected.
-
-Disabling the external lexer in a recent \SCITE\ is somewhat tricky. In that case
-the end of that file looks like:
+while on \LINUX\ you get:
\starttyping
-imports.exclude=scite-context-external
-import *
-import scite-context-user
+/usr/share/scite
+/usr/share/scite/context
+/usr/share/scite/context/lexers
+/usr/share/scite/context/lexers/themes
+/usr/share/scite/context/lexers/data
+/usr/share/scite/context/documents
\stoptyping
-In any case you need to make sure that the user file is loaded last.
+At the end of the \type {SciteGlobal.properties} you need to add the following
+line:
+
+\starttyping
+import context/scite-context-user
+\stoptyping
After this, things should run as expected (given that \TEX\ runs at the console
as well).
@@ -266,102 +321,15 @@ The configuration file defaults to the Dejavu fonts. These free fonts are part o
the \CONTEXT\ suite (also known as the standalone distribution). Of course you
can fetch them from \type {http://dejavu-fonts.org} as well. You have to copy
them to where your operating system expects them. In the suite they are available
-in
+in:
\starttyping
<contextroot>/tex/texmf/fonts/truetype/public/dejavu
\stoptyping
-\subject{An alternative approach}
-
-If for some reason you prefer not to mess with property files in the main \SCITE\
-path, you can follow a different route and selectively copy files to places.
-
-The following files are needed for the lpeg based lexer:
-
-\starttyping
-lexers/scite-context-lexer.lua
-lexers/scite-context-lexer-tex.lua
-lexers/scite-context-lexer-mps.lua
-lexers/scite-context-lexer-lua.lua
-lexers/scite-context-lexer-cld.lua
-lexers/scite-context-lexer-txt.lua
-lexers/scite-context-lexer-xml*.lua
-lexers/scite-context-lexer-pdf*.lua
-
-lexers/context/data/scite-context-data-tex.lua
-lexers/context/data/scite-context-data-context.lua
-lexers/context/data/scite-context-data-interfaces.lua
-lexers/context/data/scite-context-data-metapost.lua
-lexers/context/data/scite-context-data-metafun.lua
-
-lexers/themes/scite-context-theme.lua
-\stoptyping
-
-The data files are needed because we cannot access property files from within the
-lexer. If we could open a file we could use the property files instead.
-
-These files go to the \type {lexers} subpath in your \SCITE\ installation.
-Normally this sits in the binary path. The following files provide some
-extensions. On \MSWINDOWS\ you can copy these files to the path where the \SCITE\
-binary lives.
-
-\starttyping
-scite-ctx.lua
-\stoptyping
-
-Because property files can only be loaded from the same path where the (user)
-file loads them you need to copy the following files to the same path where the
-loading is defined:
-
-\starttyping
-scite-context.properties
-scite-context-internal.properties
-scite-context-external.properties
-
-scite-pragma.properties
-
-scite-tex.properties
-scite-metapost.properties
-
-scite-context-data-tex.properties
-scite-context-data-context.properties
-scite-context-data-interfaces.properties
-scite-context-data-metapost.properties
-scite-context-data-metafun.properties
-
-scite-ctx.properties
-scite-ctx-context.properties
-scite-ctx-example.properties
-\stoptyping
-
-On \MSWINDOWS\ these go to:
-
-\starttyping
-c:/Users/YourName
-\stoptyping
-
-Next you need to add this to:
-
-\starttyping
-import scite-context
-import scite-context-internal
-import scite-context-external
-import scite-pragma
-\stoptyping
-
-to the file:
-
-\starttyping
-SciTEUser.properties
-\stoptyping
-
-Of course the pragma import is optional. You can comment either the internal or
-external variant but there is no reason not to keep them both.
-
\subject{Extensions}
-Just a quick not to some extensions. If you select a part of the text (normally
+Just a quick note about some extensions. If you select a part of the text (normally
you do this with the shift key pressed) and you hit \type {Shift-F11}, you get a
menu with some options. More (robust) ones will be provided at some point.
@@ -388,6 +356,27 @@ disable it). Wrong words are colored red, and words that might have a case
problem are colored orange. Recognized words are greyed and words with less than
three characters are ignored.
+A spell checking file has to be put in the \type {lexers/data} directory and
+looks as follows (e.g. \type {spell-uk.lua}):
+
+\starttyping
+return {
+ ["max"]=40,
+ ["min"]=3,
+ ["n"]=151493,
+ ["words"]={
+ ["aardvark"]="aardvark",
+ ["aardvarks"]="aardvarks",
+ ["aardwolf"]="aardwolf",
+ ["aardwolves"]="aardwolves",
+ ...
+ }
+}
+\stoptyping
+
+The keys are words that get checked for the given value (which can have uppercase
+characters). The word files are not distributed (but they might be at some point).
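
Such a file is an ordinary \LUA\ table, so it can be generated from a plain word
list. A minimal sketch, assuming a one word per line input file (the file names
here are made up for illustration):

\starttyping
-- build a spell file from a plain word list, one word per line
local words, n, min, max = { }, 0, math.huge, 0
for word in io.lines("words-uk.txt") do
    if word ~= "" then
        words[word] = word             -- key is checked, value is shown
        n = n + 1
        if #word < min then min = #word end
        if #word > max then max = #word end
    end
end
local f = io.open("spell-uk.lua","w")
f:write("return {\n")
f:write(string.format(' ["max"]=%d,\n ["min"]=%d,\n ["n"]=%d,\n ["words"]={\n',max,min,n))
for key, value in pairs(words) do
    f:write(string.format('  [%q]=%q,\n',key,value))
end
f:write(" },\n}\n")
f:close()
\stoptyping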
+
In the case of internal lexers, the following file is needed:
\starttyping
@@ -451,8 +440,8 @@ releases.
\subject{The external lexers}
-These are the more advanced. They provide more detail and the \CONTEXT\ lexer
-also supports nested \METAPOST\ and \LUA. Currently there is no detailed
+These are the more advanced lexers. They provide more detail and the \CONTEXT\
+lexer also supports nested \METAPOST\ and \LUA. Currently there is no detailed
configuration but this might change once they are stable.
The external lexers operate on documents while the internal ones operate on
@@ -463,13 +452,6 @@ garbage collecting many small tables comes at a price. Of course in practice thi
probably gets unnoticed. \footnote {I wrote the code in 2011 on a more than 5
years old Dell M90 laptop, so I suppose that speed is less an issue now.}
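
The lexing itself is done with \type {lpeg} patterns. Just to illustrate the
mechanism (a toy rule, not taken from the actual lexer files), a pattern that
recognizes a \TEX\ control sequence looks like this:

\starttyping
local lpeg = require("lpeg")
local P, R, C = lpeg.P, lpeg.R, lpeg.C

local backslash = P("\\")            -- a single backslash
local name      = R("az","AZ")^1     -- one or more letters
local csname    = C(backslash * name)

print(lpeg.match(csname, "\\starttext"))   -- prints: \starttext
\stoptyping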
-In principle the external lexers can be used with \type {textadept} which also
-uses \type {scintilla}. Actually, support for lpeg lexing originates in \type
-{textadept}. Currently \type {textadept} lacks a couple of features I like about
-\SCITE\ (for instance it has no realtime logpane) and it's also still changing.
-At some point the \CONTEXT\ distribution might ship with files for \type
-{textadept} as well.
-
The external lpeg lexers work okay with the \MSWINDOWS\ and \LINUX\ versions of
\SCITE, but unfortunately at the time of writing this, the \LUA\ library that is
needed is not available for the \MACOSX\ version of \SCITE. Also, due to the fact
@@ -480,7 +462,7 @@ In addition to \CONTEXT\ and \METAFUN\ lexing a \LUA\ lexer is also provided so
that we can handle \CONTEXT\ \LUA\ Document (\CLD) files too. There is also an
\XML\ lexer. This one also provides spell checking. The \PDF\ lexer tries to do a
good job on \PDF\ files, but it has some limitations. There is also a simple text
-file lexer that does spell checking.
+file lexer that does spell checking. Finally there is a lexer for \CWEB\ files.
Don't worry if you see an orange rectangle in your \TEX\ or \XML\ document. This
indicates that there is a special space character there, for instance \type
diff --git a/context/data/scite/scite-context-visual.pdf b/context/data/scite/context/documents/scite-context-visual.pdf
index 69d82eda6..69d82eda6 100644
--- a/context/data/scite/scite-context-visual.pdf
+++ b/context/data/scite/context/documents/scite-context-visual.pdf
diff --git a/context/data/scite/scite-context-visual.png b/context/data/scite/context/documents/scite-context-visual.png
index 7368a68f1..7368a68f1 100644
--- a/context/data/scite/scite-context-visual.png
+++ b/context/data/scite/context/documents/scite-context-visual.png
Binary files differ
diff --git a/context/data/scite/context/lexers/data/scite-context-data-context.lua b/context/data/scite/context/lexers/data/scite-context-data-context.lua
new file mode 100644
index 000000000..6c0293fbd
--- /dev/null
+++ b/context/data/scite/context/lexers/data/scite-context-data-context.lua
@@ -0,0 +1,4 @@
+return {
+ ["constants"]={ "zerocount", "minusone", "minustwo", "plusone", "plustwo", "plusthree", "plusfour", "plusfive", "plussix", "plusseven", "pluseight", "plusnine", "plusten", "plussixteen", "plushundred", "plusthousand", "plustenthousand", "plustwentythousand", "medcard", "maxcard", "zeropoint", "onepoint", "halfapoint", "onebasepoint", "maxdimen", "scaledpoint", "thousandpoint", "points", "halfpoint", "zeroskip", "zeromuskip", "onemuskip", "pluscxxvii", "pluscxxviii", "pluscclv", "pluscclvi", "normalpagebox", "endoflinetoken", "outputnewlinechar", "emptytoks", "empty", "undefined", "voidbox", "emptybox", "emptyvbox", "emptyhbox", "bigskipamount", "medskipamount", "smallskipamount", "fmtname", "fmtversion", "texengine", "texenginename", "texengineversion", "luatexengine", "pdftexengine", "xetexengine", "unknownengine", "etexversion", "pdftexversion", "xetexversion", "xetexrevision", "activecatcode", "bgroup", "egroup", "endline", "conditionaltrue", "conditionalfalse", "attributeunsetvalue", "uprotationangle", "rightrotationangle", "downrotationangle", "leftrotationangle", "inicatcodes", "ctxcatcodes", "texcatcodes", "notcatcodes", "txtcatcodes", "vrbcatcodes", "prtcatcodes", "nilcatcodes", "luacatcodes", "tpacatcodes", "tpbcatcodes", "xmlcatcodes", "ctdcatcodes", "escapecatcode", "begingroupcatcode", "endgroupcatcode", "mathshiftcatcode", "alignmentcatcode", "endoflinecatcode", "parametercatcode", "superscriptcatcode", "subscriptcatcode", "ignorecatcode", "spacecatcode", "lettercatcode", "othercatcode", "activecatcode", "commentcatcode", "invalidcatcode", "tabasciicode", "newlineasciicode", "formfeedasciicode", "endoflineasciicode", "endoffileasciicode", "spaceasciicode", "hashasciicode", "dollarasciicode", "commentasciicode", "ampersandasciicode", "colonasciicode", "backslashasciicode", "circumflexasciicode", "underscoreasciicode", "leftbraceasciicode", "barasciicode", "rightbraceasciicode", "tildeasciicode", "delasciicode", "lessthanasciicode", "morethanasciicode", "doublecommentsignal", "atsignasciicode", "exclamationmarkasciicode", "questionmarkasciicode", "doublequoteasciicode", "singlequoteasciicode", "forwardslashasciicode", "primeasciicode", "activemathcharcode", "activetabtoken", "activeformfeedtoken", "activeendoflinetoken", "batchmodecode", "nonstopmodecode", "scrollmodecode", "errorstopmodecode", "bottomlevelgroupcode", "simplegroupcode", "hboxgroupcode", "adjustedhboxgroupcode", "vboxgroupcode", "vtopgroupcode", "aligngroupcode", "noaligngroupcode", "outputgroupcode", "mathgroupcode", "discretionarygroupcode", "insertgroupcode", "vcentergroupcode", "mathchoicegroupcode", "semisimplegroupcode", "mathshiftgroupcode", "mathleftgroupcode", "vadjustgroupcode", "charnodecode", "hlistnodecode", "vlistnodecode", "rulenodecode", "insertnodecode", "marknodecode", "adjustnodecode", "ligaturenodecode", "discretionarynodecode", "whatsitnodecode", "mathnodecode", "gluenodecode", "kernnodecode", "penaltynodecode", "unsetnodecode", "mathsnodecode", "charifcode", "catifcode", "numifcode", "dimifcode", "oddifcode", "vmodeifcode", "hmodeifcode", "mmodeifcode", "innerifcode", "voidifcode", "hboxifcode", "vboxifcode", "xifcode", "eofifcode", "trueifcode", "falseifcode", "caseifcode", "definedifcode", "csnameifcode", "fontcharifcode", "fontslantperpoint", "fontinterwordspace", "fontinterwordstretch", "fontinterwordshrink", "fontexheight", "fontemwidth", "fontextraspace", "slantperpoint", "interwordspace", "interwordstretch", "interwordshrink", "exheight", "emwidth", "extraspace", "mathsupdisplay", 
"mathsupnormal", "mathsupcramped", "mathsubnormal", "mathsubcombined", "mathaxisheight", "startmode", "stopmode", "startnotmode", "stopnotmode", "startmodeset", "stopmodeset", "doifmode", "doifmodeelse", "doifnotmode", "startmodeset", "stopmodeset", "startallmodes", "stopallmodes", "startnotallmodes", "stopnotallmodes", "doifallmodes", "doifallmodeselse", "doifnotallmodes", "startenvironment", "stopenvironment", "environment", "startcomponent", "stopcomponent", "component", "startproduct", "stopproduct", "product", "startproject", "stopproject", "project", "starttext", "stoptext", "startnotext", "stopnotext", "startdocument", "stopdocument", "documentvariable", "setupdocument", "startmodule", "stopmodule", "usemodule", "usetexmodule", "useluamodule", "setupmodule", "currentmoduleparameter", "moduleparameter", "startTEXpage", "stopTEXpage", "enablemode", "disablemode", "preventmode", "globalenablemode", "globaldisablemode", "globalpreventmode", "pushmode", "popmode", "typescriptone", "typescripttwo", "typescriptthree", "mathsizesuffix", "mathordcode", "mathopcode", "mathbincode", "mathrelcode", "mathopencode", "mathclosecode", "mathpunctcode", "mathalphacode", "mathinnercode", "mathnothingcode", "mathlimopcode", "mathnolopcode", "mathboxcode", "mathchoicecode", "mathaccentcode", "mathradicalcode", "constantnumber", "constantnumberargument", "constantdimen", "constantdimenargument", "constantemptyargument", "continueifinputfile", "luastringsep", "!!bs", "!!es", "lefttorightmark", "righttoleftmark", "breakablethinspace", "nobreakspace", "narrownobreakspace", "zerowidthnobreakspace", "ideographicspace", "ideographichalffillspace", "twoperemspace", "threeperemspace", "fourperemspace", "fiveperemspace", "sixperemspace", "figurespace", "punctuationspace", "hairspace", "zerowidthspace", "zerowidthnonjoiner", "zerowidthjoiner", "zwnj", "zwj" },
+ ["helpers"]={ "startsetups", "stopsetups", "startxmlsetups", "stopxmlsetups", "startluasetups", "stopluasetups", "starttexsetups", "stoptexsetups", "startrawsetups", "stoprawsetups", "startlocalsetups", "stoplocalsetups", "starttexdefinition", "stoptexdefinition", "starttexcode", "stoptexcode", "startcontextcode", "stopcontextcode", "startcontextdefinitioncode", "stopcontextdefinitioncode", "doifsetupselse", "doifsetups", "doifnotsetups", "setup", "setups", "texsetup", "xmlsetup", "luasetup", "directsetup", "doifelsecommandhandler", "doifnotcommandhandler", "doifcommandhandler", "newmode", "setmode", "resetmode", "newsystemmode", "setsystemmode", "resetsystemmode", "pushsystemmode", "popsystemmode", "booleanmodevalue", "newcount", "newdimen", "newskip", "newmuskip", "newbox", "newtoks", "newread", "newwrite", "newmarks", "newinsert", "newattribute", "newif", "newlanguage", "newfamily", "newfam", "newhelp", "then", "begcsname", "strippedcsname", "firstargumentfalse", "firstargumenttrue", "secondargumentfalse", "secondargumenttrue", "thirdargumentfalse", "thirdargumenttrue", "fourthargumentfalse", "fourthargumenttrue", "fifthargumentfalse", "fifthsargumenttrue", "sixthargumentfalse", "sixtsargumenttrue", "doglobal", "dodoglobal", "redoglobal", "resetglobal", "donothing", "dontcomplain", "forgetall", "donetrue", "donefalse", "htdp", "unvoidbox", "hfilll", "vfilll", "mathbox", "mathlimop", "mathnolop", "mathnothing", "mathalpha", "currentcatcodetable", "defaultcatcodetable", "catcodetablename", "newcatcodetable", "startcatcodetable", "stopcatcodetable", "startextendcatcodetable", "stopextendcatcodetable", "pushcatcodetable", "popcatcodetable", "restorecatcodes", "setcatcodetable", "letcatcodecommand", "defcatcodecommand", "uedcatcodecommand", "hglue", "vglue", "hfillneg", "vfillneg", "hfilllneg", "vfilllneg", "ruledhss", "ruledhfil", "ruledhfill", "ruledhfilneg", "ruledhfillneg", "normalhfillneg", "ruledvss", "ruledvfil", "ruledvfill", "ruledvfilneg", "ruledvfillneg", "normalvfillneg", "ruledhbox", "ruledvbox", "ruledvtop", "ruledvcenter", "ruledmbox", "ruledhskip", "ruledvskip", "ruledkern", "ruledmskip", "ruledmkern", "ruledhglue", "ruledvglue", "normalhglue", "normalvglue", "ruledpenalty", "filledhboxb", "filledhboxr", "filledhboxg", "filledhboxc", "filledhboxm", "filledhboxy", "filledhboxk", "scratchcounter", "globalscratchcounter", "scratchdimen", "globalscratchdimen", "scratchskip", "globalscratchskip", "scratchmuskip", "globalscratchmuskip", "scratchtoks", "globalscratchtoks", "scratchbox", "globalscratchbox", "normalbaselineskip", "normallineskip", "normallineskiplimit", "availablehsize", "localhsize", "setlocalhsize", "nextbox", "dowithnextbox", "dowithnextboxcs", "dowithnextboxcontent", "dowithnextboxcontentcs", "scratchwidth", "scratchheight", "scratchdepth", "scratchoffset", "scratchdistance", "scratchhsize", "scratchvsize", "scratchxoffset", "scratchyoffset", "scratchhoffset", "scratchvoffset", "scratchxposition", "scratchyposition", "scratchtopoffset", "scratchbottomoffset", "scratchleftoffset", "scratchrightoffset", "scratchcounterone", "scratchcountertwo", "scratchcounterthree", "scratchdimenone", "scratchdimentwo", "scratchdimenthree", "scratchskipone", "scratchskiptwo", "scratchskipthree", "scratchmuskipone", "scratchmuskiptwo", "scratchmuskipthree", "scratchtoksone", "scratchtokstwo", "scratchtoksthree", "scratchboxone", "scratchboxtwo", "scratchboxthree", "scratchnx", "scratchny", "scratchmx", "scratchmy", "scratchunicode", "scratchleftskip", "scratchrightskip", 
"scratchtopskip", "scratchbottomskip", "doif", "doifnot", "doifelse", "doifinset", "doifnotinset", "doifinsetelse", "doifnextcharelse", "doifnextoptionalelse", "doifnextoptionalcselse", "doiffastoptionalcheckelse", "doifnextbgroupelse", "doifnextbgroupcselse", "doifnextparenthesiselse", "doifundefinedelse", "doifdefinedelse", "doifundefined", "doifdefined", "doifelsevalue", "doifvalue", "doifnotvalue", "doifnothing", "doifsomething", "doifelsenothing", "doifsomethingelse", "doifvaluenothing", "doifvaluesomething", "doifelsevaluenothing", "doifdimensionelse", "doifnumberelse", "doifnumber", "doifnotnumber", "doifcommonelse", "doifcommon", "doifnotcommon", "doifinstring", "doifnotinstring", "doifinstringelse", "doifassignmentelse", "docheckassignment", "tracingall", "tracingnone", "loggingall", "removetoks", "appendtoks", "prependtoks", "appendtotoks", "prependtotoks", "to", "endgraf", "endpar", "everyendpar", "reseteverypar", "finishpar", "empty", "null", "space", "quad", "enspace", "obeyspaces", "obeylines", "obeyedspace", "obeyedline", "normalspace", "executeifdefined", "singleexpandafter", "doubleexpandafter", "tripleexpandafter", "dontleavehmode", "removelastspace", "removeunwantedspaces", "keepunwantedspaces", "wait", "writestatus", "define", "defineexpandable", "redefine", "setmeasure", "setemeasure", "setgmeasure", "setxmeasure", "definemeasure", "freezemeasure", "measure", "measured", "installcorenamespace", "getvalue", "getuvalue", "setvalue", "setevalue", "setgvalue", "setxvalue", "letvalue", "letgvalue", "resetvalue", "undefinevalue", "ignorevalue", "setuvalue", "setuevalue", "setugvalue", "setuxvalue", "globallet", "glet", "udef", "ugdef", "uedef", "uxdef", "checked", "unique", "getparameters", "geteparameters", "getgparameters", "getxparameters", "forgetparameters", "copyparameters", "getdummyparameters", "dummyparameter", "directdummyparameter", "setdummyparameter", "letdummyparameter", "usedummystyleandcolor", "usedummystyleparameter", "usedummycolorparameter", "processcommalist", "processcommacommand", "quitcommalist", "quitprevcommalist", "processaction", "processallactions", "processfirstactioninset", "processallactionsinset", "unexpanded", "expanded", "startexpanded", "stopexpanded", "protected", "protect", "unprotect", "firstofoneargument", "firstoftwoarguments", "secondoftwoarguments", "firstofthreearguments", "secondofthreearguments", "thirdofthreearguments", "firstoffourarguments", "secondoffourarguments", "thirdoffourarguments", "fourthoffourarguments", "firstoffivearguments", "secondoffivearguments", "thirdoffivearguments", "fourthoffivearguments", "fifthoffivearguments", "firstofsixarguments", "secondofsixarguments", "thirdofsixarguments", "fourthofsixarguments", "fifthofsixarguments", "sixthofsixarguments", "firstofoneunexpanded", "gobbleoneargument", "gobbletwoarguments", "gobblethreearguments", "gobblefourarguments", "gobblefivearguments", "gobblesixarguments", "gobblesevenarguments", "gobbleeightarguments", "gobbleninearguments", "gobbletenarguments", "gobbleoneoptional", "gobbletwooptionals", "gobblethreeoptionals", "gobblefouroptionals", "gobblefiveoptionals", "dorecurse", "doloop", "exitloop", "dostepwiserecurse", "recurselevel", "recursedepth", "dofastloopcs", "dowith", "newconstant", "setnewconstant", "setconstant", "setconstantvalue", "newconditional", "settrue", "setfalse", "settruevalue", "setfalsevalue", "newmacro", "setnewmacro", "newfraction", "newsignal", "dosingleempty", "dodoubleempty", "dotripleempty", "doquadrupleempty", "doquintupleempty", 
"dosixtupleempty", "doseventupleempty", "dosingleargument", "dodoubleargument", "dotripleargument", "doquadrupleargument", "doquintupleargument", "dosixtupleargument", "doseventupleargument", "dosinglegroupempty", "dodoublegroupempty", "dotriplegroupempty", "doquadruplegroupempty", "doquintuplegroupempty", "permitspacesbetweengroups", "dontpermitspacesbetweengroups", "nopdfcompression", "maximumpdfcompression", "normalpdfcompression", "modulonumber", "dividenumber", "getfirstcharacter", "doiffirstcharelse", "startnointerference", "stopnointerference", "twodigits", "threedigits", "leftorright", "strut", "setstrut", "strutbox", "strutht", "strutdp", "strutwd", "struthtdp", "begstrut", "endstrut", "lineheight", "ordordspacing", "ordopspacing", "ordbinspacing", "ordrelspacing", "ordopenspacing", "ordclosespacing", "ordpunctspacing", "ordinnerspacing", "opordspacing", "opopspacing", "opbinspacing", "oprelspacing", "opopenspacing", "opclosespacing", "oppunctspacing", "opinnerspacing", "binordspacing", "binopspacing", "binbinspacing", "binrelspacing", "binopenspacing", "binclosespacing", "binpunctspacing", "bininnerspacing", "relordspacing", "relopspacing", "relbinspacing", "relrelspacing", "relopenspacing", "relclosespacing", "relpunctspacing", "relinnerspacing", "openordspacing", "openopspacing", "openbinspacing", "openrelspacing", "openopenspacing", "openclosespacing", "openpunctspacing", "openinnerspacing", "closeordspacing", "closeopspacing", "closebinspacing", "closerelspacing", "closeopenspacing", "closeclosespacing", "closepunctspacing", "closeinnerspacing", "punctordspacing", "punctopspacing", "punctbinspacing", "punctrelspacing", "punctopenspacing", "punctclosespacing", "punctpunctspacing", "punctinnerspacing", "innerordspacing", "inneropspacing", "innerbinspacing", "innerrelspacing", "inneropenspacing", "innerclosespacing", "innerpunctspacing", "innerinnerspacing", "normalreqno", "startimath", "stopimath", "normalstartimath", "normalstopimath", "startdmath", "stopdmath", "normalstartdmath", "normalstopdmath", "uncramped", "cramped", "triggermathstyle", "mathstylefont", "mathsmallstylefont", "mathstyleface", "mathsmallstyleface", "mathstylecommand", "mathpalette", "mathstylehbox", "mathstylevbox", "mathstylevcenter", "mathstylevcenteredhbox", "mathstylevcenteredvbox", "mathtext", "setmathsmalltextbox", "setmathtextbox", "triggerdisplaystyle", "triggertextstyle", "triggerscriptstyle", "triggerscriptscriptstyle", "triggeruncrampedstyle", "triggercrampedstyle", "triggersmallstyle", "triggeruncrampedsmallstyle", "triggercrampedsmallstyle", "triggerbigstyle", "triggeruncrampedbigstyle", "triggercrampedbigstyle", "luaexpr", "expdoifelse", "expdoif", "expdoifnot", "expdoifcommonelse", "expdoifinsetelse", "ctxdirectlua", "ctxlatelua", "ctxsprint", "ctxwrite", "ctxcommand", "ctxdirectcommand", "ctxlatecommand", "ctxreport", "ctxlua", "luacode", "lateluacode", "directluacode", "registerctxluafile", "ctxloadluafile", "luaversion", "luamajorversion", "luaminorversion", "ctxluacode", "luaconditional", "luaexpanded", "startluaparameterset", "stopluaparameterset", "luaparameterset", "definenamedlua", "obeylualines", "obeyluatokens", "startluacode", "stopluacode", "startlua", "stoplua", "startctxfunction", "stopctxfunction", "ctxfunction", "startctxfunctiondefinition", "stopctxfunctiondefinition", "carryoverpar", "assumelongusagecs", "Umathbotaccent", "righttolefthbox", "lefttorighthbox", "righttoleftvbox", "lefttorightvbox", "righttoleftvtop", "lefttorightvtop", "rtlhbox", "ltrhbox", "rtlvbox", 
"ltrvbox", "rtlvtop", "ltrvtop", "autodirhbox", "autodirvbox", "autodirvtop", "lefttoright", "righttoleft", "synchronizelayoutdirection", "synchronizedisplaydirection", "synchronizeinlinedirection", "lesshyphens", "morehyphens", "nohyphens", "dohyphens", "Ucheckedstartdisplaymath", "Ucheckedstopdisplaymath" },
+}
\ No newline at end of file
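
The data file above just returns a \LUA\ table with \type {constants} and \type
{helpers} word lists. Any \LUA\ code (for instance a lexer) can turn such lists
into hash sets for fast keyword lookup. A minimal sketch, assuming the file is
reachable from the current directory (the helper name is made up for
illustration):

\starttyping
-- turn the keyword lists from a data file into lookup sets
local function loadsets(filename)
    local data = dofile(filename)       -- the file returns a table
    local sets = { }
    for class, list in pairs(data) do   -- "constants", "helpers", ...
        local set = { }
        for i = 1, #list do
            set[list[i]] = true
        end
        sets[class] = set
    end
    return sets
end

local sets = loadsets("scite-context-data-context.lua")
print(sets.constants["zerocount"])      -- true
print(sets.helpers["startsetups"])      -- true
\stoptyping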
diff --git a/context/data/scite/lexers/data/scite-context-data-interfaces.lua b/context/data/scite/context/lexers/data/scite-context-data-interfaces.lua
index b2c09b62a..b2c09b62a 100644
--- a/context/data/scite/lexers/data/scite-context-data-interfaces.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-interfaces.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-metafun.lua b/context/data/scite/context/lexers/data/scite-context-data-metafun.lua
index 50b9ecec4..50b9ecec4 100644
--- a/context/data/scite/lexers/data/scite-context-data-metafun.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-metafun.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-metapost.lua b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
index 766ea90da..766ea90da 100644
--- a/context/data/scite/lexers/data/scite-context-data-metapost.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
diff --git a/context/data/scite/lexers/data/scite-context-data-tex.lua b/context/data/scite/context/lexers/data/scite-context-data-tex.lua
index 7d710740c..415b74128 100644
--- a/context/data/scite/lexers/data/scite-context-data-tex.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-tex.lua
@@ -1,9 +1,9 @@
return {
["aleph"]={ "AlephVersion", "Alephminorversion", "Alephrevision", "Alephversion", "Omegaminorversion", "Omegarevision", "Omegaversion", "boxdir", "pagebottomoffset", "pagerightoffset" },
["etex"]={ "botmarks", "clubpenalties", "currentgrouplevel", "currentgrouptype", "currentifbranch", "currentiflevel", "currentiftype", "detokenize", "dimexpr", "displaywidowpenalties", "eTeXVersion", "eTeXminorversion", "eTeXrevision", "eTeXversion", "everyeof", "firstmarks", "fontchardp", "fontcharht", "fontcharic", "fontcharwd", "glueexpr", "glueshrink", "glueshrinkorder", "gluestretch", "gluestretchorder", "gluetomu", "ifcsname", "ifdefined", "iffontchar", "interactionmode", "interlinepenalties", "lastlinefit", "lastnodetype", "marks", "muexpr", "mutoglue", "numexpr", "pagediscards", "parshapedimen", "parshapeindent", "parshapelength", "predisplaydirection", "protected", "readline", "savinghyphcodes", "savingvdiscards", "scantokens", "showgroups", "showifs", "showtokens", "splitbotmarks", "splitdiscards", "splitfirstmarks", "topmarks", "tracingassigns", "tracinggroups", "tracingifs", "tracingnesting", "tracingscantokens", "unexpanded", "unless", "widowpenalties" },
- ["luatex"]={ "Uchar", "Udelcode", "Udelcodenum", "Udelimiter", "Udelimiterover", "Udelimiterunder", "Umathaccent", "Umathaxis", "Umathbinbinspacing", "Umathbinclosespacing", "Umathbininnerspacing", "Umathbinopenspacing", "Umathbinopspacing", "Umathbinordspacing", "Umathbinpunctspacing", "Umathbinrelspacing", "Umathchar", "Umathchardef", "Umathcharnum", "Umathclosebinspacing", "Umathcloseclosespacing", "Umathcloseinnerspacing", "Umathcloseopenspacing", "Umathcloseopspacing", "Umathcloseordspacing", "Umathclosepunctspacing", "Umathcloserelspacing", "Umathcode", "Umathcodenum", "Umathconnectoroverlapmin", "Umathfractiondelsize", "Umathfractiondenomdown", "Umathfractiondenomvgap", "Umathfractionnumup", "Umathfractionnumvgap", "Umathfractionrule", "Umathinnerbinspacing", "Umathinnerclosespacing", "Umathinnerinnerspacing", "Umathinneropenspacing", "Umathinneropspacing", "Umathinnerordspacing", "Umathinnerpunctspacing", "Umathinnerrelspacing", "Umathlimitabovebgap", "Umathlimitabovekern", "Umathlimitabovevgap", "Umathlimitbelowbgap", "Umathlimitbelowkern", "Umathlimitbelowvgap", "Umathopbinspacing", "Umathopclosespacing", "Umathopenbinspacing", "Umathopenclosespacing", "Umathopeninnerspacing", "Umathopenopenspacing", "Umathopenopspacing", "Umathopenordspacing", "Umathopenpunctspacing", "Umathopenrelspacing", "Umathoperatorsize", "Umathopinnerspacing", "Umathopopenspacing", "Umathopopspacing", "Umathopordspacing", "Umathoppunctspacing", "Umathoprelspacing", "Umathordbinspacing", "Umathordclosespacing", "Umathordinnerspacing", "Umathordopenspacing", "Umathordopspacing", "Umathordordspacing", "Umathordpunctspacing", "Umathordrelspacing", "Umathoverbarkern", "Umathoverbarrule", "Umathoverbarvgap", "Umathoverdelimiterbgap", "Umathoverdelimitervgap", "Umathpunctbinspacing", "Umathpunctclosespacing", "Umathpunctinnerspacing", "Umathpunctopenspacing", "Umathpunctopspacing", "Umathpunctordspacing", "Umathpunctpunctspacing", "Umathpunctrelspacing", "Umathquad", "Umathradicaldegreeafter", "Umathradicaldegreebefore", "Umathradicaldegreeraise", "Umathradicalkern", "Umathradicalrule", "Umathradicalvgap", "Umathrelbinspacing", "Umathrelclosespacing", "Umathrelinnerspacing", "Umathrelopenspacing", "Umathrelopspacing", "Umathrelordspacing", "Umathrelpunctspacing", "Umathrelrelspacing", "Umathspaceafterscript", "Umathstackdenomdown", "Umathstacknumup", "Umathstackvgap", "Umathsubshiftdown", "Umathsubshiftdrop", "Umathsubsupshiftdown", "Umathsubsupvgap", "Umathsubtopmax", "Umathsupbottommin", "Umathsupshiftdrop", "Umathsupshiftup", "Umathsupsubbottommax", "Umathunderbarkern", "Umathunderbarrule", "Umathunderbarvgap", "Umathunderdelimiterbgap", "Umathunderdelimitervgap", "Uoverdelimiter", "Uradical", "Uroot", "Ustack", "Ustartdisplaymath", "Ustartmath", "Ustopdisplaymath", "Ustopmath", "Usubscript", "Usuperscript", "Uunderdelimiter", "alignmark", "aligntab", "attribute", "attributedef", "catcodetable", "clearmarks", "crampeddisplaystyle", "crampedscriptscriptstyle", "crampedscriptstyle", "crampedtextstyle", "fontid", "formatname", "gleaders", "ifabsdim", "ifabsnum", "ifprimitive", "initcatcodetable", "latelua", "luaescapestring", "luastartup", "luatexdatestamp", "luatexrevision", "luatexversion", "mathstyle", "nokerns", "noligs", "outputbox", "pageleftoffset", "pagetopoffset", "postexhyphenchar", "posthyphenchar", "preexhyphenchar", "prehyphenchar", "primitive", "savecatcodetable", "scantextokens", "suppressfontnotfounderror", "suppressifcsnameerror", "suppresslongerror", "suppressoutererror", "synctex" },
+ ["luatex"]={ "Uchar", "Udelcode", "Udelcodenum", "Udelimiter", "Udelimiterover", "Udelimiterunder", "Umathaccent", "Umathaxis", "Umathbinbinspacing", "Umathbinclosespacing", "Umathbininnerspacing", "Umathbinopenspacing", "Umathbinopspacing", "Umathbinordspacing", "Umathbinpunctspacing", "Umathbinrelspacing", "Umathchar", "Umathchardef", "Umathcharnum", "Umathclosebinspacing", "Umathcloseclosespacing", "Umathcloseinnerspacing", "Umathcloseopenspacing", "Umathcloseopspacing", "Umathcloseordspacing", "Umathclosepunctspacing", "Umathcloserelspacing", "Umathcode", "Umathcodenum", "Umathconnectoroverlapmin", "Umathfractiondelsize", "Umathfractiondenomdown", "Umathfractiondenomvgap", "Umathfractionnumup", "Umathfractionnumvgap", "Umathfractionrule", "Umathinnerbinspacing", "Umathinnerclosespacing", "Umathinnerinnerspacing", "Umathinneropenspacing", "Umathinneropspacing", "Umathinnerordspacing", "Umathinnerpunctspacing", "Umathinnerrelspacing", "Umathlimitabovebgap", "Umathlimitabovekern", "Umathlimitabovevgap", "Umathlimitbelowbgap", "Umathlimitbelowkern", "Umathlimitbelowvgap", "Umathopbinspacing", "Umathopclosespacing", "Umathopenbinspacing", "Umathopenclosespacing", "Umathopeninnerspacing", "Umathopenopenspacing", "Umathopenopspacing", "Umathopenordspacing", "Umathopenpunctspacing", "Umathopenrelspacing", "Umathoperatorsize", "Umathopinnerspacing", "Umathopopenspacing", "Umathopopspacing", "Umathopordspacing", "Umathoppunctspacing", "Umathoprelspacing", "Umathordbinspacing", "Umathordclosespacing", "Umathordinnerspacing", "Umathordopenspacing", "Umathordopspacing", "Umathordordspacing", "Umathordpunctspacing", "Umathordrelspacing", "Umathoverbarkern", "Umathoverbarrule", "Umathoverbarvgap", "Umathoverdelimiterbgap", "Umathoverdelimitervgap", "Umathpunctbinspacing", "Umathpunctclosespacing", "Umathpunctinnerspacing", "Umathpunctopenspacing", "Umathpunctopspacing", "Umathpunctordspacing", "Umathpunctpunctspacing", "Umathpunctrelspacing", "Umathquad", "Umathradicaldegreeafter", "Umathradicaldegreebefore", "Umathradicaldegreeraise", "Umathradicalkern", "Umathradicalrule", "Umathradicalvgap", "Umathrelbinspacing", "Umathrelclosespacing", "Umathrelinnerspacing", "Umathrelopenspacing", "Umathrelopspacing", "Umathrelordspacing", "Umathrelpunctspacing", "Umathrelrelspacing", "Umathspaceafterscript", "Umathstackdenomdown", "Umathstacknumup", "Umathstackvgap", "Umathsubshiftdown", "Umathsubshiftdrop", "Umathsubsupshiftdown", "Umathsubsupvgap", "Umathsubtopmax", "Umathsupbottommin", "Umathsupshiftdrop", "Umathsupshiftup", "Umathsupsubbottommax", "Umathunderbarkern", "Umathunderbarrule", "Umathunderbarvgap", "Umathunderdelimiterbgap", "Umathunderdelimitervgap", "Uoverdelimiter", "Uradical", "Uroot", "Ustack", "Ustartdisplaymath", "Ustartmath", "Ustopdisplaymath", "Ustopmath", "Usubscript", "Usuperscript", "Uunderdelimiter", "alignmark", "aligntab", "attribute", "attributedef", "catcodetable", "clearmarks", "crampeddisplaystyle", "crampedscriptscriptstyle", "crampedscriptstyle", "crampedtextstyle", "fontid", "formatname", "gleaders", "ifabsdim", "ifabsnum", "ifprimitive", "initcatcodetable", "latelua", "luaescapestring", "luastartup", "luatexdatestamp", "luatexrevision", "luatexversion", "luafunction", "mathstyle", "nokerns", "noligs", "outputbox", "pageleftoffset", "pagetopoffset", "postexhyphenchar", "posthyphenchar", "preexhyphenchar", "prehyphenchar", "primitive", "savecatcodetable", "scantextokens", "suppressfontnotfounderror", "suppressifcsnameerror", "suppresslongerror", "suppressoutererror", 
"synctex" },
["omega"]={ "OmegaVersion", "bodydir", "chardp", "charht", "charit", "charwd", "leftghost", "localbrokenpenalty", "localinterlinepenalty", "localleftbox", "localrightbox", "mathdir", "odelcode", "odelimiter", "omathaccent", "omathchar", "omathchardef", "omathcode", "oradical", "pagedir", "pageheight", "pagewidth", "pardir", "rightghost", "textdir" },
["pdftex"]={ "efcode", "expanded", "ifincsname", "ifpdfabsdim", "ifpdfabsnum", "ifpdfprimitive", "leftmarginkern", "letterspacefont", "lpcode", "pdfadjustspacing", "pdfannot", "pdfcatalog", "pdfcolorstack", "pdfcolorstackinit", "pdfcompresslevel", "pdfcopyfont", "pdfcreationdate", "pdfdecimaldigits", "pdfdest", "pdfdestmargin", "pdfdraftmode", "pdfeachlinedepth", "pdfeachlineheight", "pdfendlink", "pdfendthread", "pdffirstlineheight", "pdffontattr", "pdffontexpand", "pdffontname", "pdffontobjnum", "pdffontsize", "pdfgamma", "pdfgentounicode", "pdfglyphtounicode", "pdfhorigin", "pdfignoreddimen", "pdfimageapplygamma", "pdfimagegamma", "pdfimagehicolor", "pdfimageresolution", "pdfincludechars", "pdfinclusioncopyfonts", "pdfinclusionerrorlevel", "pdfinfo", "pdfinsertht", "pdflastannot", "pdflastlinedepth", "pdflastlink", "pdflastobj", "pdflastxform", "pdflastximage", "pdflastximagecolordepth", "pdflastximagepages", "pdflastxpos", "pdflastypos", "pdflinkmargin", "pdfliteral", "pdfmapfile", "pdfmapline", "pdfminorversion", "pdfnames", "pdfnoligatures", "pdfnormaldeviate", "pdfobj", "pdfobjcompresslevel", "pdfoptionpdfminorversion", "pdfoutline", "pdfoutput", "pdfpageattr", "pdfpagebox", "pdfpageheight", "pdfpageref", "pdfpageresources", "pdfpagesattr", "pdfpagewidth", "pdfpkmode", "pdfpkresolution", "pdfprimitive", "pdfprotrudechars", "pdfpxdimen", "pdfrandomseed", "pdfrefobj", "pdfrefxform", "pdfrefximage", "pdfreplacefont", "pdfrestore", "pdfretval", "pdfsave", "pdfsavepos", "pdfsetmatrix", "pdfsetrandomseed", "pdfstartlink", "pdfstartthread", "pdftexbanner", "pdftexrevision", "pdftexversion", "pdfthread", "pdfthreadmargin", "pdftracingfonts", "pdftrailer", "pdfuniformdeviate", "pdfuniqueresname", "pdfvorigin", "pdfxform", "pdfxformattr", "pdfxformname", "pdfxformresources", "pdfximage", "pdfximagebbox", "quitvmode", "rightmarginkern", "rpcode", "tagcode" },
- ["tex"]={ "-", "/", "AlephVersion", "Alephminorversion", "Alephrevision", "Alephversion", "OmegaVersion", "Omegaminorversion", "Omegarevision", "Omegaversion", "Udelcode", "Udelcodenum", "Udelimiter", "Udelimiterover", "Udelimiterunder", "Umathaccent", "Umathaxis", "Umathbinbinspacing", "Umathbinclosespacing", "Umathbininnerspacing", "Umathbinopenspacing", "Umathbinopspacing", "Umathbinordspacing", "Umathbinpunctspacing", "Umathbinrelspacing", "Umathchar", "Umathchardef", "Umathcharnum", "Umathclosebinspacing", "Umathcloseclosespacing", "Umathcloseinnerspacing", "Umathcloseopenspacing", "Umathcloseopspacing", "Umathcloseordspacing", "Umathclosepunctspacing", "Umathcloserelspacing", "Umathcode", "Umathcodenum", "Umathconnectoroverlapmin", "Umathfractiondelsize", "Umathfractiondenomdown", "Umathfractiondenomvgap", "Umathfractionnumup", "Umathfractionnumvgap", "Umathfractionrule", "Umathinnerbinspacing", "Umathinnerclosespacing", "Umathinnerinnerspacing", "Umathinneropenspacing", "Umathinneropspacing", "Umathinnerordspacing", "Umathinnerpunctspacing", "Umathinnerrelspacing", "Umathlimitabovebgap", "Umathlimitabovekern", "Umathlimitabovevgap", "Umathlimitbelowbgap", "Umathlimitbelowkern", "Umathlimitbelowvgap", "Umathopbinspacing", "Umathopclosespacing", "Umathopenbinspacing", "Umathopenclosespacing", "Umathopeninnerspacing", "Umathopenopenspacing", "Umathopenopspacing", "Umathopenordspacing", "Umathopenpunctspacing", "Umathopenrelspacing", "Umathoperatorsize", "Umathopinnerspacing", "Umathopopenspacing", "Umathopopspacing", "Umathopordspacing", "Umathoppunctspacing", "Umathoprelspacing", "Umathordbinspacing", "Umathordclosespacing", "Umathordinnerspacing", "Umathordopenspacing", "Umathordopspacing", "Umathordordspacing", "Umathordpunctspacing", "Umathordrelspacing", "Umathoverbarkern", "Umathoverbarrule", "Umathoverbarvgap", "Umathoverdelimiterbgap", "Umathoverdelimitervgap", "Umathpunctbinspacing", "Umathpunctclosespacing", "Umathpunctinnerspacing", "Umathpunctopenspacing", "Umathpunctopspacing", "Umathpunctordspacing", "Umathpunctpunctspacing", "Umathpunctrelspacing", "Umathquad", "Umathradicaldegreeafter", "Umathradicaldegreebefore", "Umathradicaldegreeraise", "Umathradicalkern", "Umathradicalrule", "Umathradicalvgap", "Umathrelbinspacing", "Umathrelclosespacing", "Umathrelinnerspacing", "Umathrelopenspacing", "Umathrelopspacing", "Umathrelordspacing", "Umathrelpunctspacing", "Umathrelrelspacing", "Umathspaceafterscript", "Umathstackdenomdown", "Umathstacknumup", "Umathstackvgap", "Umathsubshiftdown", "Umathsubshiftdrop", "Umathsubsupshiftdown", "Umathsubsupvgap", "Umathsubtopmax", "Umathsupbottommin", "Umathsupshiftdrop", "Umathsupshiftup", "Umathsupsubbottommax", "Umathunderbarkern", "Umathunderbarrule", "Umathunderbarvgap", "Umathunderdelimiterbgap", "Umathunderdelimitervgap", "Uoverdelimiter", "Uradical", "Uroot", "Ustack", "Ustartdisplaymath", "Ustartmath", "Ustopdisplaymath", "Ustopmath", "Usubscript", "Usuperscript", "Uunderdelimiter", "above", "abovedisplayshortskip", "abovedisplayskip", "abovewithdelims", "accent", "adjdemerits", "advance", "afterassignment", "aftergroup", "alignmark", "aligntab", "atop", "atopwithdelims", "attribute", "attributedef", "badness", "baselineskip", "batchmode", "begingroup", "belowdisplayshortskip", "belowdisplayskip", "binoppenalty", "bodydir", "botmark", "botmarks", "box", "boxdir", "boxmaxdepth", "brokenpenalty", "catcode", "catcodetable", "char", "chardef", "chardp", "charht", "charit", "charwd", "cleaders", "clearmarks", "closein", "closeout", 
"clubpenalties", "clubpenalty", "copy", "count", "countdef", "cr", "crampeddisplaystyle", "crampedscriptscriptstyle", "crampedscriptstyle", "crampedtextstyle", "crcr", "csname", "currentgrouplevel", "currentgrouptype", "currentifbranch", "currentiflevel", "currentiftype", "day", "deadcycles", "def", "defaulthyphenchar", "defaultskewchar", "delcode", "delimiter", "delimiterfactor", "delimitershortfall", "detokenize", "dimen", "dimendef", "dimexpr", "directlua", "discretionary", "displayindent", "displaylimits", "displaystyle", "displaywidowpenalties", "displaywidowpenalty", "displaywidth", "divide", "doublehyphendemerits", "dp", "dump", "eTeXVersion", "eTeXminorversion", "eTeXrevision", "eTeXversion", "edef", "efcode", "else", "emergencystretch", "end", "endcsname", "endgroup", "endinput", "endlinechar", "eqno", "errhelp", "errmessage", "errorcontextlines", "errorstopmode", "escapechar", "everycr", "everydisplay", "everyeof", "everyhbox", "everyjob", "everymath", "everypar", "everyvbox", "exhyphenchar", "exhyphenpenalty", "expandafter", "expanded", "fam", "fi", "finalhyphendemerits", "firstmark", "firstmarks", "floatingpenalty", "font", "fontchardp", "fontcharht", "fontcharic", "fontcharwd", "fontdimen", "fontid", "fontname", "formatname", "futurelet", "gdef", "gleaders", "global", "globaldefs", "glueexpr", "glueshrink", "glueshrinkorder", "gluestretch", "gluestretchorder", "gluetomu", "halign", "hangafter", "hangindent", "hbadness", "hbox", "hfil", "hfill", "hfilneg", "hfuzz", "hoffset", "holdinginserts", "hrule", "hsize", "hskip", "hss", "ht", "hyphenation", "hyphenchar", "hyphenpenalty", "if", "ifabsdim", "ifabsnum", "ifcase", "ifcat", "ifcsname", "ifdefined", "ifdim", "ifeof", "iffalse", "iffontchar", "ifhbox", "ifhmode", "ifincsname", "ifinner", "ifmmode", "ifnum", "ifodd", "ifpdfabsdim", "ifpdfabsnum", "ifpdfprimitive", "ifprimitive", "iftrue", "ifvbox", "ifvmode", "ifvoid", "ifx", "ignorespaces", "immediate", "indent", "initcatcodetable", "input", "inputlineno", "insert", "insertpenalties", "interactionmode", "interlinepenalties", "interlinepenalty", "jobname", "kern", "language", "lastbox", "lastkern", "lastlinefit", "lastnodetype", "lastpenalty", "lastskip", "latelua", "lccode", "leaders", "left", "leftghost", "lefthyphenmin", "leftmarginkern", "leftskip", "leqno", "let", "letterspacefont", "limits", "linepenalty", "lineskip", "lineskiplimit", "localbrokenpenalty", "localinterlinepenalty", "localleftbox", "localrightbox", "long", "looseness", "lower", "lowercase", "lpcode", "luaescapestring", "luastartup", "luatexdatestamp", "luatexrevision", "luatexversion", "mag", "mark", "marks", "mathaccent", "mathbin", "mathchar", "mathchardef", "mathchoice", "mathclose", "mathcode", "mathdir", "mathinner", "mathop", "mathopen", "mathord", "mathpunct", "mathrel", "mathstyle", "mathsurround", "maxdeadcycles", "maxdepth", "meaning", "medmuskip", "message", "middle", "mkern", "month", "moveleft", "moveright", "mskip", "muexpr", "multiply", "muskip", "muskipdef", "mutoglue", "newlinechar", "noalign", "noboundary", "noexpand", "noindent", "nokerns", "noligs", "nolimits", "nolocaldirs", "nolocalwhatsits", "nonscript", "nonstopmode", "nulldelimiterspace", "nullfont", "number", "numexpr", "odelcode", "odelimiter", "omathaccent", "omathchar", "omathchardef", "omathcode", "omit", "openin", "openout", "or", "oradical", "outer", "output", "outputbox", "outputpenalty", "over", "overfullrule", "overline", "overwithdelims", "pagebottomoffset", "pagedepth", "pagedir", "pagediscards", "pagefilllstretch", 
"pagefillstretch", "pagefilstretch", "pagegoal", "pageheight", "pageleftoffset", "pagerightoffset", "pageshrink", "pagestretch", "pagetopoffset", "pagetotal", "pagewidth", "par", "pardir", "parfillskip", "parindent", "parshape", "parshapedimen", "parshapeindent", "parshapelength", "parskip", "patterns", "pausing", "pdfadjustspacing", "pdfannot", "pdfcatalog", "pdfcolorstack", "pdfcolorstackinit", "pdfcompresslevel", "pdfcopyfont", "pdfcreationdate", "pdfdecimaldigits", "pdfdest", "pdfdestmargin", "pdfdraftmode", "pdfeachlinedepth", "pdfeachlineheight", "pdfendlink", "pdfendthread", "pdffirstlineheight", "pdffontattr", "pdffontexpand", "pdffontname", "pdffontobjnum", "pdffontsize", "pdfgamma", "pdfgentounicode", "pdfglyphtounicode", "pdfhorigin", "pdfignoreddimen", "pdfimageapplygamma", "pdfimagegamma", "pdfimagehicolor", "pdfimageresolution", "pdfincludechars", "pdfinclusioncopyfonts", "pdfinclusionerrorlevel", "pdfinfo", "pdfinsertht", "pdflastannot", "pdflastlinedepth", "pdflastlink", "pdflastobj", "pdflastxform", "pdflastximage", "pdflastximagecolordepth", "pdflastximagepages", "pdflastxpos", "pdflastypos", "pdflinkmargin", "pdfliteral", "pdfmapfile", "pdfmapline", "pdfminorversion", "pdfnames", "pdfnoligatures", "pdfnormaldeviate", "pdfobj", "pdfobjcompresslevel", "pdfoptionpdfminorversion", "pdfoutline", "pdfoutput", "pdfpageattr", "pdfpagebox", "pdfpageheight", "pdfpageref", "pdfpageresources", "pdfpagesattr", "pdfpagewidth", "pdfpkmode", "pdfpkresolution", "pdfprimitive", "pdfprotrudechars", "pdfpxdimen", "pdfrandomseed", "pdfrefobj", "pdfrefxform", "pdfrefximage", "pdfreplacefont", "pdfrestore", "pdfretval", "pdfsave", "pdfsavepos", "pdfsetmatrix", "pdfsetrandomseed", "pdfstartlink", "pdfstartthread", "pdftexbanner", "pdftexrevision", "pdftexversion", "pdfthread", "pdfthreadmargin", "pdftracingfonts", "pdftrailer", "pdfuniformdeviate", "pdfuniqueresname", "pdfvorigin", "pdfxform", "pdfxformattr", "pdfxformname", "pdfxformresources", "pdfximage", "pdfximagebbox", "penalty", "postdisplaypenalty", "postexhyphenchar", "posthyphenchar", "predisplaydirection", "predisplaypenalty", "predisplaysize", "preexhyphenchar", "prehyphenchar", "pretolerance", "prevdepth", "prevgraf", "primitive", "protected", "quitvmode", "radical", "raise", "read", "readline", "relax", "relpenalty", "right", "rightghost", "righthyphenmin", "rightmarginkern", "rightskip", "romannumeral", "rpcode", "savecatcodetable", "savinghyphcodes", "savingvdiscards", "scantextokens", "scantokens", "scriptfont", "scriptscriptfont", "scriptscriptstyle", "scriptspace", "scriptstyle", "scrollmode", "setbox", "setlanguage", "sfcode", "shipout", "show", "showbox", "showboxbreadth", "showboxdepth", "showgroups", "showifs", "showlists", "showthe", "showtokens", "skewchar", "skip", "skipdef", "spacefactor", "spaceskip", "span", "special", "splitbotmark", "splitbotmarks", "splitdiscards", "splitfirstmark", "splitfirstmarks", "splitmaxdepth", "splittopskip", "string", "suppressfontnotfounderror", "suppressifcsnameerror", "suppresslongerror", "suppressoutererror", "synctex", "tabskip", "tagcode", "textdir", "textfont", "textstyle", "the", "thickmuskip", "thinmuskip", "time", "toks", "toksdef", "tolerance", "topmark", "topmarks", "topskip", "tracingassigns", "tracingcommands", "tracinggroups", "tracingifs", "tracinglostchars", "tracingmacros", "tracingnesting", "tracingonline", "tracingoutput", "tracingpages", "tracingparagraphs", "tracingrestores", "tracingscantokens", "tracingstats", "uccode", "uchyph", "underline", "unexpanded", 
"unhbox", "unhcopy", "unkern", "unless", "unpenalty", "unskip", "unvbox", "unvcopy", "uppercase", "vadjust", "valign", "vbadness", "vbox", "vcenter", "vfil", "vfill", "vfilneg", "vfuzz", "voffset", "vrule", "vsize", "vskip", "vsplit", "vss", "vtop", "wd", "widowpenalties", "widowpenalty", "write", "xdef", "xleaders", "xspaceskip", "year" },
+ ["tex"]={ "-", "/", "AlephVersion", "Alephminorversion", "Alephrevision", "Alephversion", "OmegaVersion", "Omegaminorversion", "Omegarevision", "Omegaversion", "Udelcode", "Udelcodenum", "Udelimiter", "Udelimiterover", "Udelimiterunder", "Umathaccent", "Umathaxis", "Umathbinbinspacing", "Umathbinclosespacing", "Umathbininnerspacing", "Umathbinopenspacing", "Umathbinopspacing", "Umathbinordspacing", "Umathbinpunctspacing", "Umathbinrelspacing", "Umathchar", "Umathchardef", "Umathcharnum", "Umathclosebinspacing", "Umathcloseclosespacing", "Umathcloseinnerspacing", "Umathcloseopenspacing", "Umathcloseopspacing", "Umathcloseordspacing", "Umathclosepunctspacing", "Umathcloserelspacing", "Umathcode", "Umathcodenum", "Umathconnectoroverlapmin", "Umathfractiondelsize", "Umathfractiondenomdown", "Umathfractiondenomvgap", "Umathfractionnumup", "Umathfractionnumvgap", "Umathfractionrule", "Umathinnerbinspacing", "Umathinnerclosespacing", "Umathinnerinnerspacing", "Umathinneropenspacing", "Umathinneropspacing", "Umathinnerordspacing", "Umathinnerpunctspacing", "Umathinnerrelspacing", "Umathlimitabovebgap", "Umathlimitabovekern", "Umathlimitabovevgap", "Umathlimitbelowbgap", "Umathlimitbelowkern", "Umathlimitbelowvgap", "Umathopbinspacing", "Umathopclosespacing", "Umathopenbinspacing", "Umathopenclosespacing", "Umathopeninnerspacing", "Umathopenopenspacing", "Umathopenopspacing", "Umathopenordspacing", "Umathopenpunctspacing", "Umathopenrelspacing", "Umathoperatorsize", "Umathopinnerspacing", "Umathopopenspacing", "Umathopopspacing", "Umathopordspacing", "Umathoppunctspacing", "Umathoprelspacing", "Umathordbinspacing", "Umathordclosespacing", "Umathordinnerspacing", "Umathordopenspacing", "Umathordopspacing", "Umathordordspacing", "Umathordpunctspacing", "Umathordrelspacing", "Umathoverbarkern", "Umathoverbarrule", "Umathoverbarvgap", "Umathoverdelimiterbgap", "Umathoverdelimitervgap", "Umathpunctbinspacing", "Umathpunctclosespacing", "Umathpunctinnerspacing", "Umathpunctopenspacing", "Umathpunctopspacing", "Umathpunctordspacing", "Umathpunctpunctspacing", "Umathpunctrelspacing", "Umathquad", "Umathradicaldegreeafter", "Umathradicaldegreebefore", "Umathradicaldegreeraise", "Umathradicalkern", "Umathradicalrule", "Umathradicalvgap", "Umathrelbinspacing", "Umathrelclosespacing", "Umathrelinnerspacing", "Umathrelopenspacing", "Umathrelopspacing", "Umathrelordspacing", "Umathrelpunctspacing", "Umathrelrelspacing", "Umathspaceafterscript", "Umathstackdenomdown", "Umathstacknumup", "Umathstackvgap", "Umathsubshiftdown", "Umathsubshiftdrop", "Umathsubsupshiftdown", "Umathsubsupvgap", "Umathsubtopmax", "Umathsupbottommin", "Umathsupshiftdrop", "Umathsupshiftup", "Umathsupsubbottommax", "Umathunderbarkern", "Umathunderbarrule", "Umathunderbarvgap", "Umathunderdelimiterbgap", "Umathunderdelimitervgap", "Uoverdelimiter", "Uradical", "Uroot", "Ustack", "Ustartdisplaymath", "Ustartmath", "Ustopdisplaymath", "Ustopmath", "Usubscript", "Usuperscript", "Uunderdelimiter", "above", "abovedisplayshortskip", "abovedisplayskip", "abovewithdelims", "accent", "adjdemerits", "advance", "afterassignment", "aftergroup", "alignmark", "aligntab", "atop", "atopwithdelims", "attribute", "attributedef", "badness", "baselineskip", "batchmode", "begingroup", "belowdisplayshortskip", "belowdisplayskip", "binoppenalty", "bodydir", "botmark", "botmarks", "box", "boxdir", "boxmaxdepth", "brokenpenalty", "catcode", "catcodetable", "char", "chardef", "cleaders", "clearmarks", "closein", "closeout", "clubpenalties", "clubpenalty", "copy", 
"count", "countdef", "cr", "crampeddisplaystyle", "crampedscriptscriptstyle", "crampedscriptstyle", "crampedtextstyle", "crcr", "csname", "currentgrouplevel", "currentgrouptype", "currentifbranch", "currentiflevel", "currentiftype", "day", "deadcycles", "def", "defaulthyphenchar", "defaultskewchar", "delcode", "delimiter", "delimiterfactor", "delimitershortfall", "detokenize", "dimen", "dimendef", "dimexpr", "directlua", "discretionary", "displayindent", "displaylimits", "displaystyle", "displaywidowpenalties", "displaywidowpenalty", "displaywidth", "divide", "doublehyphendemerits", "dp", "dump", "eTeXVersion", "eTeXminorversion", "eTeXrevision", "eTeXversion", "edef", "efcode", "else", "emergencystretch", "end", "endcsname", "endgroup", "endinput", "endlinechar", "eqno", "errhelp", "errmessage", "errorcontextlines", "errorstopmode", "escapechar", "everycr", "everydisplay", "everyeof", "everyhbox", "everyjob", "everymath", "everypar", "everyvbox", "exhyphenchar", "exhyphenpenalty", "expandafter", "expanded", "fam", "fi", "finalhyphendemerits", "firstmark", "firstmarks", "floatingpenalty", "font", "fontchardp", "fontcharht", "fontcharic", "fontcharwd", "fontdimen", "fontid", "fontname", "formatname", "futurelet", "gdef", "gleaders", "global", "globaldefs", "glueexpr", "glueshrink", "glueshrinkorder", "gluestretch", "gluestretchorder", "gluetomu", "halign", "hangafter", "hangindent", "hbadness", "hbox", "hfil", "hfill", "hfilneg", "hfuzz", "hoffset", "holdinginserts", "hrule", "hsize", "hskip", "hss", "ht", "hyphenation", "hyphenchar", "hyphenpenalty", "if", "ifabsdim", "ifabsnum", "ifcase", "ifcat", "ifcsname", "ifdefined", "ifdim", "ifeof", "iffalse", "iffontchar", "ifhbox", "ifhmode", "ifincsname", "ifinner", "ifmmode", "ifnum", "ifodd", "ifpdfabsdim", "ifpdfabsnum", "ifpdfprimitive", "ifprimitive", "iftrue", "ifvbox", "ifvmode", "ifvoid", "ifx", "ignorespaces", "immediate", "indent", "initcatcodetable", "input", "inputlineno", "insert", "insertpenalties", "interactionmode", "interlinepenalties", "interlinepenalty", "jobname", "kern", "language", "lastbox", "lastkern", "lastlinefit", "lastnodetype", "lastpenalty", "lastskip", "latelua", "lccode", "leaders", "left", "leftghost", "lefthyphenmin", "leftmarginkern", "leftskip", "leqno", "let", "letterspacefont", "limits", "linepenalty", "lineskip", "lineskiplimit", "localbrokenpenalty", "localinterlinepenalty", "localleftbox", "localrightbox", "long", "looseness", "lower", "lowercase", "lpcode", "luaescapestring", "luastartup", "luatexdatestamp", "luatexrevision", "luatexversion", "mag", "mark", "marks", "mathaccent", "mathbin", "mathchar", "mathchardef", "mathchoice", "mathclose", "mathcode", "mathdir", "mathinner", "mathop", "mathopen", "mathord", "mathpunct", "mathrel", "mathstyle", "mathsurround", "maxdeadcycles", "maxdepth", "meaning", "medmuskip", "message", "middle", "mkern", "month", "moveleft", "moveright", "mskip", "muexpr", "multiply", "muskip", "muskipdef", "mutoglue", "newlinechar", "noalign", "noboundary", "noexpand", "noindent", "nokerns", "noligs", "nolimits", "nolocaldirs", "nolocalwhatsits", "nonscript", "nonstopmode", "nulldelimiterspace", "nullfont", "number", "numexpr", "odelcode", "odelimiter", "omathaccent", "omathchar", "omathchardef", "omathcode", "omit", "openin", "openout", "or", "oradical", "outer", "output", "outputbox", "outputpenalty", "over", "overfullrule", "overline", "overwithdelims", "pagebottomoffset", "pagedepth", "pagedir", "pagediscards", "pagefilllstretch", "pagefillstretch", "pagefilstretch", 
"pagegoal", "pageheight", "pageleftoffset", "pagerightoffset", "pageshrink", "pagestretch", "pagetopoffset", "pagetotal", "pagewidth", "par", "pardir", "parfillskip", "parindent", "parshape", "parshapedimen", "parshapeindent", "parshapelength", "parskip", "patterns", "pausing", "pdfadjustspacing", "pdfannot", "pdfcatalog", "pdfcolorstack", "pdfcolorstackinit", "pdfcompresslevel", "pdfcopyfont", "pdfcreationdate", "pdfdecimaldigits", "pdfdest", "pdfdestmargin", "pdfdraftmode", "pdfeachlinedepth", "pdfeachlineheight", "pdfendlink", "pdfendthread", "pdffirstlineheight", "pdffontattr", "pdffontexpand", "pdffontname", "pdffontobjnum", "pdffontsize", "pdfgamma", "pdfgentounicode", "pdfglyphtounicode", "pdfhorigin", "pdfignoreddimen", "pdfimageapplygamma", "pdfimagegamma", "pdfimagehicolor", "pdfimageresolution", "pdfincludechars", "pdfinclusioncopyfonts", "pdfinclusionerrorlevel", "pdfinfo", "pdfinsertht", "pdflastannot", "pdflastlinedepth", "pdflastlink", "pdflastobj", "pdflastxform", "pdflastximage", "pdflastximagecolordepth", "pdflastximagepages", "pdflastxpos", "pdflastypos", "pdflinkmargin", "pdfliteral", "pdfmapfile", "pdfmapline", "pdfminorversion", "pdfnames", "pdfnoligatures", "pdfnormaldeviate", "pdfobj", "pdfobjcompresslevel", "pdfoptionpdfminorversion", "pdfoutline", "pdfoutput", "pdfpageattr", "pdfpagebox", "pdfpageheight", "pdfpageref", "pdfpageresources", "pdfpagesattr", "pdfpagewidth", "pdfpkmode", "pdfpkresolution", "pdfprimitive", "pdfprotrudechars", "pdfpxdimen", "pdfrandomseed", "pdfrefobj", "pdfrefxform", "pdfrefximage", "pdfreplacefont", "pdfrestore", "pdfretval", "pdfsave", "pdfsavepos", "pdfsetmatrix", "pdfsetrandomseed", "pdfstartlink", "pdfstartthread", "pdftexbanner", "pdftexrevision", "pdftexversion", "pdfthread", "pdfthreadmargin", "pdftracingfonts", "pdftrailer", "pdfuniformdeviate", "pdfuniqueresname", "pdfvorigin", "pdfxform", "pdfxformattr", "pdfxformname", "pdfxformresources", "pdfximage", "pdfximagebbox", "penalty", "postdisplaypenalty", "postexhyphenchar", "posthyphenchar", "predisplaydirection", "predisplaypenalty", "predisplaysize", "preexhyphenchar", "prehyphenchar", "pretolerance", "prevdepth", "prevgraf", "primitive", "protected", "quitvmode", "radical", "raise", "read", "readline", "relax", "relpenalty", "right", "rightghost", "righthyphenmin", "rightmarginkern", "rightskip", "romannumeral", "rpcode", "savecatcodetable", "savinghyphcodes", "savingvdiscards", "scantextokens", "scantokens", "scriptfont", "scriptscriptfont", "scriptscriptstyle", "scriptspace", "scriptstyle", "scrollmode", "setbox", "setlanguage", "sfcode", "shipout", "show", "showbox", "showboxbreadth", "showboxdepth", "showgroups", "showifs", "showlists", "showthe", "showtokens", "skewchar", "skip", "skipdef", "spacefactor", "spaceskip", "span", "special", "splitbotmark", "splitbotmarks", "splitdiscards", "splitfirstmark", "splitfirstmarks", "splitmaxdepth", "splittopskip", "string", "suppressfontnotfounderror", "suppressifcsnameerror", "suppresslongerror", "suppressoutererror", "synctex", "tabskip", "tagcode", "textdir", "textfont", "textstyle", "the", "thickmuskip", "thinmuskip", "time", "toks", "toksdef", "tolerance", "topmark", "topmarks", "topskip", "tracingassigns", "tracingcommands", "tracinggroups", "tracingifs", "tracinglostchars", "tracingmacros", "tracingnesting", "tracingonline", "tracingoutput", "tracingpages", "tracingparagraphs", "tracingrestores", "tracingscantokens", "tracingstats", "uccode", "uchyph", "underline", "unexpanded", "unhbox", "unhcopy", "unkern", "unless", 
"unpenalty", "unskip", "unvbox", "unvcopy", "uppercase", "vadjust", "valign", "vbadness", "vbox", "vcenter", "vfil", "vfill", "vfilneg", "vfuzz", "voffset", "vrule", "vsize", "vskip", "vsplit", "vss", "vtop", "wd", "widowpenalties", "widowpenalty", "write", "xdef", "xleaders", "xspaceskip", "year" },
["xetex"]={ "XeTeXversion" },
}
\ No newline at end of file
diff --git a/context/data/scite/context/lexers/lexer.lua b/context/data/scite/context/lexers/lexer.lua
new file mode 100644
index 000000000..9582f6a76
--- /dev/null
+++ b/context/data/scite/context/lexers/lexer.lua
@@ -0,0 +1,3 @@
+-- this works ok:
+
+return require("scite-context-lexer")
diff --git a/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua b/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua
new file mode 100644
index 000000000..88b070e5e
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-bibtex.lua
@@ -0,0 +1,176 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for bibtex",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local global, string, table, lpeg = _G, string, table, lpeg
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
+local type = type
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local bibtexlexer = lexer.new("bibtex","scite-context-lexer-bibtex")
+local whitespace = bibtexlexer.whitespace
+
+local escape, left, right = P("\\"), P('{'), P('}')
+
+patterns.balanced = P {
+    [1] = ((escape * (left+right)) + (1 - (left+right)) + V(2))^0,
+    [2] = left * V(1) * right
+}
+
+-- taken from bibl-bib.lua
+
+local anything = patterns.anything
+local percent = P("%")
+local start = P("@")
+local comma = P(",")
+local hash = P("#")
+local escape = P("\\")
+local single = P("'")
+local double = P('"')
+local left = P('{')
+local right = P('}')
+local lineending = S("\n\r")
+local space = S(" \t\n\r\f")
+local spaces = space^1
+local equal = P("=")
+
+local keyword = (R("az","AZ","09") + S("@_:-"))^1
+local s_quoted = ((escape*single) + spaces^1 + (1-single))^0
+local d_quoted = ((escape*double) + spaces^1 + (1-double))^0
+local balanced = patterns.balanced
+
+local t_spacing = token(whitespace, space^1)
+local t_optionalws = token("default", space^1)^0
+
+local t_equal = token("operator",equal)
+local t_left = token("grouping",left)
+local t_right = token("grouping",right)
+local t_comma = token("operator",comma)
+local t_hash = token("operator",hash)
+
+local t_s_value = token("operator",single)
+ * token("text",s_quoted)
+ * token("operator",single)
+local t_d_value = token("operator",double)
+ * token("text",d_quoted)
+ * token("operator",double)
+local t_b_value = token("operator",left)
+ * token("text",balanced)
+ * token("operator",right)
+local t_r_value = token("text",keyword)
+
+local t_keyword = token("keyword",keyword)
+local t_key = token("command",keyword)
+local t_label = token("warning",keyword)
+
+local t_somevalue = t_s_value + t_d_value + t_b_value + t_r_value
+local t_value = t_somevalue
+ * ((t_optionalws * t_hash * t_optionalws) * t_somevalue)^0
+
+local t_assignment = t_optionalws
+ * t_key
+ * t_optionalws
+ * t_equal
+ * t_optionalws
+ * t_value
+
+local t_shortcut = t_keyword
+ * t_optionalws
+ * t_left
+ * t_optionalws
+ * (t_assignment * t_comma^0)^0
+ * t_optionalws
+ * t_right
+
+local t_definition = t_keyword
+ * t_optionalws
+ * t_left
+ * t_optionalws
+ * t_label
+ * t_optionalws
+ * t_comma
+ * (t_assignment * t_comma^0)^0
+ * t_optionalws
+ * t_right
+
+local t_comment = t_keyword
+ * t_optionalws
+ * t_left
+ * token("text",(1-t_right)^0)
+ * t_optionalws
+ * t_right
+
+local t_forget = token("comment",percent^1 * (1-lineending)^0)
+
+local t_rest = token("default",anything)
+
+-- this kind of lexing seems impossible as the size of the buffer passed to the lexer is not
+-- large enough .. but we can cheat and use this:
+--
+-- function OnOpen(filename) editor:Colourise(1,editor.TextLength) end -- or is it 0?
+
+bibtexlexer._rules = {
+ { "whitespace", t_spacing },
+ { "forget", t_forget },
+ { "shortcut", t_shortcut },
+ { "definition", t_definition },
+ { "comment", t_comment },
+ { "rest", t_rest },
+}
+
+-- local t_assignment = t_key
+-- * t_optionalws
+-- * t_equal
+-- * t_optionalws
+-- * t_value
+--
+-- local t_shortcut = t_keyword
+-- * t_optionalws
+-- * t_left
+--
+-- local t_definition = t_keyword
+-- * t_optionalws
+-- * t_left
+-- * t_optionalws
+-- * t_label
+-- * t_optionalws
+-- * t_comma
+--
+-- bibtexlexer._rules = {
+-- { "whitespace", t_spacing },
+-- { "assignment", t_assignment },
+-- { "definition", t_definition },
+-- { "shortcut", t_shortcut },
+-- { "right", t_right },
+-- { "comma", t_comma },
+-- { "forget", t_forget },
+-- { "comment", t_comment },
+-- { "rest", t_rest },
+-- }
+
+bibtexlexer._tokenstyles = context.styleset
+
+bibtexlexer._foldpattern = P("{") + P("}")
+
+bibtexlexer._foldsymbols = {
+ _patterns = {
+ "{",
+ "}",
+ },
+ ["grouping"] = {
+ ["{"] = 1,
+ ["}"] = -1,
+ },
+}
+
+return bibtexlexer
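
[editor's note, not part of the patch] The patterns.balanced grammar defined near the top of this lexer is a self-contained lpeg idiom: rule 2 consumes one brace-delimited group, rule 1 consumes any mix of escaped braces, plain characters and nested groups. A minimal standalone sketch, assuming only stock Lua with the lpeg module:

    local lpeg = require("lpeg")
    local P, V = lpeg.P, lpeg.V

    local escape, left, right = P("\\"), P("{"), P("}")

    -- [1]: a run of escaped braces, ordinary characters, or nested groups
    -- [2]: one balanced { ... } group, recursing into rule [1]
    local balanced = P {
        [1] = ((escape * (left + right)) + (1 - (left + right)) + V(2))^0,
        [2] = left * V(1) * right,
    }

    print(lpeg.match(balanced, "The \\{TeX\\} book {by {D. Knuth}}")) -- 33: one past the end, whole string matched
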
diff --git a/context/data/scite/lexers/scite-context-lexer-cld.lua b/context/data/scite/context/lexers/scite-context-lexer-cld.lua
index 1e30c18a2..3442a195c 100644
--- a/context/data/scite/lexers/scite-context-lexer-cld.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-cld.lua
@@ -6,13 +6,14 @@ local info = {
license = "see context related readme files",
}
-local lexer = lexer
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
-local cldlexer = { _NAME = "cld", _FILENAME = "scite-context-lexer-cld" }
-local whitespace = lexer.WHITESPACE -- maybe we need to fix this
-local context = lexer.context
+local cldlexer = lexer.new("cld","scite-context-lexer-cld")
+local lualexer = lexer.load("scite-context-lexer-lua")
-local lualexer = lexer.load('scite-context-lexer-lua')
+-- can probably be done nicer now, a bit of a hack
cldlexer._rules = lualexer._rules_cld
cldlexer._tokenstyles = lualexer._tokenstyles
diff --git a/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua b/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua
new file mode 100644
index 000000000..daa9221ba
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-cpp-web.lua
@@ -0,0 +1,23 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for cpp web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local cppweblexer = lexer.new("cpp-web","scite-context-lexer-cpp")
+local cpplexer = lexer.load("scite-context-lexer-cpp")
+
+-- can probably be done nicer now, a bit of a hack
+
+cppweblexer._rules = cpplexer._rules_web
+cppweblexer._tokenstyles = cpplexer._tokenstyles
+cppweblexer._foldsymbols = cpplexer._foldsymbols
+cppweblexer._directives = cpplexer._directives
+
+return cppweblexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer-cpp.lua b/context/data/scite/context/lexers/scite-context-lexer-cpp.lua
new file mode 100644
index 000000000..31180e6a5
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-cpp.lua
@@ -0,0 +1,188 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for cpp",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- looks like the original cpp lexer but web ready (so nothing special here yet)
+
+local P, R, S = lpeg.P, lpeg.R, lpeg.S
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local cpplexer = lexer.new("cpp","scite-context-lexer-cpp")
+local whitespace = cpplexer.whitespace
+
+local keywords = { -- copied from cpp.lua
+ -- c
+ "asm", "auto", "break", "case", "const", "continue", "default", "do", "else",
+ "extern", "false", "for", "goto", "if", "inline", "register", "return",
+ "sizeof", "static", "switch", "true", "typedef", "volatile", "while",
+ "restrict",
+ -- hm
+ "_Bool", "_Complex", "_Pragma", "_Imaginary",
+ -- c++.
+ "catch", "class", "const_cast", "delete", "dynamic_cast", "explicit",
+ "export", "friend", "mutable", "namespace", "new", "operator", "private",
+ "protected", "public", "signals", "slots", "reinterpret_cast",
+ "static_assert", "static_cast", "template", "this", "throw", "try", "typeid",
+ "typename", "using", "virtual"
+}
+
+local datatypes = { -- copied from cpp.lua
+ "bool", "char", "double", "enum", "float", "int", "long", "short", "signed",
+ "struct", "union", "unsigned", "void"
+}
+
+local macros = { -- copied from cpp.lua
+ "define", "elif", "else", "endif", "error", "if", "ifdef", "ifndef", "import",
+ "include", "line", "pragma", "undef", "using", "warning"
+}
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local squote = P("'")
+local dquote = P('"')
+local period = P(".")
+local escaped = P("\\") * P(1)
+local slashes = P("//")
+local begincomment = P("/*")
+local endcomment = P("*/")
+local percent = P("%")
+
+local hexadecimal = patterns.hexadecimal
+local decimal = patterns.decimal
+local float = patterns.float
+local integer = P("-")^-1 * (hexadecimal + decimal) -- also in patterns ?
+
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
+local shortcomment = token("comment", slashes * restofline^0)
+local longcomment = token("comment", begincomment * (1-endcomment)^0 * endcomment^-1)
+
+local shortstring = token("quote", dquote) -- can be shared
+ * token("string", (escaped + (1-dquote))^0)
+ * token("quote", dquote)
+ + token("quote", squote)
+ * token("string", (escaped + (1-squote))^0)
+ * token("quote", squote)
+
+local number = token("number", float + integer)
+
+local validword = R("AZ","az","__") * R("AZ","az","__","09")^0
+local identifier = token("default",validword)
+
+local operator = token("special", S("+-*/%^!=<>;:{}[]().&|?~"))
+
+----- optionalspace = spacing^0
+
+local p_keywords = exact_match(keywords )
+local p_datatypes = exact_match(datatypes)
+local p_macros = exact_match(macros)
+
+local keyword = token("keyword", p_keywords)
+local datatype = token("keyword", p_datatypes)
+local identifier = token("default", validword)
+
+local macro = token("data", #P("#") * startofline * P("#") * S("\t ")^0 * p_macros)
+
+cpplexer._rules = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+}
+
+local web = lexer.loadluafile("scite-context-lexer-web-snippets")
+
+if web then
+
+ lexer.inform("supporting web snippets in cpp lexer")
+
+ cpplexer._rules_web = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "web", web.pattern },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+ }
+
+else
+
+ lexer.report("not supporting web snippets in cpp lexer")
+
+ cpplexer._rules_web = {
+ { "whitespace", spacing },
+ { "keyword", keyword },
+ { "type", datatype },
+ { "identifier", identifier },
+ { "string", shortstring },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "number", number },
+ { "macro", macro },
+ { "operator", operator },
+ { "rest", rest },
+ }
+
+end
+
+cpplexer._tokenstyles = context.styleset
+
+cpplexer._foldpattern = P("/*") + P("*/") + S("{}") -- separate entry else interference
+
+cpplexer._foldsymbols = {
+ _patterns = {
+ "[{}]",
+ "/%*",
+ "%*/",
+ },
+ -- ["data"] = { -- macro
+ -- ["region"] = 1,
+ -- ["endregion"] = -1,
+ -- ["if"] = 1,
+ -- ["ifdef"] = 1,
+ -- ["ifndef"] = 1,
+ -- ["endif"] = -1,
+ -- },
+ ["special"] = { -- operator
+ ["{"] = 1,
+ ["}"] = -1,
+ },
+ ["comment"] = {
+ ["/*"] = 1,
+ ["*/"] = -1,
+ }
+}
+
+-- -- by indentation:
+
+cpplexer._foldpatterns = nil
+cpplexer._foldsymbols = nil
+
+return cpplexer
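
[editor's note, not part of the patch] The keyword, datatype and macro rules above all go through lexer.exact_match, which matches a word from a list only when it is a whole identifier. A hedged standalone approximation of that idea (the helper below is the editor's reconstruction, not the lexer module's actual code), using a match-time capture plus a hash lookup:

    local lpeg = require("lpeg")
    local P, R, C, Cmt = lpeg.P, lpeg.R, lpeg.C, lpeg.Cmt

    local function wordmatch(words)
        local set = { }
        for i = 1, #words do set[words[i]] = true end
        local idchar = R("az", "AZ", "09") + P("_")
        -- capture a full identifier, then accept it only when it is in the set
        return Cmt(C(idchar^1), function(_, position, word)
            if set[word] then return position end
        end)
    end

    local keyword = wordmatch { "if", "else", "while" }
    print(lpeg.match(keyword, "ifdef")) -- nil: "ifdef" is a longer identifier
    print(lpeg.match(keyword, "if"))    -- 3: whole-word match
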
diff --git a/context/data/scite/lexers/scite-context-lexer-lua-longstring.lua b/context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua
index fdec301be..855adbe4e 100644
--- a/context/data/scite/lexers/scite-context-lexer-lua-longstring.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-lua-longstring.lua
@@ -1,20 +1,21 @@
local info = {
version = 1.002,
- comment = "scintilla lpeg lexer for lua",
+ comment = "scintilla lpeg lexer for lua longstrings",
author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
copyright = "PRAGMA ADE / ConTeXt Development Team",
license = "see context related readme files",
}
-local lexer = lexer
-local token = lexer.token
-local P = lpeg.P
-
-local stringlexer = { _NAME = "lua-longstring", _FILENAME = "scite-context-lexer-lua-longstring" }
-local whitespace = lexer.WHITESPACE
+local lexer = require("lexer") -- require("scite-context-lexer")
local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local stringlexer = lexer.new("lua-longstring","scite-context-lexer-lua-longstring")
+local whitespace = stringlexer.whitespace
-local space = lexer.space
+local space = patterns.space
local nospace = 1 - space
local p_spaces = token(whitespace, space ^1)
@@ -25,6 +26,6 @@ stringlexer._rules = {
{ "string", p_string },
}
-stringlexer._tokenstyles = lexer.context.styleset
+stringlexer._tokenstyles = context.styleset
return stringlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-lua.lua b/context/data/scite/context/lexers/scite-context-lexer-lua.lua
index 4c276b1bb..c44d586ba 100644
--- a/context/data/scite/lexers/scite-context-lexer-lua.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-lua.lua
@@ -6,58 +6,68 @@ local info = {
license = "see context related readme files",
}
--- todo: _G.print (keep _G colored)
-
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
+-- beware: all multiline is messy, so even if it's no lexer, it should be an embedded lexer
+-- we probably could use a local whitespace variant but this is cleaner
-local lexer = lexer
-local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
-local P, R, S, C, Cg, Cb, Cs, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt
+local P, R, S, C, Cmt, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cmt, lpeg.Cp
local match, find = string.match, string.find
local setmetatable = setmetatable
--- beware: all multiline is messy, so even if it's no lexer, it should be an embedded lexer
--- we probably could use a local whitespace variant but this is cleaner
-
-local lualexer = { _NAME = "lua", _FILENAME = "scite-context-lexer-lua" }
-local whitespace = lexer.WHITESPACE
+local lexer = require("lexer")
local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+local just_match = lexer.just_match
+
+local lualexer = lexer.new("lua","scite-context-lexer-lua")
+local whitespace = lualexer.whitespace
local stringlexer = lexer.load("scite-context-lexer-lua-longstring")
-local directives = { } -- communication channel
+local directives = { } -- communication channel
-- this will be extended
+-- we could combine some in a hash that returns the class that then makes the token
+-- this can save time on large files
+
local keywords = {
- 'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for', 'function', -- 'goto',
- 'if', 'in', 'local', 'nil', 'not', 'or', 'repeat', 'return', 'then', 'true',
- 'until', 'while',
+ "and", "break", "do", "else", "elseif", "end", "false", "for", "function", -- "goto",
+ "if", "in", "local", "nil", "not", "or", "repeat", "return", "then", "true",
+ "until", "while",
}
local functions = {
- 'assert', 'collectgarbage', 'dofile', 'error', 'getmetatable',
- 'ipairs', 'load', 'loadfile', 'module', 'next', 'pairs',
- 'pcall', 'print', 'rawequal', 'rawget', 'rawset', 'require',
- 'setmetatable', 'tonumber', 'tostring', 'type', 'unpack', 'xpcall', 'select',
+ "assert", "collectgarbage", "dofile", "error", "getmetatable",
+ "ipairs", "load", "loadfile", "module", "next", "pairs",
+ "pcall", "print", "rawequal", "rawget", "rawset", "require",
+ "setmetatable", "tonumber", "tostring", "type", "unpack", "xpcall", "select",
"string", "table", "coroutine", "debug", "file", "io", "lpeg", "math", "os", "package", "bit32",
}
local constants = {
- '_G', '_VERSION', '_M', '...', '_ENV',
+ "_G", "_VERSION", "_M", "...", "_ENV",
-- here too
- '__add', '__call', '__concat', '__div', '__idiv', '__eq', '__gc', '__index',
- '__le', '__lt', '__metatable', '__mode', '__mul', '__newindex',
- '__pow', '__sub', '__tostring', '__unm', '__len',
- '__pairs', '__ipairs',
- 'NaN',
+ "__add", "__call", "__concat", "__div", "__idiv", "__eq", "__gc", "__index",
+ "__le", "__lt", "__metatable", "__mode", "__mul", "__newindex",
+ "__pow", "__sub", "__tostring", "__unm", "__len",
+ "__pairs", "__ipairs",
+ "NaN",
}
+-- local tokenmappings = { }
+--
+-- for i=1,#keywords do tokenmappings[keywords [i]] = "keyword"  end
+-- for i=1,#functions do tokenmappings[functions[i]] = "function" end
+-- for i=1,#constants do tokenmappings[constants[i]] = "constant" end
+
local internals = { -- __
- 'add', 'call', 'concat', 'div', 'eq', 'gc', 'index',
- 'le', 'lt', 'metatable', 'mode', 'mul', 'newindex',
- 'pow', 'sub', 'tostring', 'unm', 'len',
+ "add", "call", "concat", "div", "eq", "gc", "index",
+ "le", "lt", "metatable", "mode", "mul", "newindex",
+ "pow", "sub", "tostring", "unm", "len",
}
local depricated = {
@@ -67,7 +77,9 @@ local depricated = {
}
local csnames = { -- todo: option
+ "commands",
"context",
+ "ctx",
"metafun",
"metapost",
}
@@ -81,14 +93,14 @@ local longonestart = P("[[")
local longonestop = P("]]")
local longonestring = (1-longonestop)^0
-local longtwostart = P('[') * Cmt(equals,setlevel) * P('[')
-local longtwostop = P(']') * equals * P(']')
+local longtwostart = P("[") * Cmt(equals,setlevel) * P("[")
+local longtwostop = P("]") * equals * P("]")
local sentinels = { } setmetatable(sentinels, { __index = function(t,k) local v = "]" .. k .. "]" t[k] = v return v end })
local longtwostring = P(function(input,index)
if level then
- -- local sentinel = ']' .. level .. ']'
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 - #sentinel or #input + 1
@@ -99,32 +111,33 @@ end)
local longtwostring_end = P(function(input,index)
if level then
- -- local sentinel = ']' .. level .. ']'
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 or #input + 1
end
end)
-local longcomment = Cmt(#('[[' + ('[' * C(equals) * '[')), function(input,index,level)
- -- local sentinel = ']' .. level .. ']'
+local longcomment = Cmt(#("[[" + ("[" * C(equals) * "[")), function(input,index,level)
+ -- local sentinel = "]" .. level .. "]"
local sentinel = sentinels[level]
local _, stop = find(input,sentinel,index,true)
return stop and stop + 1 or #input + 1
end)
-local space = lexer.space -- S(" \n\r\t\f\v")
-local any = lexer.any
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local eol = patterns.eol
local squote = P("'")
local dquote = P('"')
local escaped = P("\\") * P(1)
-local dashes = P('--')
+local dashes = P("--")
local spacing = token(whitespace, space^1)
local rest = token("default", any)
-local shortcomment = token("comment", dashes * lexer.nonnewline^0)
+local shortcomment = token("comment", dashes * (1-eol)^0)
local longcomment = token("comment", dashes * longcomment)
-- fails on very long string with \ at end of lines (needs embedded lexer)
@@ -149,21 +162,23 @@ local string = shortstring
lexer.embed_lexer(lualexer, stringlexer, token("quote",longtwostart), token("string",longtwostring_body) * token("quote",longtwostring_end))
-local integer = P("-")^-1 * (lexer.hex_num + lexer.dec_num)
-local number = token("number", lexer.float + integer)
+local integer = P("-")^-1 * (patterns.hexadecimal + patterns.decimal)
+local number = token("number", patterns.float + integer)
-- officially 127-255 are ok but not utf so useless
-local validword = R("AZ","az","__") * R("AZ","az","__","09")^0
+----- validword = R("AZ","az","__") * R("AZ","az","__","09")^0
local utf8character = P(1) * R("\128\191")^1
local validword = (R("AZ","az","__") + utf8character) * (R("AZ","az","__","09") + utf8character)^0
+local validsuffix = (R("AZ","az") + utf8character) * (R("AZ","az","__","09") + utf8character)^0
local identifier = token("default",validword)
----- operator = token("special", P('..') + P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split off {}[]()
----- operator = token("special", S('+-*/%^#=<>;:,{}[]()') + P('..') + P('.') + P('~=') ) -- maybe split off {}[]()
-local operator = token("special", S('+-*/%^#=<>;:,{}[]().') + P('~=') ) -- no ^1 because of nested lexers
+----- operator = token("special", S('+-*/%^#=<>;:,{}[]().') + P('~=') ) -- no ^1 because of nested lexers
+local operator = token("special", S('+-*/%^#=<>;:,{}[]().|~')) -- no ^1 because of nested lexers
local structure = token("special", S('{}[]()'))
@@ -182,8 +197,7 @@ local p_functions = exact_match(functions)
local p_constants = exact_match(constants)
local p_internals = P("__")
* exact_match(internals)
-local p_csnames = exact_match(csnames)
-
+local p_csnames = just_match(csnames)
local keyword = token("keyword", p_keywords)
local builtin = token("plain", p_functions)
local constant = token("data", p_constants)
@@ -191,8 +205,10 @@ local internal = token("data", p_internals)
local csname = token("user", p_csnames)
* (
optionalspace * hasargument
- + ( optionalspace * token("special", S(".:")) * optionalspace * token("user", validword) )^1
+ + ( optionalspace * token("special", S(".:")) * optionalspace * token("user", validword ) )^1
+ + token("user", P("_") * validsuffix)
)
+
local identifier = token("default", validword)
* ( optionalspace * token("special", S(".:")) * optionalspace * (
token("warning", p_keywords) +
@@ -200,22 +216,33 @@ local identifier = token("default", validword)
token("default", validword )
) )^0
+-- local t = { } for k, v in next, tokenmappings do t[#t+1] = k end t = table.concat(t)
+-- -- local experimental = (S(t)^1) / function(s) return tokenmappings[s] end * Cp()
+--
+-- local experimental = Cmt(S(t)^1, function(_,i,s)
+-- local t = tokenmappings[s]
+-- if t then
+-- return true, t, i
+-- end
+-- end)
+
lualexer._rules = {
- { 'whitespace', spacing },
- { 'keyword', keyword },
- -- { 'structure', structure },
- { 'function', builtin },
- { 'csname', csname },
- { 'constant', constant },
- { 'goto', gotokeyword },
- { 'identifier', identifier },
- { 'string', string },
- { 'number', number },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment },
- { 'label', gotolabel },
- { 'operator', operator },
- { 'rest', rest },
+ { "whitespace", spacing },
+ { "keyword", keyword }, -- can be combined
+ -- { "structure", structure },
+ { "function", builtin }, -- can be combined
+ { "constant", constant }, -- can be combined
+ -- { "experimental", experimental }, -- works but better split
+ { "csname", csname },
+ { "goto", gotokeyword },
+ { "identifier", identifier },
+ { "string", string },
+ { "number", number },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment },
+ { "label", gotolabel },
+ { "operator", operator },
+ { "rest", rest },
}
-- -- experiment
@@ -250,18 +277,18 @@ lualexer._rules = {
-- }
--
-- lualexer._rules = {
--- { 'whitespace', spacing },
--- { 'whatever', whatever },
--- { 'csname', csname },
--- { 'goto', gotokeyword },
--- { 'identifier', identifier },
--- { 'string', string },
--- { 'number', number },
--- { 'longcomment', longcomment },
--- { 'shortcomment', shortcomment },
--- { 'label', gotolabel },
--- { 'operator', operator },
--- { 'rest', rest },
+-- { "whitespace", spacing },
+-- { "whatever", whatever },
+-- { "csname", csname },
+-- { "goto", gotokeyword },
+-- { "identifier", identifier },
+-- { "string", string },
+-- { "number", number },
+-- { "longcomment", longcomment },
+-- { "shortcomment", shortcomment },
+-- { "label", gotolabel },
+-- { "operator", operator },
+-- { "rest", rest },
-- }
lualexer._tokenstyles = context.styleset
@@ -273,26 +300,26 @@ lualexer._foldpattern = (P("end") + P("if") + P("do") + P("function") + P("repea
lualexer._foldsymbols = {
_patterns = {
- '[a-z][a-z]+',
- '[{}%[%]]',
+ "[a-z][a-z]+",
+ "[{}%[%]]",
},
- ['keyword'] = { -- challenge: if=0 then=1 else=-1 elseif=-1
- ['if'] = 1, -- if .. [then|else] .. end
- ['do'] = 1, -- [while] do .. end
- ['function'] = 1, -- function .. end
- ['repeat'] = 1, -- repeat .. until
- ['until'] = -1,
- ['end'] = -1,
+ ["keyword"] = { -- challenge: if=0 then=1 else=-1 elseif=-1
+ ["if"] = 1, -- if .. [then|else] .. end
+ ["do"] = 1, -- [while] do .. end
+ ["function"] = 1, -- function .. end
+ ["repeat"] = 1, -- repeat .. until
+ ["until"] = -1,
+ ["end"] = -1,
},
- ['comment'] = {
- ['['] = 1, [']'] = -1,
+ ["comment"] = {
+ ["["] = 1, ["]"] = -1,
},
- -- ['quote'] = { -- confusing
- -- ['['] = 1, [']'] = -1,
+ -- ["quote"] = { -- confusing
+ -- ["["] = 1, ["]"] = -1,
-- },
- ['special'] = {
- -- ['('] = 1, [')'] = -1,
- ['{'] = 1, ['}'] = -1,
+ ["special"] = {
+ -- ["("] = 1, [")"] = -1,
+ ["{"] = 1, ["}"] = -1,
},
}
@@ -300,9 +327,9 @@ lualexer._foldsymbols = {
local cstoken = R("az","AZ","\127\255") + S("@!?_")
local texcsname = P("\\") * cstoken^1
-local commentline = P('%') * (1-S("\n\r"))^0
+local commentline = P("%") * (1-S("\n\r"))^0
-local texcomment = token('comment', Cmt(commentline, function() return directives.cld_inline end))
+local texcomment = token("comment", Cmt(commentline, function() return directives.cld_inline end))
local longthreestart = P("\\!!bs")
local longthreestop = P("\\!!es")
@@ -312,7 +339,7 @@ local texstring = token("quote", longthreestart)
* token("string", longthreestring)
* token("quote", longthreestop)
--- local texcommand = token("user", texcsname)
+----- texcommand = token("user", texcsname)
local texcommand = token("warning", texcsname)
-- local texstring = token("quote", longthreestart)
@@ -325,22 +352,22 @@ local texcommand = token("warning", texcsname)
lualexer._directives = directives
lualexer._rules_cld = {
- { 'whitespace', spacing },
- { 'texstring', texstring },
- { 'texcomment', texcomment },
- { 'texcommand', texcommand },
- -- { 'structure', structure },
- { 'keyword', keyword },
- { 'function', builtin },
- { 'csname', csname },
- { 'constant', constant },
- { 'identifier', identifier },
- { 'string', string },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment }, -- should not be used inline so best signal it as comment (otherwise complex state till end of inline)
- { 'number', number },
- { 'operator', operator },
- { 'rest', rest },
+ { "whitespace", spacing },
+ { "texstring", texstring },
+ { "texcomment", texcomment },
+ { "texcommand", texcommand },
+ -- { "structure", structure },
+ { "keyword", keyword },
+ { "function", builtin },
+ { "csname", csname },
+ { "constant", constant },
+ { "identifier", identifier },
+ { "string", string },
+ { "longcomment", longcomment },
+ { "shortcomment", shortcomment }, -- should not be used inline so best signal it as comment (otherwise complex state till end of inline)
+ { "number", number },
+ { "operator", operator },
+ { "rest", rest },
}
return lualexer
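
[editor's note, not part of the patch] The sentinels table used for long strings above is a small memoization trick: the __index metamethod builds the closing bracket for a given equals-sign level once and caches it, after which the match-time functions can locate it with a plain (non-pattern) string.find. A standalone sketch:

    local sentinels = setmetatable({ }, {
        __index = function(t, k)
            local v = "]" .. k .. "]" -- "==" becomes "]==]"
            t[k] = v                  -- cache for the next lookup
            return v
        end,
    })

    local input = "[==[ long string ]==] tail"
    local _, stop = string.find(input, sentinels["=="], 1, true) -- plain find, no patterns
    print(stop) -- 21: end of the closing ]==]
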
diff --git a/context/data/scite/context/lexers/scite-context-lexer-mps.lua b/context/data/scite/context/lexers/scite-context-lexer-mps.lua
new file mode 100644
index 000000000..b87ea83cb
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-mps.lua
@@ -0,0 +1,177 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for metafun",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local global, string, table, lpeg = _G, string, table, lpeg
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
+local type = type
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local metafunlexer = lexer.new("mps","scite-context-lexer-mps")
+local whitespace = metafunlexer.whitespace
+
+local metapostprimitives = { }
+local metapostinternals = { }
+local metapostshortcuts = { }
+local metapostcommands = { }
+
+local metafuninternals = { }
+local metafunshortcuts = { }
+local metafuncommands = { }
+
+local mergedshortcuts = { }
+local mergedinternals = { }
+
+do
+
+ local definitions = context.loaddefinitions("scite-context-data-metapost")
+
+ if definitions then
+ metapostprimitives = definitions.primitives or { }
+ metapostinternals = definitions.internals or { }
+ metapostshortcuts = definitions.shortcuts or { }
+ metapostcommands = definitions.commands or { }
+ end
+
+ local definitions = context.loaddefinitions("scite-context-data-metafun")
+
+ if definitions then
+ metafuninternals = definitions.internals or { }
+ metafunshortcuts = definitions.shortcuts or { }
+ metafuncommands = definitions.commands or { }
+ end
+
+ for i=1,#metapostshortcuts do
+ mergedshortcuts[#mergedshortcuts+1] = metapostshortcuts[i]
+ end
+ for i=1,#metafunshortcuts do
+ mergedshortcuts[#mergedshortcuts+1] = metafunshortcuts[i]
+ end
+
+ for i=1,#metapostinternals do
+ mergedinternals[#mergedinternals+1] = metapostinternals[i]
+ end
+ for i=1,#metafuninternals do
+ mergedinternals[#mergedinternals+1] = metafuninternals[i]
+ end
+
+end
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+
+local dquote = P('"')
+local cstoken = patterns.idtoken
+local mptoken = patterns.alpha
+local leftbrace = P("{")
+local rightbrace = P("}")
+local number = patterns.real
+
+local cstokentex = R("az","AZ","\127\255") + S("@!?_")
+
+-- we could collapse as in tex
+
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+local comment = token("comment", P("%") * (1-S("\n\r"))^0)
+local internal = token("reserved", exact_match(mergedshortcuts,false))
+local shortcut = token("data", exact_match(mergedinternals))
+local helper = token("command", exact_match(metafuncommands))
+local plain = token("plain", exact_match(metapostcommands))
+local quoted = token("quote", dquote)
+ * token("string", P(1-dquote)^0)
+ * token("quote", dquote)
+local texstuff = token("quote", P("btex ") + P("verbatimtex "))
+ * token("string", P(1-P(" etex"))^0)
+ * token("quote", P(" etex"))
+local primitive = token("primitive", exact_match(metapostprimitives))
+local identifier = token("default", cstoken^1)
+local number = token("number", number)
+local grouping = token("grouping", S("()[]{}")) -- can be an option
+local special = token("special", S("#()[]{}<>=:\"")) -- or else := <> etc split
+local texlike = token("warning", P("\\") * cstokentex^1)
+local extra = token("extra", P("+-+") + P("++") + S("`~%^&_-+*/\'|\\"))
+
+local nested = P { leftbrace * (V(1) + (1-rightbrace))^0 * rightbrace }
+local texlike = token("embedded", P("\\") * (P("MP") + P("mp")) * mptoken^1)
+ * spacing^0
+ * token("grouping", leftbrace)
+ * token("default", (nested + (1-rightbrace))^0 )
+ * token("grouping", rightbrace)
+ + token("warning", P("\\") * cstokentex^1)
+
+-- lua: we assume: lua ( "lua code" )
+
+local cldlexer = lexer.load("scite-context-lexer-cld","mps-cld")
+
+local startlua = P("lua") * space^0 * P('(') * space^0 * P('"')
+local stoplua = P('"') * space^0 * P(')')
+
+local startluacode = token("embedded", startlua)
+local stopluacode = #stoplua * token("embedded", stoplua)
+
+lexer.embed_lexer(metafunlexer, cldlexer, startluacode, stopluacode)
+
+metafunlexer._rules = {
+ { "whitespace", spacing },
+ { "comment", comment },
+ { "internal", internal },
+ { "shortcut", shortcut },
+ { "helper", helper },
+ { "plain", plain },
+ { "primitive", primitive },
+ { "texstuff", texstuff },
+ { "identifier", identifier },
+ { "number", number },
+ { "quoted", quoted },
+ -- { "grouping", grouping }, -- can be an option
+ { "special", special },
+ { "texlike", texlike },
+ { "extra", extra },
+ { "rest", rest },
+}
+
+metafunlexer._tokenstyles = context.styleset
+
+metafunlexer._foldpattern = patterns.lower^2 -- separate entry else interference
+
+metafunlexer._foldsymbols = {
+ _patterns = {
+ "[a-z][a-z]+",
+ },
+ ["plain"] = {
+ ["beginfig"] = 1,
+ ["endfig"] = -1,
+ ["beginglyph"] = 1,
+ ["endglyph"] = -1,
+ -- ["begingraph"] = 1,
+ -- ["endgraph"] = -1,
+ },
+ ["primitive"] = {
+ ["def"] = 1,
+ ["vardef"] = 1,
+ ["primarydef"] = 1,
+ ["secondarydef" ] = 1,
+ ["tertiarydef"] = 1,
+ ["enddef"] = -1,
+ ["if"] = 1,
+ ["fi"] = -1,
+ ["for"] = 1,
+ ["forever"] = 1,
+ ["endfor"] = -1,
+ }
+}
+
+-- if inspect then inspect(metafunlexer) end
+
+return metafunlexer
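
[editor's note, not part of the patch] The embedded cld lexer above is switched on and off by the startlua/stoplua delimiters, which assume MetaPost code of the form lua ( "..." ). A standalone sketch of just the opening guard:

    local lpeg = require("lpeg")
    local P, S = lpeg.P, lpeg.S

    local space    = S(" \n\r\t")
    local startlua = P("lua") * space^0 * P("(") * space^0 * P('"')

    print(lpeg.match(startlua, 'lua ( "print(1)" )')) -- 8: just past the opening quote
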
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua
new file mode 100644
index 000000000..1fb95838a
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf-object.lua
@@ -0,0 +1,136 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf objects",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- no longer used: nesting lexers with whitespace in start/stop is unreliable
+
+local P, R, S, C, V = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.V
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdfobjectlexer = lexer.new("pdfobj","scite-context-lexer-pdf-object")
+local whitespace = pdfobjectlexer.whitespace
+
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local newline = patterns.eol
+local real = patterns.real
+local cardinal = patterns.cardinal
+
+local lparent = P("(")
+local rparent = P(")")
+local langle = P("<")
+local rangle = P(">")
+local escape = P("\\")
+local unicodetrigger = P("feff")
+
+local nametoken = 1 - space - S("<>/[]()")
+local name = P("/") * nametoken^1
+
+local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
+
+local t_spacing = token(whitespace, spacing)
+local t_spaces = token(whitespace, spacing)^0
+local t_rest = token("default", nospacing) -- anything
+
+local p_stream = P("stream")
+local p_endstream = P("endstream")
+local p_obj = P("obj")
+local p_endobj = P("endobj")
+local p_reference = P("R")
+
+local p_objectnumber = patterns.cardinal
+local p_comment = P("%") * (1-S("\n\r"))^0
+
+local t_string = token("quote", lparent)
+ * token("string", p_string)
+ * token("quote", rparent)
+local t_unicode = token("quote", langle)
+ * token("plain", unicodetrigger)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_whatsit = token("quote", langle)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_keyword = token("command", name)
+local t_constant = token("constant", name)
+local t_number = token("number", real)
+-- t_reference = token("number", cardinal)
+-- * t_spacing
+-- * token("number", cardinal)
+local t_reserved = token("number", P("true") + P("false") + P("null"))
+local t_reference = token("warning", cardinal)
+ * t_spacing
+ * token("warning", cardinal)
+ * t_spacing
+ * token("keyword", p_reference)
+
+local t_comment = token("comment", p_comment)
+
+local t_openobject = token("warning", p_objectnumber * spacing)
+-- * t_spacing
+ * token("warning", p_objectnumber * spacing)
+-- * t_spacing
+ * token("keyword", p_obj)
+local t_closeobject = token("keyword", p_endobj)
+
+local t_opendictionary = token("grouping", P("<<"))
+local t_closedictionary = token("grouping", P(">>"))
+
+local t_openarray = token("grouping", P("["))
+local t_closearray = token("grouping", P("]"))
+
+-- todo: comment
+
+local t_stream = token("keyword", p_stream)
+-- * token("default", newline * (1-newline*p_endstream*newline)^1 * newline)
+-- * token("text", (1 - p_endstream)^1)
+ * (token("text", (1 - p_endstream-spacing)^1) + t_spacing)^1
+ * token("keyword", p_endstream)
+
+local t_dictionary = { "dictionary",
+ dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+ array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+ whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+ }
+
+----- t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+----- object = t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+----- dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+----- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+----- whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+----- number = t_number,
+----- }
+
+local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+ dictionary = t_dictionary.dictionary,
+ array = t_dictionary.array,
+ whatever = t_dictionary.whatever,
+ object = t_openobject^-1 * t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+ number = t_number,
+ }
+
+pdfobjectlexer._shared = {
+ dictionary = t_dictionary,
+ object = t_object,
+ stream = t_stream,
+}
+
+pdfobjectlexer._rules = {
+ { "whitespace", t_spacing }, -- in fact, here we don't want whitespace as it's top level lexer work
+ { "object", t_object },
+}
+
+pdfobjectlexer._tokenstyles = context.styleset
+
+return pdfobjectlexer
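
[editor's note, not part of the patch] The t_dictionary grammar above is mutually recursive: dictionaries contain values, and values can again be dictionaries or arrays. Stripped of the token wrappers, the same shape looks like this (a sketch assuming stock Lua with lpeg; real PDF syntax has more value types):

    local lpeg = require("lpeg")
    local P, S, V = lpeg.P, lpeg.S, lpeg.V

    local space = S(" \n\r\t")^0
    local name  = P("/") * (1 - S(" \n\r\t<>/[]()"))^1

    local grammar = P { "whatever",
        dictionary = P("<<") * (space * name * space * V("whatever"))^0 * space * P(">>"),
        array      = P("[")  * (space * V("whatever"))^0 * space * P("]"),
        whatever   = V("dictionary") + V("array") + name,
    }

    print(lpeg.match(grammar * -1, "<< /Type /Page /Kids [ /A /B ] >>")) -- 34: fully parsed
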
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua
new file mode 100644
index 000000000..7097c41a6
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf-xref.lua
@@ -0,0 +1,43 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf xref",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- no longer used: nesting lexers with whitespace in start/stop is unreliable
+
+local P, R = lpeg.P, lpeg.R
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdfxreflexer = lexer.new("pdfxref","scite-context-lexer-pdf-xref")
+local whitespace = pdfxreflexer.whitespace
+
+local spacing = patterns.spacing
+local cardinal = patterns.cardinal
+local alpha = patterns.alpha
+
+local t_spacing = token(whitespace, spacing)
+
+local p_xref = P("xref")
+local t_xref = token("keyword",p_xref)
+ * token("number", cardinal * spacing * cardinal * spacing)
+
+local t_number = token("number", cardinal * spacing * cardinal * spacing)
+ * token("keyword", alpha)
+
+pdfxreflexer._rules = {
+ { "whitespace", t_spacing },
+ { "xref", t_xref },
+ { "number", t_number },
+}
+
+pdfxreflexer._tokenstyles = context.styleset
+
+return pdfxreflexer
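
[editor's note, not part of the patch] The t_number rule above targets the classic xref table layout: a ten-digit offset, a five-digit generation number, and a flag letter (f for free, n for in use, matched here loosely as alpha; the main pdf lexer narrows it to S("fn")). A standalone sketch of the same pattern:

    local lpeg = require("lpeg")
    local R, S = lpeg.R, lpeg.S

    local spacing  = S(" \n\r\t")^1
    local cardinal = R("09")^1
    local entry    = cardinal * spacing * cardinal * spacing * S("fn")

    print(lpeg.match(entry, "0000000017 00000 n")) -- 19: offset, generation, flag
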
diff --git a/context/data/scite/context/lexers/scite-context-lexer-pdf.lua b/context/data/scite/context/lexers/scite-context-lexer-pdf.lua
new file mode 100644
index 000000000..f8e4e7380
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-pdf.lua
@@ -0,0 +1,204 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for pdf",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- pdf is normally static .. i.e. not edited, so we don't really
+-- need embedded lexers.
+
+local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local pdflexer = lexer.new("pdf","scite-context-lexer-pdf")
+local whitespace = pdflexer.whitespace
+
+----- pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
+----- pdfxreflexer = lexer.load("scite-context-lexer-pdf-xref")
+
+local anything = patterns.anything
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local restofline = patterns.restofline
+
+local t_whitespace = token(whitespace, spacing)
+local t_spacing = token("default", spacing)
+----- t_rest = token("default", nospacing)
+local t_rest = token("default", anything)
+
+local p_comment = P("%") * restofline
+local t_comment = token("comment", p_comment)
+
+-- whatever
+
+local space = patterns.space
+local spacing = patterns.spacing
+local nospacing = patterns.nospacing
+local anything = patterns.anything
+local newline = patterns.eol
+local real = patterns.real
+local cardinal = patterns.cardinal
+local alpha = patterns.alpha
+
+local lparent = P("(")
+local rparent = P(")")
+local langle = P("<")
+local rangle = P(">")
+local escape = P("\\")
+local unicodetrigger = P("feff")
+
+local nametoken = 1 - space - S("<>/[]()")
+local name = P("/") * nametoken^1
+
+local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
+
+local t_spacing = token("default", spacing)
+local t_spaces = token("default", spacing)^0
+local t_rest = token("default", nospacing) -- anything
+
+local p_stream = P("stream")
+local p_endstream = P("endstream")
+local p_obj = P("obj")
+local p_endobj = P("endobj")
+local p_reference = P("R")
+
+local p_objectnumber = patterns.cardinal
+local p_comment = P("%") * (1-S("\n\r"))^0
+
+local t_string = token("quote", lparent)
+ * token("string", p_string)
+ * token("quote", rparent)
+local t_unicode = token("quote", langle)
+ * token("plain", unicodetrigger)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_whatsit = token("quote", langle)
+ * token("string", (1-rangle)^1)
+ * token("quote", rangle)
+local t_keyword = token("command", name)
+local t_constant = token("constant", name)
+local t_number = token("number", real)
+-- t_reference = token("number", cardinal)
+-- * t_spacing
+-- * token("number", cardinal)
+local t_reserved = token("number", P("true") + P("false") + P("null"))
+-- t_reference = token("warning", cardinal * spacing * cardinal * spacing)
+-- * token("keyword", p_reference)
+local t_reference = token("warning", cardinal)
+ * t_spacing
+ * token("warning", cardinal)
+ * t_spacing
+ * token("keyword", p_reference)
+
+local t_comment = token("comment", p_comment)
+
+local t_openobject = token("warning", p_objectnumber)
+ * t_spacing
+ * token("warning", p_objectnumber)
+ * t_spacing
+ * token("keyword", p_obj)
+-- t_openobject = token("warning", p_objectnumber * spacing)
+-- * token("warning", p_objectnumber * spacing)
+-- * token("keyword", p_obj)
+local t_closeobject = token("keyword", p_endobj)
+
+local t_opendictionary = token("grouping", P("<<"))
+local t_closedictionary = token("grouping", P(">>"))
+
+local t_openarray = token("grouping", P("["))
+local t_closearray = token("grouping", P("]"))
+
+local t_stream = token("keyword", p_stream)
+ * token("text", (1 - p_endstream)^1)
+ * token("keyword", p_endstream)
+
+local t_dictionary = { "dictionary",
+ dictionary = t_opendictionary * (t_spaces * t_keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
+ array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
+ whatever = V("dictionary") + V("array") + t_constant + t_reference + t_string + t_unicode + t_number + t_reserved + t_whatsit,
+ }
+
+local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
+ dictionary = t_dictionary.dictionary,
+ array = t_dictionary.array,
+ whatever = t_dictionary.whatever,
+ object = t_openobject * t_spaces * (V("dictionary")^-1 * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
+ number = t_number,
+ }
+
+-- objects ... sometimes NUL characters play havoc ... and in xref we have
+-- issues with embedded lexers that have spaces in the start and stop
+-- conditions and this cannot be handled well either ... so, an imperfect
+-- solution ... but anyway, there is not that much that can end up in
+-- the root of the tree, so we're sort of safe
+
+local p_trailer = P("trailer")
+local t_trailer = token("keyword", p_trailer)
+ * t_spacing
+ * t_dictionary
+-- t_trailer = token("keyword", p_trailer * spacing)
+-- * t_dictionary
+
+local p_startxref = P("startxref")
+local t_startxref = token("keyword", p_startxref)
+ * t_spacing
+ * token("number", cardinal)
+-- t_startxref = token("keyword", p_startxref * spacing)
+-- * token("number", cardinal)
+
+local p_xref = P("xref")
+local t_xref = token("keyword",p_xref)
+ * t_spacing
+ * token("number", cardinal)
+ * t_spacing
+ * token("number", cardinal)
+ * spacing
+-- t_xref = token("keyword",p_xref)
+-- * token("number", spacing * cardinal * spacing * cardinal * spacing)
+
+local t_number = token("number", cardinal)
+ * t_spacing
+ * token("number", cardinal)
+ * t_spacing
+ * token("keyword", S("fn"))
+-- t_number = token("number", cardinal * spacing * cardinal * spacing)
+-- * token("keyword", S("fn"))
+
+pdflexer._rules = {
+ { "whitespace", t_whitespace },
+ { "object", t_object },
+ { "comment", t_comment },
+ { "trailer", t_trailer },
+ { "startxref", t_startxref },
+ { "xref", t_xref },
+ { "number", t_number },
+ { "rest", t_rest },
+}
+
+pdflexer._tokenstyles = context.styleset
+
+-- lexer.inspect(pdflexer)
+
+-- collapser: obj endobj stream endstream
+
+pdflexer._foldpattern = p_obj + p_endobj + p_stream + p_endstream
+
+pdflexer._foldsymbols = {
+ ["keyword"] = {
+ ["obj"] = 1,
+ ["endobj"] = -1,
+ ["stream"] = 1,
+ ["endstream"] = -1,
+ },
+}
+
+return pdflexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua b/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua
new file mode 100644
index 000000000..5d8859c26
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-tex-web.lua
@@ -0,0 +1,23 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for tex web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local texweblexer = lexer.new("tex-web","scite-context-lexer-tex")
+local texlexer = lexer.load("scite-context-lexer-tex")
+
+-- can probably be done nicer now, a bit of a hack
+
+texweblexer._rules = texlexer._rules_web
+texweblexer._tokenstyles = texlexer._tokenstyles
+texweblexer._foldsymbols = texlexer._foldsymbols
+texweblexer._directives = texlexer._directives
+
+return texweblexer
diff --git a/context/data/scite/lexers/scite-context-lexer-tex.lua b/context/data/scite/context/lexers/scite-context-lexer-tex.lua
index a509fadab..d67be2cd8 100644
--- a/context/data/scite/lexers/scite-context-lexer-tex.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-tex.lua
@@ -24,33 +24,26 @@ local info = {
-- local interface = props["keywordclass.macros.context.en"]
-- local interface = lexer.get_property("keywordclass.macros.context.en","")
- -- it seems that whitespace triggers the lexer when embedding happens, but this
- -- is quite fragile due to duplicate styles .. lexer.WHITESPACE is a number
- -- (initially) ... _NAME vs filename (but we don't want to overwrite files)
-
- -- this lexer does not care about other macro packages (one can of course add a fake
- -- interface but it's not on the agenda)
-
]]--
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, exact_match = lexer.token, lexer.exact_match
local P, R, S, V, C, Cmt, Cp, Cc, Ct = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt, lpeg.Cp, lpeg.Cc, lpeg.Ct
local type, next = type, next
local find, match, lower, upper = string.find, string.match, string.lower, string.upper
--- module(...)
-
-local contextlexer = { _NAME = "tex", _FILENAME = "scite-context-lexer-tex" }
-local whitespace = lexer.WHITESPACE
+local lexer = require("lexer")
local context = lexer.context
+local patterns = context.patterns
+local inform = context.inform
-local cldlexer = lexer.load('scite-context-lexer-cld')
------ cldlexer = lexer.load('scite-context-lexer-lua')
-local mpslexer = lexer.load('scite-context-lexer-mps')
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local contextlexer = lexer.new("tex","scite-context-lexer-tex")
+local whitespace = contextlexer.whitespace
+
+local cldlexer = lexer.load("scite-context-lexer-cld")
+local mpslexer = lexer.load("scite-context-lexer-mps")
local commands = { en = { } }
local primitives = { }
@@ -64,7 +57,9 @@ do -- todo: only once, store in global
local definitions = context.loaddefinitions("scite-context-data-interfaces")
if definitions then
+        local interfaces = { }
for interface, list in next, definitions do
+          interfaces[#interfaces+1] = interface
local c = { }
for i=1,#list do
c[list[i]] = true
@@ -79,6 +74,7 @@ do -- todo: only once, store in global
end
commands[interface] = c
end
+      inform("context user interfaces '%s' supported",table.concat(interfaces," "))
end
local definitions = context.loaddefinitions("scite-context-data-context")
@@ -146,13 +142,16 @@ local validminimum = 3
-- % language=uk
-local knownpreamble = Cmt(#P("% "), function(input,i,_) -- todo : utfbomb
+-- fails (empty loop message) ... latest lpeg issue?
+
+local knownpreamble = Cmt(P("% "), function(input,i,_) -- todo : utfbomb, was #P("% ")
if i < 10 then
validwords, validminimum = false, 3
- local s, e, word = find(input,'^(.+)[\n\r]',i) -- combine with match
+ local s, e, word = find(input,"^(.+)[\n\r]",i) -- combine with match
if word then
local interface = match(word,"interface=([a-z]+)")
- if interface then
+ if interface and #interface == 2 then
+ inform("enabling context user interface '%s'",interface)
currentcommands = commands[interface] or commands.en or { }
end
local language = match(word,"language=([a-z]+)")
@@ -170,7 +169,7 @@ end)
-- local helpers_hash = { } for i=1,#helpers do helpers_hash [helpers [i]] = true end
-- local primitives_hash = { } for i=1,#primitives do primitives_hash[primitives[i]] = true end
--- local specialword = Ct( P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- local specialword = Ct( P("\\") * Cmt( C(cstoken^1), function(input,i,s)
-- if currentcommands[s] then
-- return true, "command", i
-- elseif constants_hash[s] then
@@ -184,7 +183,7 @@ end)
-- end
-- end) )
--- local specialword = P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- local specialword = P("\\") * Cmt( C(cstoken^1), function(input,i,s)
-- if currentcommands[s] then
-- return true, { "command", i }
-- elseif constants_hash[s] then
@@ -202,11 +201,11 @@ end)
-- 10pt
-local commentline = P('%') * (1-S("\n\r"))^0
+local commentline = P("%") * (1-S("\n\r"))^0
local endline = S("\n\r")^1
-local space = lexer.space -- S(" \n\r\t\f\v")
-local any = lexer.any
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
local backslash = P("\\")
local hspace = S(" \t")
@@ -219,7 +218,7 @@ local p_command = backslash * knowncommand
local p_constant = backslash * exact_match(constants)
local p_helper = backslash * exact_match(helpers)
local p_primitive = backslash * exact_match(primitives)
-local p_ifprimitive = P('\\if') * cstoken^1
+local p_ifprimitive = P("\\if") * cstoken^1
local p_csname = backslash * (cstoken^1 + P(1))
local p_grouping = S("{$}")
local p_special = S("#()[]<>=\"")
@@ -299,24 +298,24 @@ local p_invisible = invisibles^1
local spacing = token(whitespace, p_spacing )
-local rest = token('default', p_rest )
-local preamble = token('preamble', p_preamble )
-local comment = token('comment', p_comment )
-local command = token('command', p_command )
-local constant = token('data', p_constant )
-local helper = token('plain', p_helper )
-local primitive = token('primitive', p_primitive )
-local ifprimitive = token('primitive', p_ifprimitive)
-local reserved = token('reserved', p_reserved )
-local csname = token('user', p_csname )
-local grouping = token('grouping', p_grouping )
-local number = token('number', p_number )
- * token('constant', p_unit )
-local special = token('special', p_special )
-local reserved = token('reserved', p_reserved ) -- reserved internal preproc
-local extra = token('extra', p_extra )
-local invisible = token('invisible', p_invisible )
-local text = token('default', p_text )
+local rest = token("default", p_rest )
+local preamble = token("preamble", p_preamble )
+local comment = token("comment", p_comment )
+local command = token("command", p_command )
+local constant = token("data", p_constant )
+local helper = token("plain", p_helper )
+local primitive = token("primitive", p_primitive )
+local ifprimitive = token("primitive", p_ifprimitive)
+local reserved = token("reserved", p_reserved )
+local csname = token("user", p_csname )
+local grouping = token("grouping", p_grouping )
+local number = token("number", p_number )
+ * token("constant", p_unit )
+local special = token("special", p_special )
+local reserved = token("reserved", p_reserved ) -- reserved internal preproc
+local extra = token("extra", p_extra )
+local invisible = token("invisible", p_invisible )
+local text = token("default", p_text )
local word = p_word
----- startluacode = token("grouping", P("\\startluacode"))
@@ -390,10 +389,11 @@ contextlexer._reset_parser = function()
end
local luaenvironment = P("lua") * (P("setups") + P("code") + P(true))
+ + P("ctxfunction") * (P("definition") + P(true))
local inlinelua = P("\\") * (
- P("ctx") * ( P("lua") + P("command") + P("late") * (P("lua") + P("command")) )
- + P("cld") * ( P("command") + P("context") )
+ P("ctx") * (P("lua") + P("command") + P("late") * (P("lua") + P("command")) + P("function"))
+ + P("cld") * (P("command") + P("context"))
+ P("luaexpr")
+ (P("direct") + P("late")) * P("lua")
)
@@ -434,9 +434,6 @@ local callers = token("embedded", P("\\") * metafuncall) * metafu
lexer.embed_lexer(contextlexer, cldlexer, startluacode, stopluacode)
lexer.embed_lexer(contextlexer, mpslexer, startmetafuncode, stopmetafuncode)
--- Watch the text grabber, after all, we're talking mostly of text (beware,
--- no punctuation here as it can be special. We might go for utf here.
-
contextlexer._rules = {
{ "whitespace", spacing },
{ "preamble", preamble },
@@ -460,11 +457,61 @@ contextlexer._rules = {
{ "rest", rest },
}
-contextlexer._tokenstyles = context.styleset
--- contextlexer._tokenstyles = context.stylesetcopy() -- experiment
+-- Watch the text grabber, after all, we're talking mostly of text (beware,
+-- no punctuation here as it can be special). We might go for utf here.
+
+local web = lexer.loadluafile("scite-context-lexer-web-snippets")
+
+if web then
+
+ lexer.inform("supporting web snippets in tex lexer")
+
+ contextlexer._rules_web = {
+ { "whitespace", spacing },
+ { "text", text }, -- non words
+ { "comment", comment },
+ { "constant", constant },
+ { "callers", callers },
+ { "helper", helper },
+ { "command", command },
+ { "primitive", primitive },
+ { "ifprimitive", ifprimitive },
+ { "reserved", reserved },
+ { "csname", csname },
+ { "grouping", grouping },
+ { "special", special },
+ { "extra", extra },
+ { "invisible", invisible },
+ { "web", web.pattern },
+ { "rest", rest },
+ }
+
+else
+
+ lexer.report("not supporting web snippets in tex lexer")
+
+ contextlexer._rules_web = {
+ { "whitespace", spacing },
+ { "text", text }, -- non words
+ { "comment", comment },
+ { "constant", constant },
+ { "callers", callers },
+ { "helper", helper },
+ { "command", command },
+ { "primitive", primitive },
+ { "ifprimitive", ifprimitive },
+ { "reserved", reserved },
+ { "csname", csname },
+ { "grouping", grouping },
+ { "special", special },
+ { "extra", extra },
+ { "invisible", invisible },
+ { "rest", rest },
+ }
--- contextlexer._tokenstyles[#contextlexer._tokenstyles + 1] = { cldlexer._NAME..'_whitespace', lexer.style_whitespace }
--- contextlexer._tokenstyles[#contextlexer._tokenstyles + 1] = { mpslexer._NAME..'_whitespace', lexer.style_whitespace }
+end
+
+contextlexer._tokenstyles = context.styleset
local environment = {
["\\start"] = 1, ["\\stop"] = -1,
@@ -495,4 +542,6 @@ contextlexer._foldsymbols = { -- these need to be style references
["grouping"] = group,
}
+-- context.inspect(contextlexer)
+
return contextlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-txt.lua b/context/data/scite/context/lexers/scite-context-lexer-txt.lua
index fe062fb94..43eec2c35 100644
--- a/context/data/scite/lexers/scite-context-lexer-txt.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-txt.lua
@@ -6,22 +6,23 @@ local info = {
license = "see context related readme files",
}
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
-local token = lexer.token
-local P, S, Cmt, Cp, Ct = lpeg.P, lpeg.S, lpeg.Cmt, lpeg.Cp, lpeg.Ct
+local P, S, Cmt, Cp = lpeg.P, lpeg.S, lpeg.Cmt, lpeg.Cp
local find, match = string.find, string.match
-local textlexer = { _NAME = "txt", _FILENAME = "scite-context-lexer-txt" }
-local whitespace = lexer.WHITESPACE
+local lexer = require("lexer")
local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local textlexer = lexer.new("txt","scite-context-lexer-txt")
+local whitespace = textlexer.whitespace
-local space = lexer.space
-local any = lexer.any
+local space = patterns.space
+local any = patterns.any
+local wordtoken = patterns.wordtoken
+local wordpattern = patterns.wordpattern
-local wordtoken = context.patterns.wordtoken
-local wordpattern = context.patterns.wordpattern
local checkedword = context.checkedword
local styleofword = context.styleofword
local setwordlist = context.setwordlist
@@ -36,10 +37,10 @@ local validminimum = 3
-- [#!-%] language=uk
-local p_preamble = Cmt(#(S("#!-%") * P(" ")), function(input,i,_) -- todo: utf bomb
+local p_preamble = Cmt((S("#!-%") * P(" ")), function(input,i,_) -- todo: utf bomb no longer #
if i == 1 then -- < 10 then
validwords, validminimum = false, 3
- local s, e, line = find(input,'^[#!%-%%](.+)[\n\r]',i)
+ local s, e, line = find(input,"^[#!%-%%](.+)[\n\r]",i)
if line then
local language = match(line,"language=([a-z]+)")
if language then
@@ -54,7 +55,6 @@ local t_preamble =
token("preamble", p_preamble)
local t_word =
--- Ct( wordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() ) -- the function can be inlined
wordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() -- the function can be inlined
local t_text =
diff --git a/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua b/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua
new file mode 100644
index 000000000..196a545bc
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-web-snippets.lua
@@ -0,0 +1,133 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for web snippets",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P, R, S, C, Cg, Cb, Cs, Cmt, lpegmatch = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt, lpeg.match
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local websnippets = { }
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local squote = P("'")
+local dquote = P('"')
+local period = P(".")
+
+local t_whitespace = token(whitespace, space^1)
+local t_spacing = token("default", space^1)
+local t_rest = token("default", any)
+
+-- the web subset
+
+local p_beginofweb = P("@")
+local p_endofweb = P("@>")
+
+-- @, @/ @| @# @+ @; @[ @]
+
+local p_directive_1 = p_beginofweb * S(",/|#+;[]")
+local t_directive_1 = token("label",p_directive_1)
+
+-- @.text @>(monospaced)
+-- @:text @>(macro driven)
+-- @= verbose@>
+-- @! underlined @>
+-- @t text @> (hbox)
+-- @q ignored @>
+
+local p_typeset = p_beginofweb * S(".:=!tq")
+local t_typeset = token("label",p_typeset) * token("warning",(1-p_endofweb)^1) * token("label",p_endofweb)
+
+-- @^index@>
+
+local p_index = p_beginofweb * P("^")
+local t_index = token("label",p_index) * token("function",(1-p_endofweb)^1) * token("label",p_endofweb)
+
+-- @f text renderclass
+
+local p_render = p_beginofweb * S("f")
+local t_render = token("label",p_render) * t_spacing * token("warning",(1-space)^1) * t_spacing * token("label",(1-space)^1)
+
+-- @s idem
+-- @p idem
+-- @& strip (spaces before)
+-- @h
+
+local p_directive_2 = p_beginofweb * S("sp&h")
+local t_directive_2 = token("label",p_directive_2)
+
+-- @< ... @> [=|+=|]
+-- @(foo@>
+
+local p_reference = p_beginofweb * S("<(")
+local t_reference = token("label",p_reference) * token("function",(1-p_endofweb)^1) * token("label",p_endofweb * (P("+=") + P("="))^-1)
+
+-- @'char' (ascii code)
+
+local p_character = p_beginofweb * S("'")
+local t_character = token("label",p_character) * token("reserved",(1-squote)^1) * token("label",squote)
+
+-- @l nonascii
+
+local p_nonascii = p_beginofweb * S("l")
+local t_nonascii = token("label",p_nonascii) * t_spacing * token("reserved",(1-space)^1)
+
+-- @x @y @z changefile
+-- @i webfile
+
+local p_filename = p_beginofweb * S("xyzi")
+local t_filename = token("label",p_filename) * t_spacing * token("reserved",(1-space)^1)
+
+-- @@ escape
+
+local p_escape = p_beginofweb * p_beginofweb
+local t_escape = token("text",p_escape)
+
+-- structure
+
+-- @* title.
+
+-- local p_section = p_beginofweb * P("*")^1
+-- local t_section = token("label",p_section) * t_spacing * token("function",(1-period)^1) * token("label",period)
+
+-- @ explanation
+
+-- local p_explanation = p_beginofweb
+-- local t_explanation = token("label",p_explanation) * t_spacing^1
+
+-- @d macro
+
+-- local p_macro = p_beginofweb * P("d")
+-- local t_macro = token("label",p_macro)
+
+-- @c code
+
+-- local p_code = p_beginofweb * P("c")
+-- local t_code = token("label",p_code)
+
+websnippets.pattern = P (
+ t_typeset
+ + t_index
+ + t_render
+ + t_reference
+ + t_filename
+ + t_directive_1
+ + t_directive_2
+ + t_character
+ + t_nonascii
+ + t_escape
+)
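+
+-- For illustration only (hypothetical web input): in a fragment like
+--
+--   @<Global variables@>=
+--   @^initialization@>
+--
+-- the "@<Global variables@>=" part is matched by t_reference and the
+-- "@^initialization@>" part by t_index.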
+
+
+return websnippets
diff --git a/context/data/scite/context/lexers/scite-context-lexer-web.lua b/context/data/scite/context/lexers/scite-context-lexer-web.lua
new file mode 100644
index 000000000..86ae76644
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-web.lua
@@ -0,0 +1,67 @@
+local info = {
+ version = 1.003,
+ comment = "scintilla lpeg lexer for web",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P, R, S = lpeg.P, lpeg.R, lpeg.S
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
+
+local weblexer = lexer.new("web","scite-context-lexer-web")
+local whitespace = weblexer.whitespace
+
+local space = patterns.space -- S(" \n\r\t\f\v")
+local any = patterns.any
+local restofline = patterns.restofline
+local startofline = patterns.startofline
+
+local period = P(".")
+local percent = P("%")
+
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
+local eop = P("@>")
+local eos = eop * P("+")^-1 * P("=")
+
+-- we can put some of the next in the web-snippets file
+-- is f okay here?
+
+local texcomment = token("comment", percent * restofline^0)
+
+local texpart = token("label",P("@")) * #spacing
+ + token("label",P("@") * P("*")^1) * token("function",(1-period)^1) * token("label",period)
+local midpart = token("label",P("@d")) * #spacing
+ + token("label",P("@f")) * #spacing
+local cpppart = token("label",P("@c")) * #spacing
+ + token("label",P("@p")) * #spacing
+ + token("label",P("@") * S("<(")) * token("function",(1-eop)^1) * token("label",eos)
+
+local anypart = P("@") * ( P("*")^1 + S("dfcp") + space^1 + S("<(") * (1-eop)^1 * eos )
+local limbo = 1 - anypart - percent
+
+local texlexer = lexer.load("scite-context-lexer-tex-web")
+local cpplexer = lexer.load("scite-context-lexer-cpp-web")
+
+lexer.embed_lexer(weblexer, texlexer, texpart + limbo, #anypart)
+lexer.embed_lexer(weblexer, cpplexer, cpppart + midpart, #anypart)
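+
+-- Roughly (illustrative): plain "@ " and "@* section." parts are handed to the
+-- tex-web lexer, while "@d", "@f", "@c", "@p" and "@<...@>=" parts switch to the
+-- cpp-web lexer; the lookahead #anypart ends an embedded range without consuming
+-- the next control code.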
+
+local texcomment = token("comment", percent * restofline^0)
+
+weblexer._rules = {
+ { "whitespace", spacing },
+ { "texcomment", texcomment }, -- else issues with first tex section
+ { "rest", rest },
+}
+
+weblexer._tokenstyles = context.styleset
+
+return weblexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-cdata.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua
index 97253e140..e6276da0d 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml-cdata.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-cdata.lua
@@ -6,23 +6,26 @@ local info = {
license = "see context related readme files",
}
-local lexer = lexer
-local token = lexer.token
local P = lpeg.P
-local xmlcdatalexer = { _NAME = "xml-cdata", _FILENAME = "scite-context-lexer-xml-cdata" }
-local whitespace = lexer.WHITESPACE -- triggers states
+local lexer = require("lexer")
local context = lexer.context
+local patterns = context.patterns
-local space = lexer.space
+local token = lexer.token
+
+local xmlcdatalexer = lexer.new("xml-cdata","scite-context-lexer-xml-cdata")
+local whitespace = xmlcdatalexer.whitespace
+
+local space = patterns.space
local nospace = 1 - space - P("]]>")
-local p_spaces = token(whitespace, space ^1)
-local p_cdata = token("comment", nospace^1)
+local t_spaces = token(whitespace, space ^1)
+local t_cdata = token("comment", nospace^1)
xmlcdatalexer._rules = {
- { "whitespace", p_spaces },
- { "cdata", p_cdata },
+ { "whitespace", t_spaces },
+ { "cdata", t_cdata },
}
xmlcdatalexer._tokenstyles = context.styleset
diff --git a/context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua
new file mode 100644
index 000000000..b5b3fefe0
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-comment.lua
@@ -0,0 +1,33 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for xml comments",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P = lpeg.P
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local xmlcommentlexer = lexer.new("xml-comment","scite-context-lexer-xml-comment")
+local whitespace = xmlcommentlexer.whitespace
+
+local space = patterns.space
+local nospace = 1 - space - P("-->")
+
+local t_spaces = token(whitespace, space ^1)
+local t_comment = token("comment", nospace^1)
+
+xmlcommentlexer._rules = {
+ { "whitespace", t_spaces },
+ { "comment", t_comment },
+}
+
+xmlcommentlexer._tokenstyles = context.styleset
+
+return xmlcommentlexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer-xml-script.lua b/context/data/scite/context/lexers/scite-context-lexer-xml-script.lua
new file mode 100644
index 000000000..bbb938dc5
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml-script.lua
@@ -0,0 +1,33 @@
+local info = {
+ version = 1.002,
+ comment = "scintilla lpeg lexer for xml script",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+local P = lpeg.P
+
+local lexer = require("lexer")
+local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+
+local xmlscriptlexer = lexer.new("xml-script","scite-context-lexer-xml-script")
+local whitespace = xmlscriptlexer.whitespace
+
+local space = patterns.space
+local nospace = 1 - space - (P("</") * P("script") + P("SCRIPT")) * P(">")
+
+local t_spaces = token(whitespace, space ^1)
+local t_script = token("default", nospace^1)
+
+xmlscriptlexer._rules = {
+ { "whitespace", t_spaces },
+ { "script", t_script },
+}
+
+xmlscriptlexer._tokenstyles = context.styleset
+
+return xmlscriptlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml.lua b/context/data/scite/context/lexers/scite-context-lexer-xml.lua
index 241e22591..77c89b1d6 100644
--- a/context/data/scite/lexers/scite-context-lexer-xml.lua
+++ b/context/data/scite/context/lexers/scite-context-lexer-xml.lua
@@ -12,26 +12,28 @@ local info = {
-- todo: parse entities in attributes
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, exact_match = lexer.token, lexer.exact_match
-local P, R, S, V, C, Cmt, Ct, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt, lpeg.Ct, lpeg.Cp
+local P, R, S, C, Cmt, Cp = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cmt, lpeg.Cp
local type = type
local match, find = string.match, string.find
-local xmllexer = { _NAME = "xml", _FILENAME = "scite-context-lexer-xml" }
-local whitespace = lexer.WHITESPACE -- triggers states
+local lexer = require("lexer")
local context = lexer.context
+local patterns = context.patterns
+
+local token = lexer.token
+local exact_match = lexer.exact_match
-local xmlcommentlexer = lexer.load("scite-context-lexer-xml-comment") -- indirect (some issue with the lexer framework)
-local xmlcdatalexer = lexer.load("scite-context-lexer-xml-cdata") -- indirect (some issue with the lexer framework)
-local xmlscriptlexer = lexer.load("scite-context-lexer-xml-script") -- indirect (some issue with the lexer framework)
-local lualexer = lexer.load("scite-context-lexer-lua") --
+local xmllexer = lexer.new("xml","scite-context-lexer-xml")
+local whitespace = xmllexer.whitespace
-local space = lexer.space -- S(" \t\n\r\v\f")
-local any = lexer.any -- P(1)
+local xmlcommentlexer = lexer.load("scite-context-lexer-xml-comment")
+local xmlcdatalexer = lexer.load("scite-context-lexer-xml-cdata")
+local xmlscriptlexer = lexer.load("scite-context-lexer-xml-script")
+local lualexer = lexer.load("scite-context-lexer-lua")
+
+local space = patterns.space
+local any = patterns.any
local dquote = P('"')
local squote = P("'")
@@ -40,7 +42,7 @@ local semicolon = P(";")
local equal = P("=")
local ampersand = P("&")
-local name = (R("az","AZ","09") + S('_-.'))^1
+local name = (R("az","AZ","09") + S("_-."))^1
local openbegin = P("<")
local openend = P("</")
local closebegin = P("/>") + P(">")
@@ -84,12 +86,12 @@ local validminimum = 3
--
-- <?context-directive editor language us ?>
-local p_preamble = Cmt(#P("<?xml "), function(input,i,_) -- todo: utf bomb
+local t_preamble = Cmt(P("<?xml "), function(input,i,_) -- todo: utf bomb, no longer #
if i < 200 then
validwords, validminimum = false, 3
local language = match(input,"^<%?xml[^>]*%?>%s*<%?context%-directive%s+editor%s+language%s+(..)%s+%?>")
-- if not language then
- -- language = match(input,'^<%?xml[^>]*language=[\"\'](..)[\"\'][^>]*%?>',i)
+ -- language = match(input,"^<%?xml[^>]*language=[\"\'](..)[\"\'][^>]*%?>",i)
-- end
if language then
validwords, validminimum = setwordlist(language)
@@ -98,24 +100,23 @@ local p_preamble = Cmt(#P("<?xml "), function(input,i,_) -- todo: utf bomb
return false
end)
-local p_word =
+local t_word =
-- Ct( iwordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() ) -- the function can be inlined
iwordpattern / function(s) return styleofword(validwords,validminimum,s) end * Cp() -- the function can be inlined
-local p_rest =
+local t_rest =
token("default", any)
-local p_text =
+local t_text =
token("default", (1-S("<>&")-space)^1)
-local p_spacing =
+local t_spacing =
token(whitespace, space^1)
--- token("whitespace", space^1)
-local p_optionalwhitespace =
- p_spacing^0
+local t_optionalwhitespace =
+ token("default", space^1)^0
-local p_localspacing =
+local t_localspacing =
token("default", space^1)
-- Because we want a differently colored open and close we need an embedded lexer (whitespace
@@ -123,22 +124,22 @@ local p_localspacing =
-- Even using different style keys is not robust as they can be shared. I'll fix the main
-- lexer code.
-local p_sstring =
+local t_sstring =
token("quote",dquote)
* token("string",(1-dquote)^0) -- different from context
* token("quote",dquote)
-local p_dstring =
+local t_dstring =
token("quote",squote)
* token("string",(1-squote)^0) -- different from context
* token("quote",squote)
--- local p_comment =
+-- local t_comment =
-- token("command",opencomment)
-- * token("comment",(1-closecomment)^0) -- different from context
-- * token("command",closecomment)
--- local p_cdata =
+-- local t_cdata =
-- token("command",opencdata)
-- * token("comment",(1-closecdata)^0) -- different from context
-- * token("command",closecdata)
@@ -156,74 +157,74 @@ local p_dstring =
-- <!ENTITY xxxx PUBLIC "yyyy" >
-- <!ENTITY xxxx "yyyy" >
-local p_docstr = p_dstring + p_sstring
+local t_docstr = t_dstring + t_sstring
-local p_docent = token("command",P("<!ENTITY"))
- * p_optionalwhitespace
+local t_docent = token("command",P("<!ENTITY"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* (
(
token("constant",P("SYSTEM"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
* token("constant",P("NDATA"))
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("keyword",name)
) + (
token("constant",P("PUBLIC"))
- * p_optionalwhitespace
- * p_docstr
+ * t_optionalwhitespace
+ * t_docstr
) + (
- p_docstr
+ t_docstr
)
)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P(">"))
-local p_docele = token("command",P("<!ELEMENT"))
- * p_optionalwhitespace
+local t_docele = token("command",P("<!ELEMENT"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P("("))
* (
- p_spacing
+ t_localspacing
+ token("constant",P("#CDATA") + P("#PCDATA") + P("ANY"))
+ token("text",P(","))
+ token("comment",(1-S(",)"))^1)
)^1
* token("command",P(")"))
- * p_optionalwhitespace
+ * t_optionalwhitespace
* token("command",P(">"))
-local p_docset = token("command",P("["))
- * p_optionalwhitespace
- * ((p_optionalwhitespace * (p_docent + p_docele))^1 + token("comment",(1-P("]"))^0))
- * p_optionalwhitespace
+local t_docset = token("command",P("["))
+ * t_optionalwhitespace
+ * ((t_optionalwhitespace * (t_docent + t_docele))^1 + token("comment",(1-P("]"))^0))
+ * t_optionalwhitespace
* token("command",P("]"))
-local p_doctype = token("command",P("<!DOCTYPE"))
- * p_optionalwhitespace
+local t_doctype = token("command",P("<!DOCTYPE"))
+ * t_optionalwhitespace
* token("keyword",name)
- * p_optionalwhitespace
+ * t_optionalwhitespace
* (
(
token("constant",P("PUBLIC"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
) + (
token("constant",P("SYSTEM"))
- * p_optionalwhitespace
- * p_docstr
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_docstr
+ * t_optionalwhitespace
)
)^-1
- * p_docset^-1
- * p_optionalwhitespace
+ * t_docset^-1
+ * t_optionalwhitespace
* token("command",P(">"))
lexer.embed_lexer(xmllexer, lualexer, token("command", openlua), token("command", closelua))
@@ -231,7 +232,7 @@ lexer.embed_lexer(xmllexer, xmlcommentlexer, token("command", opencomment), toke
lexer.embed_lexer(xmllexer, xmlcdatalexer, token("command", opencdata), token("command", closecdata))
lexer.embed_lexer(xmllexer, xmlscriptlexer, token("command", openscript), token("command", closescript))
--- local p_name =
+-- local t_name =
-- token("plain",name)
-- * (
-- token("default",colon)
@@ -239,11 +240,11 @@ lexer.embed_lexer(xmllexer, xmlscriptlexer, token("command", openscript), toke
-- )
-- + token("keyword",name)
-local p_name = -- more robust
+local t_name = -- more robust
token("plain",name * colon)^-1
* token("keyword",name)
--- local p_key =
+-- local t_key =
-- token("plain",name)
-- * (
-- token("default",colon)
@@ -251,81 +252,82 @@ local p_name = -- more robust
-- )
-- + token("constant",name)
-local p_key =
+local t_key =
token("plain",name * colon)^-1
* token("constant",name)
-local p_attributes = (
- p_optionalwhitespace
- * p_key
- * p_optionalwhitespace
+local t_attributes = (
+ t_optionalwhitespace
+ * t_key
+ * t_optionalwhitespace
* token("plain",equal)
- * p_optionalwhitespace
- * (p_dstring + p_sstring)
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * (t_dstring + t_sstring)
+ * t_optionalwhitespace
)^0
-local p_open =
+local t_open =
token("keyword",openbegin)
* (
- p_name
- * p_optionalwhitespace
- * p_attributes
+ t_name
+ * t_optionalwhitespace
+ * t_attributes
* token("keyword",closebegin)
+
token("error",(1-closebegin)^1)
)
-local p_close =
+local t_close =
token("keyword",openend)
* (
- p_name
- * p_optionalwhitespace
+ t_name
+ * t_optionalwhitespace
* token("keyword",closeend)
+
token("error",(1-closeend)^1)
)
-local p_entity =
+local t_entity =
token("constant",entity)
-local p_instruction =
+local t_instruction =
token("command",openinstruction * P("xml"))
- * p_optionalwhitespace
- * p_attributes
- * p_optionalwhitespace
+ * t_optionalwhitespace
+ * t_attributes
+ * t_optionalwhitespace
* token("command",closeinstruction)
+ token("command",openinstruction * name)
* token("default",(1-closeinstruction)^1)
* token("command",closeinstruction)
-local p_invisible =
+local t_invisible =
token("invisible",invisibles^1)
--- local p_preamble =
--- token('preamble', p_preamble )
+-- local t_preamble =
+-- token("preamble", t_preamble )
xmllexer._rules = {
- { "whitespace", p_spacing },
- { "preamble", p_preamble },
- { "word", p_word },
- -- { "text", p_text },
- -- { "comment", p_comment },
- -- { "cdata", p_cdata },
- { "doctype", p_doctype },
- { "instruction", p_instruction },
- { "close", p_close },
- { "open", p_open },
- { "entity", p_entity },
- { "invisible", p_invisible },
- { "rest", p_rest },
+ { "whitespace", t_spacing },
+ { "preamble", t_preamble },
+ { "word", t_word },
+ -- { "text", t_text },
+ -- { "comment", t_comment },
+ -- { "cdata", t_cdata },
+ { "doctype", t_doctype },
+ { "instruction", t_instruction },
+ { "close", t_close },
+ { "open", t_open },
+ { "entity", t_entity },
+ { "invisible", t_invisible },
+ { "rest", t_rest },
}
xmllexer._tokenstyles = context.styleset
xmllexer._foldpattern = P("</") + P("<") + P("/>") -- separate entry else interference
++ P("<!--") + P("-->")
-xmllexer._foldsymbols = { -- somehow doesn't work yet
+xmllexer._foldsymbols = {
_patterns = {
"</",
"/>",
@@ -336,6 +338,13 @@ xmllexer._foldsymbols = { -- somehow doesn't work yet
["/>"] = -1,
["<"] = 1,
},
+ ["command"] = {
+ ["</"] = -1,
+ ["/>"] = -1,
+ ["<!--"] = 1,
+ ["-->"] = -1,
+ ["<"] = 1,
+ },
}
return xmllexer
diff --git a/context/data/scite/context/lexers/scite-context-lexer.lua b/context/data/scite/context/lexers/scite-context-lexer.lua
new file mode 100644
index 000000000..6335af911
--- /dev/null
+++ b/context/data/scite/context/lexers/scite-context-lexer.lua
@@ -0,0 +1,2018 @@
+local info = {
+ version = 1.400,
+ comment = "basics for scintilla lpeg lexer for context/metafun, contains copyrighted code from mitchell.att.foicica.com",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- todo: hook into context resolver etc
+-- todo: only old api in lexers, rest in context subnamespace
+-- todo: make sure we can run in one state .. copies or shared?
+-- todo: auto-nesting
+
+local log = false
+local trace = false
+local detail = false
+local show = false -- nice for tracing (also for later)
+local collapse = false -- can save some 15% (maybe easier on scintilla)
+local inspect = false -- can save some 15% (maybe easier on scintilla)
+
+-- local log = true
+-- local trace = true
+
+-- GET GOING
+--
+-- You need to copy this file over lexer.lua. In principle other lexers could
+-- work too but not now. Maybe some day. All patterns will move into the patterns
+-- name space. I might do the same with styles. If you run an older version of
+-- SciTE you can take one of the archives. Pre 3.41 versions can just be copied
+-- to the right path, as there we still use part of the normal lexer.
+--
+-- REMARK
+--
+-- We started using lpeg lexing as soon as it became available. Because we had
+-- rather demanding files and also wanted to use nested lexers, we ended up with
+-- our own variant (more robust and faster). As a consequence successive versions
+-- had to be adapted to changes in the (still unstable) api. In addition to
+-- lexing we also have spell checking and such.
+--
+-- STATUS
+--
+-- todo: maybe use a special stripped version of the dll (stable api)
+-- todo: play with hotspot and other properties
+-- wish: access to all scite properties and in fact integrate in scite
+-- todo: add proper tracing and so .. not too hard as we can run on mtxrun
+-- todo: get rid of these lexers.STYLE_XX and lexers.XX (hide such details)
+--
+-- HISTORY
+--
+-- The fold and lex functions are copied and patched from original code by Mitchell
+-- (see lexer.lua). All errors are mine. The ability to use lpeg is a real nice
+-- adition and a brilliant move. The code is a byproduct of the (mainly Lua based)
+-- textadept (still a rapidly moving target) that unfortunately misses a realtime
+-- output pane. On the other hand, SciTE is somewhat crippled by the fact that we
+-- cannot pop in our own (language dependent) lexer into the output pane (somehow
+-- the errorlist lexer is hard coded into the editor). Hopefully that will change
+-- some day.
+--
+-- Starting with SciTE version 3.20 there is an issue with coloring. As we still
+-- lack a connection with SciTE itself (properties as well as printing to the log
+-- pane) we cannot trace this (on windows). As far as I can see, there are no
+-- fundamental changes in lexer.lua or LexLPeg.cxx so it must be in Scintilla
+-- itself. So for the moment I stick to 3.10. Indicators are: no lexing of 'next'
+-- and 'goto <label>' in the Lua lexer and no brace highlighting either. Interesting
+-- is that it does work ok in the cld lexer (so the Lua code is okay). Also the fact
+-- that char-def.lua lexes fast is a signal that the lexer quits somewhere halfway.
+-- Maybe there are some hard coded limitations on the number of styles and/or length
+-- of names.
+--
+-- After checking 3.24 and adapting to the new lexer tables things are okay again.
+-- So, this version assumes 3.24 or higher. In 3.24 we have a different token
+-- result, i.e. no longer a { tag, pattern } but just two return values. I didn't
+-- check other changes but will do that when I run into issues. I had optimized
+-- these small tables by hashing which was more efficient but this is no longer
+-- needed. For the moment we keep some of that code around as I don't know what
+-- happens in future versions.
+--
+-- In 3.31 another major change took place: some helper constants (maybe they're no
+-- longer constants) and functions were moved into the lexer modules namespace but
+-- the functions are assigned to the Lua module afterward so we cannot alias them
+-- beforehand. We're probably getting close to a stable interface now. I've
+-- considered making a whole copy and patch the other functions too as we need an
+-- extra nesting model. However, I don't want to maintain too much. An unfortunate
+-- change in 3.03 is that a script can no longer be specified. This means that
+-- instead of loading the extensions via the properties file, we now need to load
+-- them in our own lexers, unless of course we replace lexer.lua completely (which
+-- adds another installation issue).
+--
+-- Another change has been that _LEXERHOME is no longer available. It looks like
+-- more and more functionality gets dropped so maybe at some point we need to ship
+-- our own dll/so files. For instance, I'd like to have access to the current
+-- filename and other scite properties. For instance, we could cache some info with
+-- each file, if only we had knowledge of what file we're dealing with.
+--
+-- For huge files folding can be pretty slow and I do have some large ones that I
+-- keep open all the time. Loading is normally no issue, unless one has remembered
+-- the status and the cursor is at the last line of a 200K line file. Optimizing the
+-- fold function brought down loading of char-def.lua from 14 sec => 8 sec.
+-- Replacing the word_match function and optimizing the lex function gained another
+-- 2+ seconds. A 6 second load is quite ok for me. The changed lexer table structure
+-- (no subtables) brings loading down to a few seconds.
+--
+-- When the lexer path is copied to the textadept lexer path, and the theme
+-- definition to theme path (as lexer.lua), the lexer works there as well. When I
+-- have time and motive I will make a proper setup file to tune the look and feel a
+-- bit and associate suffixes with the context lexer. The textadept editor has a
+-- nice style tracing option but lacks the tabs for selecting files that scite has.
+-- It also has no integrated run that pipes to the log pane. Interesting is that the
+-- jit version of textadept crashes on lexing large files (and does not feel faster
+-- either; maybe a side effect of known limitations).
+--
+-- Function load(lexer_name) starts with _lexers.WHITESPACE = lexer_name ..
+-- '_whitespace' which means that we need to have it frozen at the moment we load
+-- another lexer. Because spacing is used to revert to a parent lexer we need to
+-- make sure that we load children as late as possible in order not to get the wrong
+-- whitespace trigger. This took me quite a while to figure out (not being that
+-- familiar with the internals). The lex and fold functions have been optimized. It
+-- is a pity that there is no proper print available. Another thing needed is a
+-- default style in our own theme style definition, as otherwise we get wrong nested
+-- lexers, especially if they are larger than a view. This is the hardest part of
+-- getting things right.
+--
+-- It's a pity that there is no scintillua library for the OSX version of scite.
+-- Even better would be to have the scintillua library as integral part of scite as
+-- that way I could use OSX alongside windows and linux (depending on needs). Also
+-- nice would be to have a proper interface to scite then because currently the
+-- lexer is rather isolated and the lua version does not provide all standard
+-- libraries. It would also be good to have lpeg support in the regular scite lua
+-- extension (currently you need to pick it up from someplace else).
+--
+-- With 3.41 the interface changed again so it gets time to look into the C++ code
+-- and consider compiling and patching myself. Loading is more complicated not as
+-- the lexer gets loaded automatically so we have little control over extending the
+-- code now. After a few days trying all kind of solutions I decided to follow a
+-- different approach: drop in a complete replacement. This of course means that I
+-- need to keep track of even more changes (which for sure will happen) but at least
+-- I get rid of interferences. The api (lexing and configuration) is simply too
+-- unstable across versions. Maybe in a few years things will have stabilized. (Or maybe
+-- it's not really expected that one writes lexers at all.) A side effect is that I
+-- now no longer will use shipped lexers but just the built-in ones. Not that it
+-- matters much as the context lexers cover what I need (and I can always write
+-- more).
+--
+-- In fact, the transition to 3.41 was triggered by an unfortunate update of Ubuntu
+-- which left me with an incompatible SciTE and lexer library and updating was not
+-- possible due to the lack of 64 bit libraries. We'll see what the future brings.
+--
+-- Promising is that the library can now use another Lua instance so maybe some day
+-- it will get properly integrated in SciTE and we can use more clever scripting.
+--
+-- In some lexers we use embedded ones even if we could do it directly. The reason is
+-- that when the end token is edited (e.g. -->), backtracking to the space before the
+-- begin token (e.g. <!--) results in applying the surrounding whitespace which in
+-- turn means that when the end token is edited right, backtracking doesn't go back.
+-- One solution (in the dll) would be to backtrack several space categories. After all,
+-- lexing is quite fast (applying the result is much slower).
+--
+-- For some reason the first blob of text tends to go wrong (pdf and web). It would be
+-- nice to have 'whole doc' initial lexing. Quite fishy as it makes it impossible to
+-- lex the first part well (for already opened documents) because only a partial
+-- text is passed.
+--
+-- So, maybe I should just write this from scratch (assuming more generic usage)
+-- because after all, the dll expects just tables, based on a string. I can then also
+-- do some more aggressive resource sharing (needed when used generic).
+--
+-- I think that nested lexers are still bugged (esp over longer ranges). It never was
+-- robust or maybe it's simply not meant for too complex cases. The 3.24 version was
+-- probably the best so far. The fact that styles bleed between lexers even if their
+-- states are isolated is an issue. Another issue is that zero characters in the
+-- text passed to the lexer can mess things up (pdf files have them in streams).
+--
+-- For more complex 'languages', like web or xml, we need to make sure that we use
+-- e.g. 'default' for spacing that makes up some construct. Ok, we then still have a
+-- backtracking issue but less.
+--
+-- TODO
+--
+-- I can make an export to context, but first I'll redo the code that makes the grammar,
+-- as we only seem to need
+--
+-- lexer._TOKENSTYLES : table
+-- lexer._CHILDREN : flag
+-- lexer._EXTRASTYLES : table
+-- lexer._GRAMMAR : flag
+--
+-- lexers.load : function
+-- lexers.lex : function
+--
+-- So, if we drop compatibility with other lex definitions, we can make things simpler.
+
+-- TRACING
+--
+-- The advantage is that we now can check more easily with regular Lua. We can also
+-- use wine and print to the console (somehow stdout is intercepted there.) So, I've
+-- added a bit of tracing. Interesting is to notice that each document gets its own
+-- instance which has advantages but also means that when we are spellchecking we
+-- reload the word lists each time. (In the past I assumed a shared instance and took
+-- some precautions.)
+
+local lpeg = require("lpeg")
+
+local global = _G
+local find, gmatch, match, lower, upper, gsub, sub, format = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub, string.sub, string.format
+local concat = table.concat
+local type, next, setmetatable, rawset, tonumber, tostring = type, next, setmetatable, rawset, tonumber, tostring
+local R, P, S, V, C, Cp, Cs, Ct, Cmt, Cc, Cf, Cg, Carg = lpeg.R, lpeg.P, lpeg.S, lpeg.V, lpeg.C, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc, lpeg.Cf, lpeg.Cg, lpeg.Carg
+local lpegmatch = lpeg.match
+
+local nesting = 0
+
+local function report(fmt,str,...)
+ if log then
+ if str then
+ fmt = format(fmt,str,...)
+ end
+ print(format("scite lpeg lexer > %s > %s",nesting == 0 and "-" or nesting,fmt))
+ end
+end
+
+local function inform(...)
+ if log and trace then
+ report(...)
+ end
+end
+
+inform("loading context lexer module (global table: %s)",tostring(global))
+
+if not package.searchpath then
+
+ -- Unfortunately the io library is only available when we end up
+ -- in this branch of code.
+
+ inform("using adapted function 'package.searchpath' (if used at all)")
+
+ function package.searchpath(name,path)
+ local tried = { }
+ for part in gmatch(path,"[^;]+") do
+ local filename = gsub(part,"%?",name)
+ local f = io.open(filename,"r")
+ if f then
+ inform("file found on path: %s",filename)
+ f:close()
+ return filename
+ end
+ tried[#tried + 1] = format("no file '%s'",filename)
+ end
+        -- added: local path .. for testing (assumption: look for name .. ".lua" in the current directory)
+        local filename = "./" .. name .. ".lua"
+        local f = io.open(filename,"r")
+ if f then
+ inform("file found on current path: %s",filename)
+ f:close()
+ return filename
+ end
+ --
+ tried[#tried + 1] = format("no file '%s'",filename)
+ return nil, concat(tried,"\n")
+ end
+
+end
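+
+-- A minimal usage sketch of the fallback defined above (the path string is a
+-- hypothetical example):
+--
+-- local found = package.searchpath("scite-context-lexer-tex","./?.lua;./lexers/?.lua")
+-- if found then
+--     dofile(found)
+-- end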
+
+local lexers = { }
+local context = { }
+lexers.context = context
+
+local patterns = { }
+context.patterns = patterns -- todo: lexers.patterns
+
+context.report = report
+context.inform = inform
+
+lexers.LEXERPATH = package.path -- can be multiple paths separated by ;
+lexers.LEXERPATH = "./?.lua" -- good enough, will be set anyway (was
+
+if resolvers then
+ -- todo: set LEXERPATH
+ -- todo: set report
+end
+
+local usedlexers = { }
+local parent_lexer = nil
+
+-- The problem with styles is that there is some nasty interaction with scintilla
+-- and each version of lexer dll/so has a different issue. So, from now on we will
+-- just add them here. There is also a limit of some 30 styles. Maybe I should
+-- hash them in order to reuse.
+
+-- todo: work with proper hashes and analyze what styles are really used by a
+-- lexer
+
+local default = {
+ "nothing", "whitespace", "comment", "string", "number", "keyword",
+ "identifier", "operator", "error", "preprocessor", "constant", "variable",
+ "function", "type", "label", "embedded",
+ "quote", "special", "extra", "reserved", "okay", "warning",
+ "command", "internal", "preamble", "grouping", "primitive", "plain",
+ "user",
+ -- not used (yet) .. we cross the 32 boundary so had to patch the initializer, see (1)
+ "char", "class", "data", "definition", "invisible", "regex",
+ "standout", "tag",
+ "text",
+}
+
+local predefined = {
+ "default", "linenumber", "bracelight", "bracebad", "controlchar",
+ "indentguide", "calltip"
+}
+
+-- Bah ... ugly ... nicer would be a proper hash .. we now have properties
+-- as well as STYLE_* and some connection between them ... why .. ok, we
+-- could delay things but who cares. Anyway, at this moment the properties
+-- are still unknown.
+
+local function preparestyles(list)
+ local reverse = { }
+ for i=1,#list do
+ local k = list[i]
+ local K = upper(k)
+ local s = "style." .. k
+ lexers[K] = k -- is this used
+ lexers["STYLE_"..K] = "$(" .. k .. ")"
+ reverse[k] = true
+ end
+ return reverse
+end
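+
+-- So, for the entry "comment" this results in lexers.COMMENT being "comment" and
+-- lexers.STYLE_COMMENT being "$(comment)", while the returned reverse table just
+-- records that the name is known.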
+
+local defaultstyles = preparestyles(default)
+local predefinedstyles = preparestyles(predefined)
+
+-- These helpers are set afterwards so we delay their initialization ... there
+-- is no need to alias each time again and this way we can more easily adapt
+-- to updates.
+
+-- These keep changing (values, functions, tables ...) so we need to check these
+-- with each update. Some of them are set in the loader (the require 'lexer' is
+-- in fact not a real one as the lexer code is loaded in the dll). It's also not
+-- getting more efficient.
+
+-- FOLD_BASE = lexers.FOLD_BASE or SC_FOLDLEVELBASE
+-- FOLD_HEADER = lexers.FOLD_HEADER or SC_FOLDLEVELHEADERFLAG
+-- FOLD_BLANK = lexers.FOLD_BLANK or SC_FOLDLEVELWHITEFLAG
+-- get_style_at = lexers.get_style_at or GetStyleAt
+-- get_indent_amount = lexers.get_indent_amount or GetIndentAmount
+-- get_property = lexers.get_property or GetProperty
+-- get_fold_level = lexers.get_fold_level or GetFoldLevel
+
+-- It needs checking: do we have access to all properties now? I'll clean
+-- this up anyway as I want a simple clean and stable model.
+
+-- This is somewhat messy. The lexer dll provides some virtual fields:
+--
+-- + property
+-- + property_int
+-- + style_at
+-- + fold_level
+-- + indent_amount
+--
+-- but for some reasons not:
+--
+-- + property_expanded
+--
+-- As a consequence we need to define it here because otherwise the
+-- lexer will crash. The fuzzy thing is that we don't have to define
+-- the property and property_int tables but we do have to define the
+-- expanded beforehand. The folding properties are no longer interfaced
+-- so the interface to scite is now rather weak (only a few hard coded
+-- properties).
+
+local FOLD_BASE = 0
+local FOLD_HEADER = 0
+local FOLD_BLANK = 0
+
+local style_at = { }
+local indent_amount = { }
+local fold_level = { }
+
+local function check_main_properties()
+ if not lexers.property then
+ lexers.property = { }
+ end
+ if not lexers.property_int then
+ lexers.property_int = setmetatable({ }, {
+ __index = function(t,k)
+ -- why the tostring .. it relies on lua casting to a number when
+ -- doing a comparison
+ return tonumber(lexers.property[k]) or 0 -- tostring removed
+ end,
+ __newindex = function(t,k,v)
+ report("properties are read-only, '%s' is not changed",k)
+ end,
+ })
+ end
+end
+
+lexers.property_expanded = setmetatable({ }, {
+ __index = function(t,k)
+ -- better be safe for future changes .. what if at some point this is
+ -- made consistent in the dll ... we need to keep an eye on that
+ local property = lexers.property
+ if not property then
+ check_main_properties()
+ end
+ --
+ return gsub(property[k],"[$%%]%b()", function(k)
+ return t[sub(k,3,-2)]
+ end)
+ end,
+ __newindex = function(t,k,v)
+ report("properties are read-only, '%s' is not changed",k)
+ end,
+})
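+
+-- Example with hypothetical values: given property["fontname"] = "Dejavu" and
+-- property["style.default"] = "font:$(fontname),size:10", indexing
+-- property_expanded["style.default"] yields "font:Dejavu,size:10".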
+
+-- A downward compatible feature but obsolete:
+
+-- local function get_property(tag,default)
+-- return lexers.property_int[tag] or lexers.property[tag] or default
+-- end
+
+-- We still want our own properties (as it keeps changing so better play
+-- safe from now on):
+
+local function check_properties(lexer)
+ if lexer.properties then
+ return lexer
+ end
+ check_main_properties()
+ -- we use a proxy
+ local mainproperties = lexers.property
+ local properties = { }
+ local expanded = setmetatable({ }, {
+ __index = function(t,k)
+ return gsub(properties[k] or mainproperties[k],"[$%%]%b()", function(k)
+ return t[sub(k,3,-2)]
+ end)
+ end,
+ })
+ lexer.properties = setmetatable(properties, {
+ __index = mainproperties,
+ __call = function(t,k,default) -- expands
+ local v = expanded[k]
+ local t = type(default)
+ if t == "number" then
+ return tonumber(v) or default
+ elseif t == "boolean" then
+ return v == nil and default or v
+ else
+ return v or default
+ end
+ end,
+ })
+ return lexer
+end
+
+-- do
+-- lexers.property = { foo = 123, red = "R" }
+-- local a = check_properties({}) print("a.foo",a.properties.foo)
+-- a.properties.foo = "bar" print("a.foo",a.properties.foo)
+-- a.properties.foo = "bar:$(red)" print("a.foo",a.properties.foo) print("a.foo",a.properties("foo"))
+-- end
+
+local function set(value,default)
+ if value == 0 or value == false or value == "0" then
+ return false
+ elseif value == 1 or value == true or value == "1" then
+ return true
+ else
+ return default
+ end
+end
+
+local function check_context_properties()
+ local property = lexers.property -- let's hope that this stays
+ log = set(property["lexer.context.log"], log)
+ trace = set(property["lexer.context.trace"], trace)
+ detail = set(property["lexer.context.detail"], detail)
+ show = set(property["lexer.context.show"], show)
+ collapse = set(property["lexer.context.collapse"],collapse)
+ inspect = set(property["lexer.context.inspect"], inspect)
+end
+
+function context.registerproperties(p) -- global
+ check_main_properties()
+ local property = lexers.property -- let's hope that this stays
+ for k, v in next, p do
+ property[k] = v
+ end
+ check_context_properties()
+end
+
+context.properties = setmetatable({ }, {
+ __index = lexers.property,
+ __newindex = function(t,k,v)
+ check_main_properties()
+ lexers.property[k] = v
+ check_context_properties()
+ end,
+})
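+
+-- So, for instance, context.properties["lexer.context.log"] = "1" switches on
+-- logging and "0" switches it off again; the same keys can also be passed to
+-- registerproperties.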
+
+-- We want locals, so we set them delayed. Once.
+
+local function initialize()
+ FOLD_BASE = lexers.FOLD_BASE
+ FOLD_HEADER = lexers.FOLD_HEADER
+ FOLD_BLANK = lexers.FOLD_BLANK
+ --
+ style_at = lexers.style_at -- table
+ indent_amount = lexers.indent_amount -- table
+ fold_level = lexers.fold_level -- table
+ --
+ check_main_properties()
+ --
+ initialize = nil
+end
+
+-- Style handler.
+--
+-- The property table will be set later (after loading) by the library. The
+-- styleset is not needed any more as we predefine all styles as defaults
+-- anyway (too bug sensitive otherwise).
+
+local function toproperty(specification)
+ local serialized = { }
+ for key, value in next, specification do
+ if value == true then
+ serialized[#serialized+1] = key
+ elseif type(value) == "table" then
+ serialized[#serialized+1] = key .. ":" .. "#" .. value[1] .. value[2] .. value[3]
+ else
+ serialized[#serialized+1] = key .. ":" .. tostring(value)
+ end
+ end
+ return concat(serialized,",")
+end
+
+local function tostyles(styles)
+ local styleset = { }
+ local property = lexers.property or { }
+ for k, v in next, styles do
+ v = toproperty(v)
+ styleset[k] = v
+ property["style."..k] = v
+ end
+ return styleset
+end
+
+context.toproperty = toproperty
+context.tostyles = tostyles
+
+local function sortedkeys(hash)
+ local t, n = { }, 0
+ for k, v in next, hash do
+ t[#t+1] = k
+ local l = #tostring(k)
+ if l > n then
+ n = l
+ end
+ end
+ table.sort(t)
+ return t, n
+end
+
+-- If we had one instance/state of Lua as well as all regular libraries
+-- preloaded we could use the context base libraries. So, let's go with a
+-- poor man's solution for now.
+
+function context.registerstyles(styles)
+ local styleset = tostyles(styles)
+ context.styles = styles
+ context.styleset = styleset
+ if trace then
+ if detail then
+ local t, n = sortedkeys(styleset)
+ local template = " %-" .. n .. "s : %s"
+ report("initializing styleset:")
+ for i=1,#t do
+ local k = t[i]
+ report(template,k,styleset[k])
+ end
+ else
+ report("initializing styleset")
+ end
+ end
+end
+
+-- Some spell checking related stuff. Unfortunately we cannot use a path set
+-- by property. This will get a hook for resolvers.
+
+local locations = {
+ "context/lexers", -- context lexers
+ "context/lexers/data", -- context lexers
+ "../lexers", -- original lexers
+ "../lexers/data", -- original lexers
+ ".", -- whatever
+ "./data", -- whatever
+}
+
+local function collect(name)
+ local root = gsub(lexers.LEXERPATH or ".","/.-lua$","") .. "/" -- this is a horrible hack
+ -- report("module '%s' locating '%s'",tostring(lexers),name)
+ for i=1,#locations do
+ local fullname = root .. locations[i] .. "/" .. name .. ".lua" -- so we can also check for .luc
+ if trace then
+ report("attempt to locate '%s'",fullname)
+ end
+ local okay, result = pcall(function () return dofile(fullname) end)
+ if okay then
+ return result, fullname
+ end
+ end
+end
+
+function context.loadluafile(name)
+ local data, fullname = collect(name)
+ if data then
+ if trace then
+ report("lua file '%s' has been loaded",fullname)
+ end
+ return data, fullname
+ end
+ report("unable to load lua file '%s'",name)
+end
+
+-- In fact we could share more, as we probably process the data, but then we
+-- need a more advanced helper.
+
+local cache = { }
+
+function context.loaddefinitions(name)
+ local data = cache[name]
+ if data then
+ if trace then
+ report("reusing definitions '%s'",name)
+ end
+ return data
+ elseif trace and data == false then
+ report("definitions '%s' were not found",name)
+ end
+ local data, fullname = collect(name)
+ if not data then
+ report("unable to load definition file '%s'",name)
+ data = false
+ elseif trace then
+ report("definition file '%s' has been loaded",fullname)
+ if detail then
+ local t, n = sortedkeys(data)
+ local template = " %-" .. n .. "s : %s"
+ for i=1,#t do
+ local k = t[i]
+ local v = data[k]
+ if type(v) ~= "table" then
+ report(template,k,tostring(v))
+ elseif #v > 0 then
+ report(template,k,#v)
+ else
+ -- no need to show hash
+ end
+ end
+ end
+ end
+ cache[name] = data
+ return type(data) == "table" and data
+end
+
+function context.word_match(words,word_chars,case_insensitive)
+ local chars = "%w_" -- maybe just "" when word_chars
+ if word_chars then
+ chars = "^([" .. chars .. gsub(word_chars,"([%^%]%-])", "%%%1") .."]+)"
+ else
+ chars = "^([" .. chars .."]+)"
+ end
+ if case_insensitive then
+ local word_list = { }
+ for i=1,#words do
+ word_list[lower(words[i])] = true
+ end
+ return P(function(input, index)
+ local s, e, word = find(input,chars,index)
+ return word and word_list[lower(word)] and e + 1 or nil
+ end)
+ else
+ local word_list = { }
+ for i=1,#words do
+ word_list[words[i]] = true
+ end
+ return P(function(input, index)
+ local s, e, word = find(input,chars,index)
+ return word and word_list[word] and e + 1 or nil
+ end)
+ end
+end
+
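+-- A small usage sketch (hypothetical word list): word_match returns an lpeg
+-- pattern that succeeds one position past a listed word, here matching
+-- case-insensitively:
+--
+-- local p = context.word_match({ "begin", "end" }, nil, true)
+-- print(lpegmatch(p,"BEGIN foo",1))  -- 6
+-- print(lpegmatch(p,"middle foo",1)) -- nil
+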
+-- Patterns are grouped in a separate namespace but the regular lexers expect
+-- shortcuts to be present in the lexers library. Maybe I'll incorporate some
+-- of l-lpeg later.
+
+do
+
+ local anything = P(1)
+ local idtoken = R("az","AZ","\127\255","__")
+ local digit = R("09")
+ local sign = S("+-")
+ local period = P(".")
+ local octdigit = R("07")
+ local hexdigit = R("09","AF","af")
+ local lower = R("az")
+ local upper = R("AZ")
+ local alpha = upper + lower
+ local space = S(" \n\r\t\f\v")
+ local eol = S("\r\n")
+ local backslash = P("\\")
+ local decimal = digit^1
+ local octal = P("0")
+ * octdigit^1
+ local hexadecimal = P("0") * S("xX")
+ * (hexdigit^0 * period * hexdigit^1 + hexdigit^1 * period * hexdigit^0 + hexdigit^1)
+ * (S("pP") * sign^-1 * hexdigit^1)^-1 -- *
+
+ patterns.idtoken = idtoken
+ patterns.digit = digit
+ patterns.sign = sign
+ patterns.period = period
+ patterns.octdigit = octdigit
+ patterns.hexdigit = hexdigit
+ patterns.ascii = R("\000\127") -- useless
+ patterns.extend = R("\000\255") -- useless
+ patterns.control = R("\000\031")
+ patterns.lower = lower
+ patterns.upper = upper
+ patterns.alpha = alpha
+ patterns.decimal = decimal
+ patterns.octal = octal
+ patterns.hexadecimal = hexadecimal
+ patterns.float = sign^-1
+ * (digit^0 * period * digit^1 + digit^1 * period * digit^0 + digit^1)
+ * S("eE") * sign^-1 * digit^1 -- *
+ patterns.cardinal = decimal
+
+ patterns.signeddecimal = sign^-1 * decimal
+ patterns.signedoctal = sign^-1 * octal
+ patterns.signedhexadecimal = sign^-1 * hexadecimal
+ patterns.integer = sign^-1 * (hexadecimal + octal + decimal)
+ patterns.real =
+ sign^-1 * ( -- at most one
+ digit^1 * period * digit^0 -- 10.0 10.
+ + digit^0 * period * digit^1 -- 0.10 .10
+ + digit^1 -- 10
+ )
+
+ patterns.anything = anything
+ patterns.any = anything
+ patterns.restofline = (1-eol)^1
+ patterns.space = space
+ patterns.spacing = space^1
+ patterns.nospacing = (1-space)^1
+ patterns.eol = eol
+ patterns.newline = P("\r\n") + eol
+
+ local endof = S("\n\r\f")
+
+ patterns.startofline = P(function(input,index)
+ return (index == 1 or lpegmatch(endof,input,index-1)) and index
+ end)
+
+ -- These are the expected ones for other lexers. Maybe put them all in their own
+ -- namespace and provide a compatibility layer, or should I just remove them?
+
+ lexers.any = anything
+ lexers.ascii = ascii
+ lexers.extend = extend
+ lexers.alpha = alpha
+ lexers.digit = digit
+ lexers.alnum = alnum
+ lexers.lower = lower
+ lexers.upper = upper
+ lexers.xdigit = hexdigit
+ lexers.cntrl = control
+ lexers.graph = R("!~")
+ lexers.print = R(" ~")
+ lexers.punct = R("!/", ":@", "[\'", "{~")
+ lexers.space = space
+ lexers.newline = S("\r\n\f")^1
+ lexers.nonnewline = 1 - lexers.newline
+ lexers.nonnewline_esc = 1 - (lexers.newline + '\\') + backslash * anything
+ lexers.dec_num = decimal
+ lexers.oct_num = octal
+ lexers.hex_num = hexadecimal
+ lexers.integer = integer
+ lexers.float = float
+ lexers.word = (alpha + "_") * (alpha + digit + "_")^0 -- weird, why digits
+
+end
+
+-- end of patterns
+
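+-- A couple of quick illustrations of the shortcuts defined above (the sample
+-- strings are arbitrary):
+--
+-- print(lpegmatch(patterns.float,"1.5e3"))   -- 6
+-- print(lpegmatch(patterns.integer,"0x1Fz")) -- 5
+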
+function context.exact_match(words,word_chars,case_insensitive)
+ local characters = concat(words)
+ local pattern -- the concat catches _ etc
+ if word_chars == true or word_chars == false or word_chars == nil then
+ word_chars = ""
+ end
+ if type(word_chars) == "string" then
+ pattern = S(characters) + patterns.idtoken
+ if case_insensitive then
+ pattern = pattern + S(upper(characters)) + S(lower(characters))
+ end
+ if word_chars ~= "" then
+ pattern = pattern + S(word_chars)
+ end
+ elseif word_chars then
+ pattern = word_chars
+ end
+ if case_insensitive then
+ local list = { }
+ if #words == 0 then
+ for k, v in next, words do
+ list[lower(k)] = v
+ end
+ else
+ for i=1,#words do
+ list[lower(words[i])] = true
+ end
+ end
+ return Cmt(pattern^1, function(_,i,s)
+ return list[lower(s)] -- and i or nil
+ end)
+ else
+ local list = { }
+ if #words == 0 then
+ for k, v in next, words do
+ list[k] = v
+ end
+ else
+ for i=1,#words do
+ list[words[i]] = true
+ end
+ end
+ return Cmt(pattern^1, function(_,i,s)
+ return list[s] -- and i or nil
+ end)
+ end
+end
+
+function context.just_match(words)
+ local p = P(words[1])
+ for i=2,#words do
+ p = p + P(words[i])
+ end
+ return p
+end
+
+-- spell checking (we can only load lua files)
+--
+-- return {
+-- min = 3,
+-- max = 40,
+-- n = 12345,
+-- words = {
+-- ["someword"] = "someword",
+-- ["anotherword"] = "Anotherword",
+-- },
+-- }
+
+local lists = { }
+
+function context.setwordlist(tag,limit) -- returns hash (lowercase keys and original values)
+ if not tag or tag == "" then
+ return false, 3
+ end
+ local list = lists[tag]
+ if not list then
+ list = context.loaddefinitions("spell-" .. tag)
+ if not list or type(list) ~= "table" then
+ report("invalid spell checking list for '%s'",tag)
+ list = { words = false, min = 3 }
+ else
+ list.words = list.words or false
+ list.min = list.min or 3
+ end
+ lists[tag] = list
+ end
+ if trace then
+ report("enabling spell checking for '%s' with minimum '%s'",tag,list.min)
+ end
+ return list.words, list.min
+end
+
+patterns.wordtoken = R("az","AZ","\127\255")
+patterns.wordpattern = patterns.wordtoken^3 -- todo: if limit and #s < limit then
+
+function context.checkedword(validwords,validminimum,s,i) -- ,limit
+ if not validwords then -- or #s < validminimum then
+ return true, "text", i -- true, "default", i
+ else
+ -- keys are lower
+ local word = validwords[s]
+ if word == s then
+ return true, "okay", i -- exact match
+ elseif word then
+ return true, "warning", i -- case issue
+ else
+ local word = validwords[lower(s)]
+ if word == s then
+ return true, "okay", i -- exact match
+ elseif word then
+ return true, "warning", i -- case issue
+ elseif upper(s) == s then
+ return true, "warning", i -- probably a logo or acronym
+ else
+ return true, "error", i
+ end
+ end
+ end
+end
+
+function context.styleofword(validwords,validminimum,s) -- ,limit
+ if not validwords or #s < validminimum then
+ return "text"
+ else
+ -- keys are lower
+ local word = validwords[s]
+ if word == s then
+ return "okay" -- exact match
+ elseif word then
+ return "warning" -- case issue
+ else
+ local word = validwords[lower(s)]
+ if word == s then
+ return "okay" -- exact match
+ elseif word then
+ return "warning" -- case issue
+ elseif upper(s) == s then
+ return "warning" -- probably a logo or acronym
+ else
+ return "error"
+ end
+ end
+ end
+end
+
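+-- A hedged usage sketch: assuming a word list file "spell-en.lua" (in one of
+-- the search locations) that returns a table like the one shown above, a
+-- lexer could combine these helpers as follows; the tag "en" is hypothetical:
+--
+-- local validwords, validminimum = context.setwordlist("en")
+-- local style = context.styleofword(validwords,validminimum,"anotherword")
+-- -- style is "warning" here because the list maps it to "Anotherword"
+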
+-- overloaded functions
+
+local h_table, b_table, n_table = { }, { }, { } -- from the time small tables were used (optimization)
+
+setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end })
+setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
+setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
+
+local newline = patterns.newline
+local p_yes = Cp() * Cs((1-newline)^1) * newline^-1
+local p_nop = newline
+
+local folders = { }
+
+local function fold_by_parsing(text,start_pos,start_line,start_level,lexer)
+ local folder = folders[lexer]
+ if not folder then
+ --
+ local pattern, folds, text, start_pos, line_num, prev_level, current_level
+ --
+ local fold_symbols = lexer._foldsymbols
+ local fold_pattern = lexer._foldpattern -- use lpeg instead (context extension)
+ --
+ if fold_pattern then
+ -- if no functions are found then we could have a faster one
+ fold_pattern = Cp() * C(fold_pattern) / function(s,match)
+ local symbols = fold_symbols[style_at[start_pos + s]]
+ if symbols then
+ local l = symbols[match]
+ if l then
+ current_level = current_level + l
+ end
+ end
+ end
+ local action_y = function()
+ folds[line_num] = prev_level
+ if current_level > prev_level then
+ folds[line_num] = prev_level + FOLD_HEADER
+ end
+ if current_level < FOLD_BASE then
+ current_level = FOLD_BASE
+ end
+ prev_level = current_level
+ line_num = line_num + 1
+ end
+ local action_n = function()
+ folds[line_num] = prev_level + FOLD_BLANK
+ line_num = line_num + 1
+ end
+ pattern = ((fold_pattern + (1-newline))^1 * newline / action_y + newline/action_n)^0
+
+ else
+ -- the traditional one but a bit optimized
+ local fold_symbols_patterns = fold_symbols._patterns
+ local action_y = function(pos,line)
+ for j = 1, #fold_symbols_patterns do
+ for s, match in gmatch(line,fold_symbols_patterns[j]) do -- "()(" .. patterns[i] .. ")"
+ local symbols = fold_symbols[style_at[start_pos + pos + s - 1]]
+ local l = symbols and symbols[match]
+ local t = type(l)
+ if t == "number" then
+ current_level = current_level + l
+ elseif t == "function" then
+ current_level = current_level + l(text, pos, line, s, match)
+ end
+ end
+ end
+ folds[line_num] = prev_level
+ if current_level > prev_level then
+ folds[line_num] = prev_level + FOLD_HEADER
+ end
+ if current_level < FOLD_BASE then
+ current_level = FOLD_BASE
+ end
+ prev_level = current_level
+ line_num = line_num + 1
+ end
+ local action_n = function()
+ folds[line_num] = prev_level + FOLD_BLANK
+ line_num = line_num + 1
+ end
+ pattern = (p_yes/action_y + p_nop/action_n)^0
+ end
+ --
+ local reset_parser = lexer._reset_parser
+ --
+ folder = function(_text_,_start_pos_,_start_line_,_start_level_)
+ if reset_parser then
+ reset_parser()
+ end
+ folds = { }
+ text = _text_
+ start_pos = _start_pos_
+ line_num = _start_line_
+ prev_level = _start_level_
+ current_level = prev_level
+ lpegmatch(pattern,text)
+ -- make folds collectable
+ local t = folds
+ folds = nil
+ return t
+ end
+ folders[lexer] = folder
+ end
+ return folder(text,start_pos,start_line,start_level,lexer)
+end
+
+local folds, current_line, prev_level
+
+local function action_y()
+ local current_level = FOLD_BASE + indent_amount[current_line]
+ if current_level > prev_level then -- next level
+ local i = current_line - 1
+ local f
+ while true do
+ f = folds[i]
+ if not f then
+ break
+ elseif f[2] == FOLD_BLANK then
+ i = i - 1
+ else
+ f[2] = FOLD_HEADER -- low indent
+ break
+ end
+ end
+ folds[current_line] = { current_level } -- high indent
+ elseif current_level < prev_level then -- prev level
+ local f = folds[current_line - 1]
+ if f then
+ f[1] = prev_level -- high indent
+ end
+ folds[current_line] = { current_level } -- low indent
+ else -- same level
+ folds[current_line] = { prev_level }
+ end
+ prev_level = current_level
+ current_line = current_line + 1
+end
+
+local function action_n()
+ folds[current_line] = { prev_level, FOLD_BLANK }
+ current_line = current_line + 1
+end
+
+local pattern = ( S("\t ")^0 * ( (1-patterns.eol)^1 / action_y + P(true) / action_n) * newline )^0
+
+local function fold_by_indentation(text,start_pos,start_line,start_level)
+ -- initialize
+ folds = { }
+ current_line = start_line
+ prev_level = start_level
+ -- define
+ -- -- not here .. pattern binds and local functions are not frozen
+ -- analyze
+ lpegmatch(pattern,text)
+ -- flatten
+ for line, level in next, folds do
+ folds[line] = level[1] + (level[2] or 0)
+ end
+ -- done, make folds collectable
+ local t = folds
+ folds = nil
+ return t
+end
+
+local function fold_by_line(text,start_pos,start_line,start_level)
+ local folds = { }
+ -- can also be lpeg'd
+ for _ in gmatch(text,".-\r?\n") do
+ folds[start_line] = n_table[start_level] -- { start_level } -- still tables? needs checking
+ start_line = start_line + 1
+ end
+ return folds
+end
+
+local threshold_by_lexer = 512 * 1024 -- we don't know the filesize yet
+local threshold_by_parsing = 512 * 1024 -- we don't know the filesize yet
+local threshold_by_indentation = 512 * 1024 -- we don't know the filesize yet
+local threshold_by_line = 512 * 1024 -- we don't know the filesize yet
+
+function context.fold(lexer,text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go
+ if text == "" then
+ return { }
+ end
+ if initialize then
+ initialize()
+ end
+ local fold_by_lexer = lexer._fold
+ local fold_by_symbols = lexer._foldsymbols
+ local filesize = 0 -- we don't know that
+ if fold_by_lexer then
+ if filesize <= threshold_by_lexer then
+ return fold_by_lexer(text,start_pos,start_line,start_level,lexer)
+ end
+ elseif fold_by_symbols then -- and lexer.properties("fold.by.parsing",1) > 0 then
+ if filesize <= threshold_by_parsing then
+ return fold_by_parsing(text,start_pos,start_line,start_level,lexer)
+ end
+ elseif lexer.properties("fold.by.indentation",1) > 0 then
+ if filesize <= threshold_by_indentation then
+ return fold_by_indentation(text,start_pos,start_line,start_level,lexer)
+ end
+ elseif lexer.properties("fold.by.line",1) > 0 then
+ if filesize <= threshold_by_line then
+ return fold_by_line(text,start_pos,start_line,start_level,lexer)
+ end
+ end
+ return { }
+end
+
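+-- A small sketch of how the dispatcher is driven (the lexer table is
+-- hypothetical): with _foldsymbols present (and no custom _fold) context.fold
+-- ends up in fold_by_parsing and returns a table that maps line numbers to
+-- fold levels:
+--
+-- local folds = context.fold(somelexer,text,1,1,lexers.FOLD_BASE)
+-- for line, level in next, folds do
+--     print(line,level)
+-- end
+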
+-- The following code is mostly unchanged:
+
+local function add_rule(lexer,id,rule) -- unchanged
+ if not lexer._RULES then
+ lexer._RULES = { }
+ lexer._RULEORDER = { }
+ end
+ lexer._RULES[id] = rule
+ lexer._RULEORDER[#lexer._RULEORDER + 1] = id
+end
+
+-- I finally figured out that adding more styles was an issue because of several
+-- reasons:
+--
+-- + in old versions there was a limit on the number of styles, so we overran
+--   the built-in hard coded scintilla range
+-- + then, the add_style function didn't check for already known ones, so again
+-- we had an overrun (with some magic that could be avoided)
+-- + then, when I messed with a new default set I realized that there is no check
+-- in initializing _TOKENSTYLES (here the inspect function helps)
+-- + of course it was mostly a side effect of passing all the used styles to the
+-- _tokenstyles instead of only the not-default ones but such a thing should not
+-- matter (read: intercepted)
+--
+-- This finally removed a headache; it was revealed by lots of tracing, which I
+-- should have built in way earlier.
+
+local function add_style(lexer,token_name,style) -- changed a bit around 3.41
+ -- We don't add styles that are already defined as this can overflow the
+ -- amount possible (in old versions of scintilla).
+ if defaultstyles[token_name] then
+ if trace and detail then
+ report("default style '%s' is ignored as extra style",token_name)
+ end
+ return
+ elseif predefinedstyles[token_name] then
+ if trace and detail then
+ report("predefined style '%s' is ignored as extra style",token_name)
+ end
+ return
+ else
+ if trace and detail then
+ report("adding extra style '%s' as '%s'",token_name,style)
+ end
+ end
+ -- This is unchanged. We skip the dangerous zone.
+ local num_styles = lexer._numstyles
+ if num_styles == 32 then
+ num_styles = num_styles + 8
+ end
+ if num_styles >= 255 then
+ report("there can't be more than %s styles",255)
+ end
+ lexer._TOKENSTYLES[token_name] = num_styles
+ lexer._EXTRASTYLES[token_name] = style
+ lexer._numstyles = num_styles + 1
+end
+
+local function check_styles(lexer)
+ -- Here we also use a check for the dangerous zone. That way we can have a
+ -- larger default set. The original code just assumes that #default is less
+ -- than the dangerous zone's start.
+ local numstyles = 0
+ local tokenstyles = { }
+ for i=1, #default do
+ if numstyles == 32 then
+ numstyles = numstyles + 8
+ end
+ tokenstyles[default[i]] = numstyles
+ numstyles = numstyles + 1
+ end
+ -- Unchanged.
+ for i=1, #predefined do
+ tokenstyles[predefined[i]] = i + 31
+ end
+ lexer._TOKENSTYLES = tokenstyles
+ lexer._numstyles = numstyles
+ lexer._EXTRASTYLES = { }
+ return lexer
+end
+
+-- At some point an 'any' append showed up in the original code ...
+-- but I see no need to catch that case ... better fix the specification.
+--
+-- hm, why are many joined twice
+
+local function join_tokens(lexer) -- slightly different from the original (no 'any' append)
+ local patterns = lexer._RULES
+ local order = lexer._RULEORDER
+ -- report("lexer: %s, tokens: %s",lexer._NAME,table.concat(order," + "))
+ if patterns and order then
+ local token_rule = patterns[order[1]] -- normally whitespace
+ for i=2,#order do
+ token_rule = token_rule + patterns[order[i]]
+ end
+ if lexer._TYPE ~= "context" then
+ token_rule = token_rule + lexers.token(lexers.DEFAULT, patterns.any)
+ end
+ lexer._TOKENRULE = token_rule
+ return token_rule
+ else
+ return P(1)
+ end
+end
+
+local function add_lexer(grammar, lexer) -- mostly the same as the original
+ local token_rule = join_tokens(lexer)
+ local lexer_name = lexer._NAME
+ local children = lexer._CHILDREN
+ for i=1,#children do
+ local child = children[i]
+ if child._CHILDREN then
+ add_lexer(grammar, child)
+ end
+ local child_name = child._NAME
+ local rules = child._EMBEDDEDRULES[lexer_name]
+ local rules_token_rule = grammar["__" .. child_name] or rules.token_rule
+ local pattern = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1
+ grammar[child_name] = pattern * V(lexer_name)
+ local embedded_child = "_" .. child_name
+ grammar[embedded_child] = rules.start_rule * pattern
+ token_rule = V(embedded_child) + token_rule
+ end
+ if trace then
+ report("adding lexer '%s' with %s children",lexer_name,#children)
+ end
+ grammar["__" .. lexer_name] = token_rule
+ grammar[lexer_name] = token_rule^0
+end
+
+local function build_grammar(lexer,initial_rule) -- same as the original
+ local children = lexer._CHILDREN
+ local lexer_name = lexer._NAME
+ if children then
+ if not initial_rule then
+ initial_rule = lexer_name
+ end
+ local grammar = { initial_rule }
+ add_lexer(grammar, lexer)
+ lexer._INITIALRULE = initial_rule
+ lexer._GRAMMAR = Ct(P(grammar))
+ if trace then
+ report("building grammar for '%s' with whitespace '%s'and %s children",lexer_name,lexer.whitespace or "?",#children)
+ end
+ else
+ lexer._GRAMMAR = Ct(join_tokens(lexer)^0)
+ if trace then
+ report("building grammar for '%s' with whitespace '%s'",lexer_name,lexer.whitespace or "?")
+ end
+ end
+end
+
+-- So far. We need these local functions in the next one.
+
+local lineparsers = { }
+
+local maxmatched = 100
+
+local function collapsed(t)
+ local lasttoken = nil
+ local lastindex = nil
+ for i=1,#t,2 do
+ local token = t[i]
+ local position = t[i+1]
+ if token == lasttoken then
+ t[lastindex] = position
+ elseif lastindex then
+ lastindex = lastindex + 1
+ t[lastindex] = token
+ lastindex = lastindex + 1
+ t[lastindex] = position
+ lasttoken = token
+ else
+ lastindex = i+1
+ lasttoken = token
+ end
+ end
+ for i=#t,lastindex+1,-1 do
+ t[i] = nil
+ end
+ return t
+end
+
+local function matched(lexer,grammar,text)
+ -- text = string.gsub(text,"\z","!")
+ local t = lpegmatch(grammar,text)
+ if trace then
+ if show then
+ report("output of lexer: %s (max %s entries)",lexer._NAME,maxmatched)
+ local s = lexer._TOKENSTYLES
+ local p = 1
+ for i=1,2*maxmatched,2 do
+ local n = i + 1
+ local ti = t[i]
+ local tn = t[n]
+ if ti then
+ local txt = sub(text,p,tn-1)
+ if txt then
+ txt = gsub(txt,"[%s]"," ")
+ else
+ txt = "!no text!"
+ end
+ report("%4i : %s > %s (%s) (%s)",n/2,ti,tn,s[ti] or "!unset!",txt)
+ p = tn
+ else
+ break
+ end
+ end
+ end
+ report("lexer results: %s, length: %s, ranges: %s",lexer._NAME,#text,#t/2)
+ if collapse then
+ t = collapsed(t)
+ report("lexer collapsed: %s, length: %s, ranges: %s",lexer._NAME,#text,#t/2)
+ end
+ elseif collapse then
+ t = collapsed(t)
+ end
+ return t
+end
+
+-- Todo: make nice generic lexer (extra argument with start/stop commands) for
+-- context itself.
+
+function context.lex(lexer,text,init_style)
+ -- local lexer = global._LEXER
+ local grammar = lexer._GRAMMAR
+ if initialize then
+ initialize()
+ end
+ if not grammar then
+ return { }
+ elseif lexer._LEXBYLINE then -- we could keep token
+ local tokens = { }
+ local offset = 0
+ local noftokens = 0
+ local lineparser = lineparsers[lexer]
+ if not lineparser then -- probably a cmt is more efficient
+ lineparser = C((1-newline)^0 * newline) / function(line)
+ local length = #line
+ local line_tokens = length > 0 and lpegmatch(grammar,line)
+ if line_tokens then
+ for i=1,#line_tokens,2 do
+ noftokens = noftokens + 1
+ tokens[noftokens] = line_tokens[i]
+ noftokens = noftokens + 1
+ tokens[noftokens] = line_tokens[i + 1] + offset
+ end
+ end
+ offset = offset + length
+ if noftokens > 0 and tokens[noftokens] ~= offset then
+ noftokens = noftokens + 1
+ tokens[noftokens] = "default"
+ noftokens = noftokens + 1
+ tokens[noftokens] = offset + 1
+ end
+ end
+ lineparser = lineparser^0
+ lineparsers[lexer] = lineparser
+ end
+ lpegmatch(lineparser,text)
+ return tokens
+ elseif lexer._CHILDREN then
+ local hash = lexer._HASH -- hm, was _hash
+ if not hash then
+ hash = { }
+ lexer._HASH = hash
+ end
+ grammar = hash[init_style]
+ if grammar then
+ lexer._GRAMMAR = grammar
+ -- lexer._GRAMMAR = lexer._GRAMMAR or grammar
+ else
+ for style, style_num in next, lexer._TOKENSTYLES do
+ if style_num == init_style then
+ -- the name of the lexer is filtered from the whitespace
+ -- specification .. weird code, should be a reverse hash
+ local lexer_name = match(style,"^(.+)_whitespace") or lexer._NAME
+ if lexer._INITIALRULE ~= lexer_name then
+ grammar = hash[lexer_name]
+ if not grammar then
+ build_grammar(lexer,lexer_name)
+ grammar = lexer._GRAMMAR
+ hash[lexer_name] = grammar
+ end
+ end
+ break
+ end
+ end
+ grammar = grammar or lexer._GRAMMAR
+ hash[init_style] = grammar
+ end
+ if trace then
+ report("lexing '%s' with initial style '%s' and %s children",lexer._NAME,#lexer._CHILDREN or 0,init_style)
+ end
+ return matched(lexer,grammar,text)
+ else
+ if trace then
+ report("lexing '%s' with initial style '%s'",lexer._NAME,init_style)
+ end
+ return matched(lexer,grammar,text)
+ end
+end
+
+-- hm, changed in 3.24 .. no longer small table but one table:
+
+function context.token(name, patt)
+ return patt * Cc(name) * Cp()
+end
+
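+-- A tiny illustration of the single-table result: a token pattern captures
+-- the style name and the position just past the match (P("if") is only an
+-- example):
+--
+-- local t = lpegmatch(Ct(context.token("keyword",P("if"))),"if then")
+-- -- t[1] == "keyword", t[2] == 3
+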
+-- The next ones were mostly unchanged (till now); we moved them here when 3.41
+-- became close to impossible to combine with (or overload) and a merge was
+-- the only solution. It makes later updates more painful, but the update to
+-- 3.41 was already a bit of a nightmare anyway.
+
+-- Loading lexers is rather interwoven with what the dll/so sets and
+-- that changes over time. So, we need to keep an eye on changes. One
+-- problem that we always faced was the limitation on the length of
+-- lexer names (as they occasionally get appended or prepended to strings
+-- with a hard coded limit). So, we always used alternative names and now
+-- need to make sure this doesn't clash. As I no longer intend to use shipped
+-- lexers I could strip away some of the code in the future, but keeping
+-- it as reference makes sense.
+
+-- I spent quite some time figuring out why 3.41 didn't work or crashed, which
+-- is hard when no stdout is available and the io library is absent. In
+-- the end one of the problems was the _NAME setting. We set _NAME
+-- to e.g. 'tex' but load from a file with a longer name (which we do
+-- because we don't want to clash with existing files), so we ended up
+-- with lexers not being found.
+
+local whitespaces = { }
+
+local function push_whitespace(name)
+ table.insert(whitespaces,lexers.WHITESPACE or "whitespace")
+ lexers.WHITESPACE = name .. "_whitespace"
+end
+
+local function pop_whitespace()
+ lexers.WHITESPACE = table.remove(whitespaces) or "whitespace"
+end
+
+local function check_whitespace(lexer,name)
+ if lexer then
+ lexer.whitespace = (name or lexer.name or lexer._NAME) .. "_whitespace"
+ end
+end
+
+function context.new(name,filename)
+ local lexer = {
+ _TYPE = "context",
+ --
+ _NAME = name, -- used for token building
+ _FILENAME = filename, -- for diagnostic purposes
+ --
+ name = name,
+ filename = filename,
+ }
+ if trace then
+ report("initializing lexer tagged '%s' from file '%s'",name,filename or name)
+ end
+ check_whitespace(lexer)
+ check_styles(lexer)
+ check_properties(lexer)
+ return lexer
+end
+
+local function nolexer(name)
+ local lexer = {
+ _TYPE = "unset",
+ _NAME = name,
+ -- _rules = { },
+ }
+ check_styles(lexer)
+ check_whitespace(lexer)
+ check_properties(lexer)
+ return lexer
+end
+
+local function load_lexer(name,namespace)
+ if trace then
+ report("loading lexer file '%s'",name)
+ end
+ push_whitespace(namespace or name) -- for traditional lexers .. no alt_name yet
+ local lexer, fullname = context.loadluafile(name)
+ pop_whitespace()
+ if not lexer then
+ report("invalid lexer file '%s'",name)
+ elseif trace then
+ report("lexer file '%s' has been loaded",fullname)
+ end
+ if type(lexer) ~= "table" then
+ if trace then
+ report("lexer file '%s' gets a dummy lexer",name)
+ end
+ return nolexer(name)
+ end
+ if lexer._TYPE ~= "context" then
+ lexer._TYPE = "native"
+ check_styles(lexer)
+ check_whitespace(lexer,namespace or name)
+ check_properties(lexer)
+ end
+ if not lexer._NAME then
+ lexer._NAME = name -- so: filename
+ end
+ if name ~= namespace then
+ lexer._NAME = namespace
+ end
+ return lexer
+end
+
+-- tracing ...
+
+local function inspect_lexer(lexer,level)
+ -- If we had the regular libs available I could use the usual
+ -- helpers.
+ local parent = lexer._lexer
+ lexer._lexer = nil -- prevent endless recursion
+ local name = lexer._NAME
+ local function showstyles_1(tag,styles)
+ local numbers = { }
+ for k, v in next, styles do
+ numbers[v] = k
+ end
+ -- sort by number and make number hash too
+ local keys = sortedkeys(numbers)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = numbers[k]
+ report("[%s %s] %s %s = %s",level,name,tag,k,v)
+ end
+ end
+ local function showstyles_2(tag,styles)
+ local keys = sortedkeys(styles)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = styles[k]
+ report("[%s %s] %s %s = %s",level,name,tag,k,v)
+ end
+ end
+ local keys = sortedkeys(lexer)
+ for i=1,#keys do
+ local k = keys[i]
+ local v = lexer[k]
+ report("[%s %s] root key : %s = %s",level,name,k,tostring(v))
+ end
+ showstyles_1("token style",lexer._TOKENSTYLES)
+ showstyles_2("extra style",lexer._EXTRASTYLES)
+ local children = lexer._CHILDREN
+ if children then
+ for i=1,#children do
+ inspect_lexer(children[i],level+1)
+ end
+ end
+ lexer._lexer = parent
+end
+
+function context.inspect(lexer)
+ inspect_lexer(lexer,0)
+end
+
+-- An optional second argument has been introduced so that one can embed a lexer
+-- more than once ... maybe something to look into (as now it's done by remembering
+-- the start sequence ... quite okay but maybe suboptimal ... anyway, never change
+-- a working solution).
+
+-- namespace can be automatic: if parent then use name of parent (chain)
+
+function context.loadlexer(filename,namespace)
+ nesting = nesting + 1
+ if not namespace then
+ namespace = filename
+ end
+ local lexer = usedlexers[namespace] -- we load by filename but the internal name can be short
+ if lexer then
+ if trace then
+ report("reusing lexer '%s'",namespace)
+ end
+ nesting = nesting - 1
+ return lexer
+ elseif trace then
+ report("loading lexer '%s'",namespace)
+ end
+ --
+ if initialize then
+ initialize()
+ end
+ --
+ parent_lexer = nil
+ --
+ lexer = load_lexer(filename,namespace) or nolexer(filename,namespace)
+ usedlexers[filename] = lexer
+ --
+ if not lexer._rules and not lexer._lexer then
+ lexer._lexer = parent_lexer
+ end
+ --
+ if lexer._lexer then
+ local _l = lexer._lexer
+ local _r = lexer._rules
+ local _s = lexer._tokenstyles
+ if not _l._tokenstyles then
+ _l._tokenstyles = { }
+ end
+ if _r then
+ local rules = _l._rules
+ local name = lexer.name
+ for i=1,#_r do
+ local rule = _r[i]
+ rules[#rules + 1] = {
+ name .. "_" .. rule[1],
+ rule[2],
+ }
+ end
+ end
+ if _s then
+ local tokenstyles = _l._tokenstyles
+ for token, style in next, _s do
+ tokenstyles[token] = style
+ end
+ end
+ lexer = _l
+ end
+ --
+ local _r = lexer._rules
+ if _r then
+ local _s = lexer._tokenstyles
+ if _s then
+ for token, style in next, _s do
+ add_style(lexer, token, style)
+ end
+ end
+ for i=1,#_r do
+ local rule = _r[i]
+ add_rule(lexer, rule[1], rule[2])
+ end
+ build_grammar(lexer)
+ end
+ --
+ add_style(lexer, lexer.whitespace, lexers.STYLE_WHITESPACE)
+ --
+ local foldsymbols = lexer._foldsymbols
+ if foldsymbols then
+ local patterns = foldsymbols._patterns
+ if patterns then
+ for i = 1, #patterns do
+ patterns[i] = "()(" .. patterns[i] .. ")"
+ end
+ end
+ end
+ --
+ lexer.lex = lexers.lex
+ lexer.fold = lexers.fold
+ --
+ nesting = nesting - 1
+ --
+ if inspect then
+ context.inspect(lexer)
+ end
+ --
+ return lexer
+end
+
+function context.embed_lexer(parent, child, start_rule, end_rule) -- mostly the same as the original
+ local embeddedrules = child._EMBEDDEDRULES
+ if not embeddedrules then
+ embeddedrules = { }
+ child._EMBEDDEDRULES = embeddedrules
+ end
+ if not child._RULES then
+ local rules = child._rules
+ if not rules then
+ report("child lexer '%s' has no rules",child._NAME or "unknown")
+ rules = { }
+ child._rules = rules
+ end
+ for i=1,#rules do
+ local rule = rules[i]
+ add_rule(child, rule[1], rule[2])
+ end
+ end
+ embeddedrules[parent._NAME] = {
+ ["start_rule"] = start_rule,
+ ["token_rule"] = join_tokens(child),
+ ["end_rule"] = end_rule
+ }
+ local children = parent._CHILDREN
+ if not children then
+ children = { }
+ parent._CHILDREN = children
+ end
+ children[#children + 1] = child
+ local tokenstyles = parent._tokenstyles
+ if not tokenstyles then
+ tokenstyles = { }
+ parent._tokenstyles = tokenstyles
+ end
+ local childname = child._NAME
+ local whitespace = childname .. "_whitespace"
+ tokenstyles[whitespace] = lexers.STYLE_WHITESPACE -- all these STYLE_THINGS will go .. just a proper hash
+ if trace then
+ report("using whitespace '%s' as trigger for '%s' with property '%s'",whitespace,childname,lexers.STYLE_WHITESPACE)
+ end
+ local childstyles = child._tokenstyles
+ if childstyles then
+ for token, style in next, childstyles do
+ tokenstyles[token] = style
+ end
+ end
+ child._lexer = parent
+ parent_lexer = parent
+end
+
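+-- A hedged sketch of how embedding could be used (the delimiters are only an
+-- illustration; real lexers pass proper token rules as start and end rules):
+--
+-- local texlexer = context.loadlexer("scite-context-lexer-tex","tex")
+-- local lualexer = context.loadlexer("scite-context-lexer-lua","lua")
+-- context.embed_lexer(texlexer,lualexer,P("\\startluacode"),P("\\stopluacode"))
+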
+-- we now move the adapted code to the lexers namespace
+
+lexers.new = context.new
+lexers.load = context.loadlexer
+------.loadlexer = context.loadlexer
+lexers.loadluafile = context.loadluafile
+lexers.embed_lexer = context.embed_lexer
+lexers.fold = context.fold
+lexers.lex = context.lex
+lexers.token = context.token
+lexers.word_match = context.word_match
+lexers.exact_match = context.exact_match
+lexers.just_match = context.just_match
+lexers.inspect = context.inspect
+lexers.report = context.report
+lexers.inform = context.inform
+
+-- helper .. alas ... the lexer's lua instance is rather crippled .. not even
+-- math is part of it
+
+do
+
+ local floor = math and math.floor
+ local char = string.char
+
+ if not floor then
+
+ floor = function(n)
+ return tonumber(format("%d",n))
+ end
+
+ math = math or { }
+
+ math.floor = floor
+
+ end
+
+ local function utfchar(n)
+ if n < 0x80 then
+ return char(n)
+ elseif n < 0x800 then
+ return char(
+ 0xC0 + floor(n/0x40),
+ 0x80 + (n % 0x40)
+ )
+ elseif n < 0x10000 then
+ return char(
+ 0xE0 + floor(n/0x1000),
+ 0x80 + (floor(n/0x40) % 0x40),
+ 0x80 + (n % 0x40)
+ )
+ elseif n < 0x40000 then
+ return char(
+ 0xF0 + floor(n/0x40000),
+ 0x80 + floor(n/0x1000),
+ 0x80 + (floor(n/0x40) % 0x40),
+ 0x80 + (n % 0x40)
+ )
+ else
+ -- return char(
+ -- 0xF1 + floor(n/0x1000000),
+ -- 0x80 + floor(n/0x40000),
+ -- 0x80 + floor(n/0x1000),
+ -- 0x80 + (floor(n/0x40) % 0x40),
+ -- 0x80 + (n % 0x40)
+ -- )
+ return "?"
+ end
+ end
+
+ context.utfchar = utfchar
+
+ -- a helper from l-lpeg:
+
+ local function make(t)
+ local p
+ for k, v in next, t do
+ if not p then
+ if next(v) then
+ p = P(k) * make(v)
+ else
+ p = P(k)
+ end
+ else
+ if next(v) then
+ p = p + P(k) * make(v)
+ else
+ p = p + P(k)
+ end
+ end
+ end
+ return p
+ end
+
+ function lpeg.utfchartabletopattern(list)
+ local tree = { }
+ for i=1,#list do
+ local t = tree
+ for c in gmatch(list[i],".") do
+ if not t[c] then
+ t[c] = { }
+ end
+ t = t[c]
+ end
+ end
+ return make(tree)
+ end
+
+ patterns.invisibles = lpeg.utfchartabletopattern {
+ utfchar(0x00A0), -- nbsp
+ utfchar(0x2000), -- enquad
+ utfchar(0x2001), -- emquad
+ utfchar(0x2002), -- enspace
+ utfchar(0x2003), -- emspace
+ utfchar(0x2004), -- threeperemspace
+ utfchar(0x2005), -- fourperemspace
+ utfchar(0x2006), -- sixperemspace
+ utfchar(0x2007), -- figurespace
+ utfchar(0x2008), -- punctuationspace
+ utfchar(0x2009), -- breakablethinspace
+ utfchar(0x200A), -- hairspace
+ utfchar(0x200B), -- zerowidthspace
+ utfchar(0x202F), -- narrownobreakspace
+ utfchar(0x205F), -- math thinspace
+ }
+
+ -- now we can make:
+
+ patterns.iwordtoken = patterns.wordtoken - patterns.invisibles
+ patterns.iwordpattern = patterns.iwordtoken^3
+
+end
+
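+-- A short illustration of the helpers above (the byte values are just what
+-- utf-8 encoding produces):
+--
+-- print(context.utfchar(0x00A0))                    -- "\194\160" (a nbsp)
+-- print(lpegmatch(patterns.invisibles,"\194\160"))  -- 3 (one past the match)
+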
+-- The following helpers are not used; they are partially replaced by other
+-- mechanisms, and when needed I'll first optimize them. I only made them
+-- somewhat more readable.
+
+function lexers.delimited_range(chars, single_line, no_escape, balanced) -- unchanged
+ local s = sub(chars,1,1)
+ local e = #chars == 2 and sub(chars,2,2) or s
+ local range
+ local b = balanced and s or ""
+ local n = single_line and "\n" or ""
+ if no_escape then
+ local invalid = S(e .. n .. b)
+ range = patterns.any - invalid
+ else
+ local invalid = S(e .. n .. b) + patterns.backslash
+ range = patterns.any - invalid + patterns.backslash * patterns.any
+ end
+ if balanced and s ~= e then
+ return P {
+ s * (range + V(1))^0 * e
+ }
+ else
+ return s * range^0 * P(e)^-1
+ end
+end
+
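+-- For instance (a sketch, not used anywhere in this file): a single-line,
+-- double-quoted string with backslash escapes permitted:
+--
+-- local quoted = lexers.delimited_range('"',true)
+-- print(lpegmatch(quoted,[["hello \" world" and more]])) -- 17 (past the closing quote)
+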
+function lexers.starts_line(patt) -- unchanged
+ return P ( function(input, index)
+ if index == 1 then
+ return index
+ end
+ local char = sub(input,index - 1,index - 1)
+ if char == "\n" or char == "\r" or char == "\f" then
+ return index
+ end
+ end ) * patt
+end
+
+function lexers.last_char_includes(s) -- unchanged
+ s = "[" .. gsub(s,"[-%%%[]", "%%%1") .. "]"
+ return P ( function(input, index)
+ if index == 1 then
+ return index
+ end
+ local i = index
+ while match(sub(input,i - 1,i - 1),"[ \t\r\n\f]") do
+ i = i - 1
+ end
+ if match(sub(input,i - 1,i - 1),s) then
+ return index
+ end
+ end)
+end
+
+function lexers.nested_pair(start_chars, end_chars) -- unchanged
+ local s = start_chars
+ local e = P(end_chars)^-1
+ return P {
+ s * (patterns.any - s - end_chars + V(1))^0 * e
+ }
+end
+
+local function prev_line_is_comment(prefix, text, pos, line, s) -- unchanged
+ local start = find(line,"%S")
+ if start < s and not find(line,prefix,start,true) then
+ return false
+ end
+ local p = pos - 1
+ if sub(text,p,p) == "\n" then
+ p = p - 1
+ if sub(text,p,p) == "\r" then
+ p = p - 1
+ end
+ if sub(text,p,p) ~= "\n" then
+ while p > 1 and sub(text,p - 1,p - 1) ~= "\n"
+ do p = p - 1
+ end
+ while find(sub(text,p,p),"^[\t ]$") do
+ p = p + 1
+ end
+ return sub(text,p,p + #prefix - 1) == prefix
+ end
+ end
+ return false
+end
+
+local function next_line_is_comment(prefix, text, pos, line, s)
+ local p = find(text,"\n",pos + s)
+ if p then
+ p = p + 1
+ while find(sub(text,p,p),"^[\t ]$") do
+ p = p + 1
+ end
+ return sub(text,p,p + #prefix - 1) == prefix
+ end
+ return false
+end
+
+function lexers.fold_line_comments(prefix)
+ local property_int = lexers.property_int
+ return function(text, pos, line, s)
+ if property_int["fold.line.comments"] == 0 then
+ return 0
+ end
+ if s > 1 and match(line,"^%s*()") < s then
+ return 0
+ end
+ local prev_line_comment = prev_line_is_comment(prefix, text, pos, line, s)
+ local next_line_comment = next_line_is_comment(prefix, text, pos, line, s)
+ if not prev_line_comment and next_line_comment then
+ return 1
+ end
+ if prev_line_comment and not next_line_comment then
+ return -1
+ end
+ return 0
+ end
+end
+
+-- done
+
+return lexers
diff --git a/context/data/scite/context/lexers/themes/scite-context-theme.lua b/context/data/scite/context/lexers/themes/scite-context-theme.lua
new file mode 100644
index 000000000..b0c63fe39
--- /dev/null
+++ b/context/data/scite/context/lexers/themes/scite-context-theme.lua
@@ -0,0 +1,150 @@
+local info = {
+ version = 1.002,
+ comment = "theme for scintilla lpeg lexer for context/metafun",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files",
+}
+
+-- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
+
+-- What used to be proper Lua definitions are, as of 3.42, SciTE properties,
+-- although the integration is still only halfway there. Also, the indexed style
+-- specification is now a hash (which indeed makes more sense). However, the
+-- question is: am I going to rewrite the style bit? It anyway makes more sense
+-- to keep this file somewhat neutral as we no longer need to be compatible.
+-- However, we cannot be sure of helpers being present yet when this file is
+-- loaded, so we are somewhat crippled. On the other hand, I don't see other
+-- schemes being used with the context lexers.
+
+-- The next kludge is no longer needed which is good!
+--
+-- if GTK then -- WIN32 GTK OSX CURSES
+-- font_name = '!' .. font_name
+-- end
+
+-- I need to play with these, some work ok:
+--
+-- eolfilled noteolfilled
+-- characterset:u|l
+-- visible notvisible
+-- changeable notchangeable (this way we can protect styles, e.g. preamble?)
+-- hotspot nothotspot
+
+local font_name = 'Dejavu Sans Mono'
+local font_size = '14'
+
+local colors = {
+ red = { '7F', '00', '00' },
+ green = { '00', '7F', '00' },
+ blue = { '00', '00', '7F' },
+ cyan = { '00', '7F', '7F' },
+ magenta = { '7F', '00', '7F' },
+ yellow = { '7F', '7F', '00' },
+ orange = { 'B0', '7F', '00' },
+ --
+ white = { 'FF', 'FF', 'FF' },
+ light = { 'CF', 'CF', 'CF' },
+ grey = { '80', '80', '80' },
+ dark = { '4F', '4F', '4F' },
+ black = { '00', '00', '00' },
+ --
+ selection = { 'F7', 'F7', 'F7' },
+ logpanel = { 'E7', 'E7', 'E7' },
+ textpanel = { 'CF', 'CF', 'CF' },
+ linepanel = { 'A7', 'A7', 'A7' },
+ tippanel = { '44', '44', '44' },
+ --
+ right = { '00', '00', 'FF' },
+ wrong = { 'FF', '00', '00' },
+}
+
+local styles = {
+
+ ["whitespace"] = { },
+ ["default"] = { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel },
+ ["default"] = { font = font_name, size = font_size, fore = colors.black },
+ ["number"] = { fore = colors.cyan },
+ ["comment"] = { fore = colors.yellow },
+ ["keyword"] = { fore = colors.blue, bold = true },
+ ["string"] = { fore = colors.magenta },
+ -- ["preproc"] = { fore = colors.yellow, bold = true },
+ ["error"] = { fore = colors.red },
+ ["label"] = { fore = colors.red, bold = true },
+
+ ["nothing"] = { },
+ ["class"] = { fore = colors.black, bold = true },
+ ["function"] = { fore = colors.black, bold = true },
+ ["constant"] = { fore = colors.cyan, bold = true },
+ ["operator"] = { fore = colors.blue },
+ ["regex"] = { fore = colors.magenta },
+ ["preprocessor"] = { fore = colors.yellow, bold = true },
+ ["tag"] = { fore = colors.cyan },
+ ["type"] = { fore = colors.blue },
+ ["variable"] = { fore = colors.black },
+ ["identifier"] = { },
+
+ ["linenumber"] = { back = colors.linepanel },
+ ["bracelight"] = { fore = colors.orange, bold = true },
+ ["bracebad"] = { fore = colors.orange, bold = true },
+ ["controlchar"] = { },
+ ["indentguide"] = { fore = colors.linepanel, back = colors.white },
+ ["calltip"] = { fore = colors.white, back = colors.tippanel },
+
+ ["invisible"] = { back = colors.orange },
+ ["quote"] = { fore = colors.blue, bold = true },
+ ["special"] = { fore = colors.blue },
+ ["extra"] = { fore = colors.yellow },
+ ["embedded"] = { fore = colors.black, bold = true },
+ ["char"] = { fore = colors.magenta },
+ ["reserved"] = { fore = colors.magenta, bold = true },
+ ["definition"] = { fore = colors.black, bold = true },
+ ["okay"] = { fore = colors.dark },
+ ["warning"] = { fore = colors.orange },
+ ["standout"] = { fore = colors.orange, bold = true },
+ ["command"] = { fore = colors.green, bold = true },
+ ["internal"] = { fore = colors.orange, bold = true },
+ ["preamble"] = { fore = colors.yellow },
+ ["grouping"] = { fore = colors.red },
+ ["primitive"] = { fore = colors.blue, bold = true },
+ ["plain"] = { fore = colors.dark, bold = true },
+ ["user"] = { fore = colors.green },
+ ["data"] = { fore = colors.cyan, bold = true },
+
+ -- equal to default:
+
+ ["text"] = { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel },
+ ["text"] = { font = font_name, size = font_size, fore = colors.black },
+
+}
+
+local properties = {
+ ["fold.by.parsing"] = 1,
+ ["fold.by.indentation"] = 0,
+ ["fold.by.line"] = 0,
+ ["fold.line.comments"] = 0,
+ --
+ ["lexer.context.log"] = 1, -- log errors and warnings
+ ["lexer.context.trace"] = 0, -- show loading, initializations etc
+ ["lexer.context.detail"] = 0, -- show more detail when tracing
+ ["lexer.context.show"] = 0, -- show result of lexing
+ ["lexer.context.collapse"] = 0, -- make lexing results somewhat more efficient
+ ["lexer.context.inspect"] = 0, -- show some info about lexer (styles and so)
+ --
+-- ["lexer.context.log"] = 1, -- log errors and warnings
+-- ["lexer.context.trace"] = 1, -- show loading, initializations etc
+}
+
+local lexer = lexer or require("lexer")
+local context = lexer.context
+
+if context then
+ context.inform("loading context (style) properties")
+ if context.registerstyles then
+ context.registerstyles(styles)
+ end
+ if context.registerproperties then
+ context.registerproperties(properties)
+ end
+end
+
diff --git a/context/data/scite/context/scite-context-data-context.properties b/context/data/scite/context/scite-context-data-context.properties
new file mode 100644
index 000000000..3e53862f7
--- /dev/null
+++ b/context/data/scite/context/scite-context-data-context.properties
@@ -0,0 +1,193 @@
+keywordclass.context.constants=\
+zerocount minusone minustwo plusone \
+plustwo plusthree plusfour plusfive plussix \
+plusseven pluseight plusnine plusten plussixteen \
+plushundred plusthousand plustenthousand plustwentythousand medcard \
+maxcard zeropoint onepoint halfapoint onebasepoint \
+maxdimen scaledpoint thousandpoint points halfpoint \
+zeroskip zeromuskip onemuskip pluscxxvii pluscxxviii \
+pluscclv pluscclvi normalpagebox endoflinetoken outputnewlinechar \
+emptytoks empty undefined voidbox emptybox \
+emptyvbox emptyhbox bigskipamount medskipamount smallskipamount \
+fmtname fmtversion texengine texenginename texengineversion \
+luatexengine pdftexengine xetexengine unknownengine etexversion \
+pdftexversion xetexversion xetexrevision activecatcode bgroup \
+egroup endline conditionaltrue conditionalfalse attributeunsetvalue \
+uprotationangle rightrotationangle downrotationangle leftrotationangle inicatcodes \
+ctxcatcodes texcatcodes notcatcodes txtcatcodes vrbcatcodes \
+prtcatcodes nilcatcodes luacatcodes tpacatcodes tpbcatcodes \
+xmlcatcodes ctdcatcodes escapecatcode begingroupcatcode endgroupcatcode \
+mathshiftcatcode alignmentcatcode endoflinecatcode parametercatcode superscriptcatcode \
+subscriptcatcode ignorecatcode spacecatcode lettercatcode othercatcode \
+activecatcode commentcatcode invalidcatcode tabasciicode newlineasciicode \
+formfeedasciicode endoflineasciicode endoffileasciicode spaceasciicode hashasciicode \
+dollarasciicode commentasciicode ampersandasciicode colonasciicode backslashasciicode \
+circumflexasciicode underscoreasciicode leftbraceasciicode barasciicode rightbraceasciicode \
+tildeasciicode delasciicode lessthanasciicode morethanasciicode doublecommentsignal \
+atsignasciicode exclamationmarkasciicode questionmarkasciicode doublequoteasciicode singlequoteasciicode \
+forwardslashasciicode primeasciicode activemathcharcode activetabtoken activeformfeedtoken \
+activeendoflinetoken batchmodecode nonstopmodecode scrollmodecode errorstopmodecode \
+bottomlevelgroupcode simplegroupcode hboxgroupcode adjustedhboxgroupcode vboxgroupcode \
+vtopgroupcode aligngroupcode noaligngroupcode outputgroupcode mathgroupcode \
+discretionarygroupcode insertgroupcode vcentergroupcode mathchoicegroupcode semisimplegroupcode \
+mathshiftgroupcode mathleftgroupcode vadjustgroupcode charnodecode hlistnodecode \
+vlistnodecode rulenodecode insertnodecode marknodecode adjustnodecode \
+ligaturenodecode discretionarynodecode whatsitnodecode mathnodecode gluenodecode \
+kernnodecode penaltynodecode unsetnodecode mathsnodecode charifcode \
+catifcode numifcode dimifcode oddifcode vmodeifcode \
+hmodeifcode mmodeifcode innerifcode voidifcode hboxifcode \
+vboxifcode xifcode eofifcode trueifcode falseifcode \
+caseifcode definedifcode csnameifcode fontcharifcode fontslantperpoint \
+fontinterwordspace fontinterwordstretch fontinterwordshrink fontexheight fontemwidth \
+fontextraspace slantperpoint interwordspace interwordstretch interwordshrink \
+exheight emwidth extraspace mathsupdisplay mathsupnormal \
+mathsupcramped mathsubnormal mathsubcombined mathaxisheight startmode \
+stopmode startnotmode stopnotmode startmodeset stopmodeset \
+doifmode doifmodeelse doifnotmode startmodeset stopmodeset \
+startallmodes stopallmodes startnotallmodes stopnotallmodes doifallmodes \
+doifallmodeselse doifnotallmodes startenvironment stopenvironment environment \
+startcomponent stopcomponent component startproduct stopproduct \
+product startproject stopproject project starttext \
+stoptext startnotext stopnotext startdocument stopdocument \
+documentvariable setupdocument startmodule stopmodule usemodule \
+usetexmodule useluamodule setupmodule currentmoduleparameter moduleparameter \
+startTEXpage stopTEXpage enablemode disablemode preventmode \
+globalenablemode globaldisablemode globalpreventmode pushmode popmode \
+typescriptone typescripttwo typescriptthree mathsizesuffix mathordcode \
+mathopcode mathbincode mathrelcode mathopencode mathclosecode \
+mathpunctcode mathalphacode mathinnercode mathnothingcode mathlimopcode \
+mathnolopcode mathboxcode mathchoicecode mathaccentcode mathradicalcode \
+constantnumber constantnumberargument constantdimen constantdimenargument constantemptyargument \
+continueifinputfile luastringsep !!bs !!es lefttorightmark \
+righttoleftmark breakablethinspace nobreakspace narrownobreakspace zerowidthnobreakspace \
+ideographicspace ideographichalffillspace twoperemspace threeperemspace fourperemspace \
+fiveperemspace sixperemspace figurespace punctuationspace hairspace \
+zerowidthspace zerowidthnonjoiner zerowidthjoiner zwnj zwj
+
+keywordclass.context.helpers=\
+startsetups stopsetups startxmlsetups stopxmlsetups \
+startluasetups stopluasetups starttexsetups stoptexsetups startrawsetups \
+stoprawsetups startlocalsetups stoplocalsetups starttexdefinition stoptexdefinition \
+starttexcode stoptexcode startcontextcode stopcontextcode startcontextdefinitioncode \
+stopcontextdefinitioncode doifsetupselse doifsetups doifnotsetups setup \
+setups texsetup xmlsetup luasetup directsetup \
+doifelsecommandhandler doifnotcommandhandler doifcommandhandler newmode setmode \
+resetmode newsystemmode setsystemmode resetsystemmode pushsystemmode \
+popsystemmode booleanmodevalue newcount newdimen newskip \
+newmuskip newbox newtoks newread newwrite \
+newmarks newinsert newattribute newif newlanguage \
+newfamily newfam newhelp then begcsname \
+strippedcsname firstargumentfalse firstargumenttrue secondargumentfalse secondargumenttrue \
+thirdargumentfalse thirdargumenttrue fourthargumentfalse fourthargumenttrue fifthargumentfalse \
+fifthsargumenttrue sixthargumentfalse sixtsargumenttrue doglobal dodoglobal \
+redoglobal resetglobal donothing dontcomplain forgetall \
+donetrue donefalse htdp unvoidbox hfilll \
+vfilll mathbox mathlimop mathnolop mathnothing \
+mathalpha currentcatcodetable defaultcatcodetable catcodetablename newcatcodetable \
+startcatcodetable stopcatcodetable startextendcatcodetable stopextendcatcodetable pushcatcodetable \
+popcatcodetable restorecatcodes setcatcodetable letcatcodecommand defcatcodecommand \
+uedcatcodecommand hglue vglue hfillneg vfillneg \
+hfilllneg vfilllneg ruledhss ruledhfil ruledhfill \
+ruledhfilneg ruledhfillneg normalhfillneg ruledvss ruledvfil \
+ruledvfill ruledvfilneg ruledvfillneg normalvfillneg ruledhbox \
+ruledvbox ruledvtop ruledvcenter ruledmbox ruledhskip \
+ruledvskip ruledkern ruledmskip ruledmkern ruledhglue \
+ruledvglue normalhglue normalvglue ruledpenalty filledhboxb \
+filledhboxr filledhboxg filledhboxc filledhboxm filledhboxy \
+filledhboxk scratchcounter globalscratchcounter scratchdimen globalscratchdimen \
+scratchskip globalscratchskip scratchmuskip globalscratchmuskip scratchtoks \
+globalscratchtoks scratchbox globalscratchbox normalbaselineskip normallineskip \
+normallineskiplimit availablehsize localhsize setlocalhsize nextbox \
+dowithnextbox dowithnextboxcs dowithnextboxcontent dowithnextboxcontentcs scratchwidth \
+scratchheight scratchdepth scratchoffset scratchdistance scratchhsize \
+scratchvsize scratchxoffset scratchyoffset scratchhoffset scratchvoffset \
+scratchxposition scratchyposition scratchtopoffset scratchbottomoffset scratchleftoffset \
+scratchrightoffset scratchcounterone scratchcountertwo scratchcounterthree scratchdimenone \
+scratchdimentwo scratchdimenthree scratchskipone scratchskiptwo scratchskipthree \
+scratchmuskipone scratchmuskiptwo scratchmuskipthree scratchtoksone scratchtokstwo \
+scratchtoksthree scratchboxone scratchboxtwo scratchboxthree scratchnx \
+scratchny scratchmx scratchmy scratchunicode scratchleftskip \
+scratchrightskip scratchtopskip scratchbottomskip doif doifnot \
+doifelse doifinset doifnotinset doifinsetelse doifnextcharelse \
+doifnextoptionalelse doifnextoptionalcselse doiffastoptionalcheckelse doifnextbgroupelse doifnextbgroupcselse \
+doifnextparenthesiselse doifundefinedelse doifdefinedelse doifundefined doifdefined \
+doifelsevalue doifvalue doifnotvalue doifnothing doifsomething \
+doifelsenothing doifsomethingelse doifvaluenothing doifvaluesomething doifelsevaluenothing \
+doifdimensionelse doifnumberelse doifnumber doifnotnumber doifcommonelse \
+doifcommon doifnotcommon doifinstring doifnotinstring doifinstringelse \
+doifassignmentelse docheckassignment tracingall tracingnone loggingall \
+removetoks appendtoks prependtoks appendtotoks prependtotoks \
+to endgraf endpar everyendpar reseteverypar \
+finishpar empty null space quad \
+enspace obeyspaces obeylines obeyedspace obeyedline \
+normalspace executeifdefined singleexpandafter doubleexpandafter tripleexpandafter \
+dontleavehmode removelastspace removeunwantedspaces keepunwantedspaces wait \
+writestatus define defineexpandable redefine setmeasure \
+setemeasure setgmeasure setxmeasure definemeasure freezemeasure \
+measure measured installcorenamespace getvalue getuvalue \
+setvalue setevalue setgvalue setxvalue letvalue \
+letgvalue resetvalue undefinevalue ignorevalue setuvalue \
+setuevalue setugvalue setuxvalue globallet glet \
+udef ugdef uedef uxdef checked \
+unique getparameters geteparameters getgparameters getxparameters \
+forgetparameters copyparameters getdummyparameters dummyparameter directdummyparameter \
+setdummyparameter letdummyparameter usedummystyleandcolor usedummystyleparameter usedummycolorparameter \
+processcommalist processcommacommand quitcommalist quitprevcommalist processaction \
+processallactions processfirstactioninset processallactionsinset unexpanded expanded \
+startexpanded stopexpanded protected protect unprotect \
+firstofoneargument firstoftwoarguments secondoftwoarguments firstofthreearguments secondofthreearguments \
+thirdofthreearguments firstoffourarguments secondoffourarguments thirdoffourarguments fourthoffourarguments \
+firstoffivearguments secondoffivearguments thirdoffivearguments fourthoffivearguments fifthoffivearguments \
+firstofsixarguments secondofsixarguments thirdofsixarguments fourthofsixarguments fifthofsixarguments \
+sixthofsixarguments firstofoneunexpanded gobbleoneargument gobbletwoarguments gobblethreearguments \
+gobblefourarguments gobblefivearguments gobblesixarguments gobblesevenarguments gobbleeightarguments \
+gobbleninearguments gobbletenarguments gobbleoneoptional gobbletwooptionals gobblethreeoptionals \
+gobblefouroptionals gobblefiveoptionals dorecurse doloop exitloop \
+dostepwiserecurse recurselevel recursedepth dofastloopcs dowith \
+newconstant setnewconstant setconstant setconstantvalue newconditional \
+settrue setfalse settruevalue setfalsevalue newmacro \
+setnewmacro newfraction newsignal dosingleempty dodoubleempty \
+dotripleempty doquadrupleempty doquintupleempty dosixtupleempty doseventupleempty \
+dosingleargument dodoubleargument dotripleargument doquadrupleargument doquintupleargument \
+dosixtupleargument doseventupleargument dosinglegroupempty dodoublegroupempty dotriplegroupempty \
+doquadruplegroupempty doquintuplegroupempty permitspacesbetweengroups dontpermitspacesbetweengroups nopdfcompression \
+maximumpdfcompression normalpdfcompression modulonumber dividenumber getfirstcharacter \
+doiffirstcharelse startnointerference stopnointerference twodigits threedigits \
+leftorright strut setstrut strutbox strutht \
+strutdp strutwd struthtdp begstrut endstrut \
+lineheight ordordspacing ordopspacing ordbinspacing ordrelspacing \
+ordopenspacing ordclosespacing ordpunctspacing ordinnerspacing opordspacing \
+opopspacing opbinspacing oprelspacing opopenspacing opclosespacing \
+oppunctspacing opinnerspacing binordspacing binopspacing binbinspacing \
+binrelspacing binopenspacing binclosespacing binpunctspacing bininnerspacing \
+relordspacing relopspacing relbinspacing relrelspacing relopenspacing \
+relclosespacing relpunctspacing relinnerspacing openordspacing openopspacing \
+openbinspacing openrelspacing openopenspacing openclosespacing openpunctspacing \
+openinnerspacing closeordspacing closeopspacing closebinspacing closerelspacing \
+closeopenspacing closeclosespacing closepunctspacing closeinnerspacing punctordspacing \
+punctopspacing punctbinspacing punctrelspacing punctopenspacing punctclosespacing \
+punctpunctspacing punctinnerspacing innerordspacing inneropspacing innerbinspacing \
+innerrelspacing inneropenspacing innerclosespacing innerpunctspacing innerinnerspacing \
+normalreqno startimath stopimath normalstartimath normalstopimath \
+startdmath stopdmath normalstartdmath normalstopdmath uncramped \
+cramped triggermathstyle mathstylefont mathsmallstylefont mathstyleface \
+mathsmallstyleface mathstylecommand mathpalette mathstylehbox mathstylevbox \
+mathstylevcenter mathstylevcenteredhbox mathstylevcenteredvbox mathtext setmathsmalltextbox \
+setmathtextbox triggerdisplaystyle triggertextstyle triggerscriptstyle triggerscriptscriptstyle \
+triggeruncrampedstyle triggercrampedstyle triggersmallstyle triggeruncrampedsmallstyle triggercrampedsmallstyle \
+triggerbigstyle triggeruncrampedbigstyle triggercrampedbigstyle luaexpr expdoifelse \
+expdoif expdoifnot expdoifcommonelse expdoifinsetelse ctxdirectlua \
+ctxlatelua ctxsprint ctxwrite ctxcommand ctxdirectcommand \
+ctxlatecommand ctxreport ctxlua luacode lateluacode \
+directluacode registerctxluafile ctxloadluafile luaversion luamajorversion \
+luaminorversion ctxluacode luaconditional luaexpanded startluaparameterset \
+stopluaparameterset luaparameterset definenamedlua obeylualines obeyluatokens \
+startluacode stopluacode startlua stoplua startctxfunction \
+stopctxfunction ctxfunction startctxfunctiondefinition stopctxfunctiondefinition carryoverpar \
+assumelongusagecs Umathbotaccent righttolefthbox lefttorighthbox righttoleftvbox \
+lefttorightvbox righttoleftvtop lefttorightvtop rtlhbox ltrhbox \
+rtlvbox ltrvbox rtlvtop ltrvtop autodirhbox \
+autodirvbox autodirvtop lefttoright righttoleft synchronizelayoutdirection \
+synchronizedisplaydirection synchronizeinlinedirection lesshyphens morehyphens nohyphens \
+dohyphens Ucheckedstartdisplaymath Ucheckedstopdisplaymath
+
diff --git a/context/data/scite/scite-context-data-interfaces.properties b/context/data/scite/context/scite-context-data-interfaces.properties
index 9c2ca4623..9c2ca4623 100644
--- a/context/data/scite/scite-context-data-interfaces.properties
+++ b/context/data/scite/context/scite-context-data-interfaces.properties
diff --git a/context/data/scite/scite-context-data-metafun.properties b/context/data/scite/context/scite-context-data-metafun.properties
index 9381b4f8d..9381b4f8d 100644
--- a/context/data/scite/scite-context-data-metafun.properties
+++ b/context/data/scite/context/scite-context-data-metafun.properties
diff --git a/context/data/scite/scite-context-data-metapost.properties b/context/data/scite/context/scite-context-data-metapost.properties
index 88ace57ca..88ace57ca 100644
--- a/context/data/scite/scite-context-data-metapost.properties
+++ b/context/data/scite/context/scite-context-data-metapost.properties
diff --git a/context/data/scite/scite-context-data-tex.properties b/context/data/scite/context/scite-context-data-tex.properties
index 195125433..d1780794d 100644
--- a/context/data/scite/scite-context-data-tex.properties
+++ b/context/data/scite/context/scite-context-data-tex.properties
@@ -50,10 +50,10 @@ attribute attributedef catcodetable clearmarks crampeddisplaystyle \
crampedscriptscriptstyle crampedscriptstyle crampedtextstyle fontid formatname \
gleaders ifabsdim ifabsnum ifprimitive initcatcodetable \
latelua luaescapestring luastartup luatexdatestamp luatexrevision \
-luatexversion mathstyle nokerns noligs outputbox \
-pageleftoffset pagetopoffset postexhyphenchar posthyphenchar preexhyphenchar \
-prehyphenchar primitive savecatcodetable scantextokens suppressfontnotfounderror \
-suppressifcsnameerror suppresslongerror suppressoutererror synctex
+luatexversion luafunction mathstyle nokerns noligs \
+outputbox pageleftoffset pagetopoffset postexhyphenchar posthyphenchar \
+preexhyphenchar prehyphenchar primitive savecatcodetable scantextokens \
+suppressfontnotfounderror suppressifcsnameerror suppresslongerror suppressoutererror synctex
keywordclass.tex.omega=\
OmegaVersion bodydir chardp charht \
@@ -124,114 +124,113 @@ attribute attributedef badness baselineskip batchmode \
begingroup belowdisplayshortskip belowdisplayskip binoppenalty bodydir \
botmark botmarks box boxdir boxmaxdepth \
brokenpenalty catcode catcodetable char chardef \
-chardp charht charit charwd cleaders \
-clearmarks closein closeout clubpenalties clubpenalty \
-copy count countdef cr crampeddisplaystyle \
-crampedscriptscriptstyle crampedscriptstyle crampedtextstyle crcr csname \
-currentgrouplevel currentgrouptype currentifbranch currentiflevel currentiftype \
-day deadcycles def defaulthyphenchar defaultskewchar \
-delcode delimiter delimiterfactor delimitershortfall detokenize \
-dimen dimendef dimexpr directlua discretionary \
-displayindent displaylimits displaystyle displaywidowpenalties displaywidowpenalty \
-displaywidth divide doublehyphendemerits dp dump \
-eTeXVersion eTeXminorversion eTeXrevision eTeXversion edef \
-efcode else emergencystretch end endcsname \
-endgroup endinput endlinechar eqno errhelp \
-errmessage errorcontextlines errorstopmode escapechar everycr \
-everydisplay everyeof everyhbox everyjob everymath \
-everypar everyvbox exhyphenchar exhyphenpenalty expandafter \
-expanded fam fi finalhyphendemerits firstmark \
-firstmarks floatingpenalty font fontchardp fontcharht \
-fontcharic fontcharwd fontdimen fontid fontname \
-formatname futurelet gdef gleaders global \
-globaldefs glueexpr glueshrink glueshrinkorder gluestretch \
-gluestretchorder gluetomu halign hangafter hangindent \
-hbadness hbox hfil hfill hfilneg \
-hfuzz hoffset holdinginserts hrule hsize \
-hskip hss ht hyphenation hyphenchar \
-hyphenpenalty if ifabsdim ifabsnum ifcase \
-ifcat ifcsname ifdefined ifdim ifeof \
-iffalse iffontchar ifhbox ifhmode ifincsname \
-ifinner ifmmode ifnum ifodd ifpdfabsdim \
-ifpdfabsnum ifpdfprimitive ifprimitive iftrue ifvbox \
-ifvmode ifvoid ifx ignorespaces immediate \
-indent initcatcodetable input inputlineno insert \
-insertpenalties interactionmode interlinepenalties interlinepenalty jobname \
-kern language lastbox lastkern lastlinefit \
-lastnodetype lastpenalty lastskip latelua lccode \
-leaders left leftghost lefthyphenmin leftmarginkern \
-leftskip leqno let letterspacefont limits \
-linepenalty lineskip lineskiplimit localbrokenpenalty localinterlinepenalty \
-localleftbox localrightbox long looseness lower \
-lowercase lpcode luaescapestring luastartup luatexdatestamp \
-luatexrevision luatexversion mag mark marks \
-mathaccent mathbin mathchar mathchardef mathchoice \
-mathclose mathcode mathdir mathinner mathop \
-mathopen mathord mathpunct mathrel mathstyle \
-mathsurround maxdeadcycles maxdepth meaning medmuskip \
-message middle mkern month moveleft \
-moveright mskip muexpr multiply muskip \
-muskipdef mutoglue newlinechar noalign noboundary \
-noexpand noindent nokerns noligs nolimits \
-nolocaldirs nolocalwhatsits nonscript nonstopmode nulldelimiterspace \
-nullfont number numexpr odelcode odelimiter \
-omathaccent omathchar omathchardef omathcode omit \
-openin openout or oradical outer \
-output outputbox outputpenalty over overfullrule \
-overline overwithdelims pagebottomoffset pagedepth pagedir \
-pagediscards pagefilllstretch pagefillstretch pagefilstretch pagegoal \
-pageheight pageleftoffset pagerightoffset pageshrink pagestretch \
-pagetopoffset pagetotal pagewidth par pardir \
-parfillskip parindent parshape parshapedimen parshapeindent \
-parshapelength parskip patterns pausing pdfadjustspacing \
-pdfannot pdfcatalog pdfcolorstack pdfcolorstackinit pdfcompresslevel \
-pdfcopyfont pdfcreationdate pdfdecimaldigits pdfdest pdfdestmargin \
-pdfdraftmode pdfeachlinedepth pdfeachlineheight pdfendlink pdfendthread \
-pdffirstlineheight pdffontattr pdffontexpand pdffontname pdffontobjnum \
-pdffontsize pdfgamma pdfgentounicode pdfglyphtounicode pdfhorigin \
-pdfignoreddimen pdfimageapplygamma pdfimagegamma pdfimagehicolor pdfimageresolution \
-pdfincludechars pdfinclusioncopyfonts pdfinclusionerrorlevel pdfinfo pdfinsertht \
-pdflastannot pdflastlinedepth pdflastlink pdflastobj pdflastxform \
-pdflastximage pdflastximagecolordepth pdflastximagepages pdflastxpos pdflastypos \
-pdflinkmargin pdfliteral pdfmapfile pdfmapline pdfminorversion \
-pdfnames pdfnoligatures pdfnormaldeviate pdfobj pdfobjcompresslevel \
-pdfoptionpdfminorversion pdfoutline pdfoutput pdfpageattr pdfpagebox \
-pdfpageheight pdfpageref pdfpageresources pdfpagesattr pdfpagewidth \
-pdfpkmode pdfpkresolution pdfprimitive pdfprotrudechars pdfpxdimen \
-pdfrandomseed pdfrefobj pdfrefxform pdfrefximage pdfreplacefont \
-pdfrestore pdfretval pdfsave pdfsavepos pdfsetmatrix \
-pdfsetrandomseed pdfstartlink pdfstartthread pdftexbanner pdftexrevision \
-pdftexversion pdfthread pdfthreadmargin pdftracingfonts pdftrailer \
-pdfuniformdeviate pdfuniqueresname pdfvorigin pdfxform pdfxformattr \
-pdfxformname pdfxformresources pdfximage pdfximagebbox penalty \
-postdisplaypenalty postexhyphenchar posthyphenchar predisplaydirection predisplaypenalty \
-predisplaysize preexhyphenchar prehyphenchar pretolerance prevdepth \
-prevgraf primitive protected quitvmode radical \
-raise read readline relax relpenalty \
-right rightghost righthyphenmin rightmarginkern rightskip \
-romannumeral rpcode savecatcodetable savinghyphcodes savingvdiscards \
-scantextokens scantokens scriptfont scriptscriptfont scriptscriptstyle \
-scriptspace scriptstyle scrollmode setbox setlanguage \
-sfcode shipout show showbox showboxbreadth \
-showboxdepth showgroups showifs showlists showthe \
-showtokens skewchar skip skipdef spacefactor \
-spaceskip span special splitbotmark splitbotmarks \
-splitdiscards splitfirstmark splitfirstmarks splitmaxdepth splittopskip \
-string suppressfontnotfounderror suppressifcsnameerror suppresslongerror suppressoutererror \
-synctex tabskip tagcode textdir textfont \
-textstyle the thickmuskip thinmuskip time \
-toks toksdef tolerance topmark topmarks \
-topskip tracingassigns tracingcommands tracinggroups tracingifs \
-tracinglostchars tracingmacros tracingnesting tracingonline tracingoutput \
-tracingpages tracingparagraphs tracingrestores tracingscantokens tracingstats \
-uccode uchyph underline unexpanded unhbox \
-unhcopy unkern unless unpenalty unskip \
-unvbox unvcopy uppercase vadjust valign \
-vbadness vbox vcenter vfil vfill \
-vfilneg vfuzz voffset vrule vsize \
-vskip vsplit vss vtop wd \
-widowpenalties widowpenalty write xdef xleaders \
-xspaceskip year
+cleaders clearmarks closein closeout clubpenalties \
+clubpenalty copy count countdef cr \
+crampeddisplaystyle crampedscriptscriptstyle crampedscriptstyle crampedtextstyle crcr \
+csname currentgrouplevel currentgrouptype currentifbranch currentiflevel \
+currentiftype day deadcycles def defaulthyphenchar \
+defaultskewchar delcode delimiter delimiterfactor delimitershortfall \
+detokenize dimen dimendef dimexpr directlua \
+discretionary displayindent displaylimits displaystyle displaywidowpenalties \
+displaywidowpenalty displaywidth divide doublehyphendemerits dp \
+dump eTeXVersion eTeXminorversion eTeXrevision eTeXversion \
+edef efcode else emergencystretch end \
+endcsname endgroup endinput endlinechar eqno \
+errhelp errmessage errorcontextlines errorstopmode escapechar \
+everycr everydisplay everyeof everyhbox everyjob \
+everymath everypar everyvbox exhyphenchar exhyphenpenalty \
+expandafter expanded fam fi finalhyphendemerits \
+firstmark firstmarks floatingpenalty font fontchardp \
+fontcharht fontcharic fontcharwd fontdimen fontid \
+fontname formatname futurelet gdef gleaders \
+global globaldefs glueexpr glueshrink glueshrinkorder \
+gluestretch gluestretchorder gluetomu halign hangafter \
+hangindent hbadness hbox hfil hfill \
+hfilneg hfuzz hoffset holdinginserts hrule \
+hsize hskip hss ht hyphenation \
+hyphenchar hyphenpenalty if ifabsdim ifabsnum \
+ifcase ifcat ifcsname ifdefined ifdim \
+ifeof iffalse iffontchar ifhbox ifhmode \
+ifincsname ifinner ifmmode ifnum ifodd \
+ifpdfabsdim ifpdfabsnum ifpdfprimitive ifprimitive iftrue \
+ifvbox ifvmode ifvoid ifx ignorespaces \
+immediate indent initcatcodetable input inputlineno \
+insert insertpenalties interactionmode interlinepenalties interlinepenalty \
+jobname kern language lastbox lastkern \
+lastlinefit lastnodetype lastpenalty lastskip latelua \
+lccode leaders left leftghost lefthyphenmin \
+leftmarginkern leftskip leqno let letterspacefont \
+limits linepenalty lineskip lineskiplimit localbrokenpenalty \
+localinterlinepenalty localleftbox localrightbox long looseness \
+lower lowercase lpcode luaescapestring luastartup \
+luatexdatestamp luatexrevision luatexversion mag mark \
+marks mathaccent mathbin mathchar mathchardef \
+mathchoice mathclose mathcode mathdir mathinner \
+mathop mathopen mathord mathpunct mathrel \
+mathstyle mathsurround maxdeadcycles maxdepth meaning \
+medmuskip message middle mkern month \
+moveleft moveright mskip muexpr multiply \
+muskip muskipdef mutoglue newlinechar noalign \
+noboundary noexpand noindent nokerns noligs \
+nolimits nolocaldirs nolocalwhatsits nonscript nonstopmode \
+nulldelimiterspace nullfont number numexpr odelcode \
+odelimiter omathaccent omathchar omathchardef omathcode \
+omit openin openout or oradical \
+outer output outputbox outputpenalty over \
+overfullrule overline overwithdelims pagebottomoffset pagedepth \
+pagedir pagediscards pagefilllstretch pagefillstretch pagefilstretch \
+pagegoal pageheight pageleftoffset pagerightoffset pageshrink \
+pagestretch pagetopoffset pagetotal pagewidth par \
+pardir parfillskip parindent parshape parshapedimen \
+parshapeindent parshapelength parskip patterns pausing \
+pdfadjustspacing pdfannot pdfcatalog pdfcolorstack pdfcolorstackinit \
+pdfcompresslevel pdfcopyfont pdfcreationdate pdfdecimaldigits pdfdest \
+pdfdestmargin pdfdraftmode pdfeachlinedepth pdfeachlineheight pdfendlink \
+pdfendthread pdffirstlineheight pdffontattr pdffontexpand pdffontname \
+pdffontobjnum pdffontsize pdfgamma pdfgentounicode pdfglyphtounicode \
+pdfhorigin pdfignoreddimen pdfimageapplygamma pdfimagegamma pdfimagehicolor \
+pdfimageresolution pdfincludechars pdfinclusioncopyfonts pdfinclusionerrorlevel pdfinfo \
+pdfinsertht pdflastannot pdflastlinedepth pdflastlink pdflastobj \
+pdflastxform pdflastximage pdflastximagecolordepth pdflastximagepages pdflastxpos \
+pdflastypos pdflinkmargin pdfliteral pdfmapfile pdfmapline \
+pdfminorversion pdfnames pdfnoligatures pdfnormaldeviate pdfobj \
+pdfobjcompresslevel pdfoptionpdfminorversion pdfoutline pdfoutput pdfpageattr \
+pdfpagebox pdfpageheight pdfpageref pdfpageresources pdfpagesattr \
+pdfpagewidth pdfpkmode pdfpkresolution pdfprimitive pdfprotrudechars \
+pdfpxdimen pdfrandomseed pdfrefobj pdfrefxform pdfrefximage \
+pdfreplacefont pdfrestore pdfretval pdfsave pdfsavepos \
+pdfsetmatrix pdfsetrandomseed pdfstartlink pdfstartthread pdftexbanner \
+pdftexrevision pdftexversion pdfthread pdfthreadmargin pdftracingfonts \
+pdftrailer pdfuniformdeviate pdfuniqueresname pdfvorigin pdfxform \
+pdfxformattr pdfxformname pdfxformresources pdfximage pdfximagebbox \
+penalty postdisplaypenalty postexhyphenchar posthyphenchar predisplaydirection \
+predisplaypenalty predisplaysize preexhyphenchar prehyphenchar pretolerance \
+prevdepth prevgraf primitive protected quitvmode \
+radical raise read readline relax \
+relpenalty right rightghost righthyphenmin rightmarginkern \
+rightskip romannumeral rpcode savecatcodetable savinghyphcodes \
+savingvdiscards scantextokens scantokens scriptfont scriptscriptfont \
+scriptscriptstyle scriptspace scriptstyle scrollmode setbox \
+setlanguage sfcode shipout show showbox \
+showboxbreadth showboxdepth showgroups showifs showlists \
+showthe showtokens skewchar skip skipdef \
+spacefactor spaceskip span special splitbotmark \
+splitbotmarks splitdiscards splitfirstmark splitfirstmarks splitmaxdepth \
+splittopskip string suppressfontnotfounderror suppressifcsnameerror suppresslongerror \
+suppressoutererror synctex tabskip tagcode textdir \
+textfont textstyle the thickmuskip thinmuskip \
+time toks toksdef tolerance topmark \
+topmarks topskip tracingassigns tracingcommands tracinggroups \
+tracingifs tracinglostchars tracingmacros tracingnesting tracingonline \
+tracingoutput tracingpages tracingparagraphs tracingrestores tracingscantokens \
+tracingstats uccode uchyph underline unexpanded \
+unhbox unhcopy unkern unless unpenalty \
+unskip unvbox unvcopy uppercase vadjust \
+valign vbadness vbox vcenter vfil \
+vfill vfilneg vfuzz voffset vrule \
+vsize vskip vsplit vss vtop \
+wd widowpenalties widowpenalty write xdef \
+xleaders xspaceskip year
keywordclass.tex.xetex=\
XeTeXversion
diff --git a/context/data/scite/scite-context-external.properties b/context/data/scite/context/scite-context-external.properties
index 5c7149341..c7d0c4a17 100644
--- a/context/data/scite/scite-context-external.properties
+++ b/context/data/scite/context/scite-context-external.properties
@@ -1,36 +1,46 @@
# external lpeg lexers
-import $(SciteDefaultHome)/lexers/lpeg
+lexer.lpeg.home=$(SciteDefaultHome)/context/lexers
-lexer.lpeg.home=$(SciteDefaultHome)/lexers
+lexer.lpeg.color.theme=scite-context-theme
+# lexer.lpeg.color.theme=$(SciteDefaultHome)/context/lexers/themes/scite-context-theme.lua
-# # pre 3.03:
-#
-#~ lexer.lpeg.script=$(lexer.lpeg.home)/scite-context-lexer.lua
-#
-# # post 3.03:
-#
-lexer.lpeg.script=$(lexer.lpeg.home)/lexer.lua
-#
-# where we load the extensions in the lexers themselves.
-
-lexer.lpeg.color.theme=$(lexer.lpeg.home)/themes/scite-context-theme.lua
-
-# alas, only a few properties are passed (only indentation)
+# The lexer dll no longer interfaces to the following properties. It never had a full
+# interface, so maybe I'll make my own.
fold.by.parsing=1
fold.by.indentation=0
fold.by.line=0
+fold.line.comments=0
+
+# you can put the dll/so file in the <scitehome>/context/lexers path or keep it in
+# <scitehome>/lexers
if PLAT_WIN
- lexerpath.*.lpeg=$(lexer.lpeg.home)/LexLPeg.dll
+ lexerpath.*.lpeg=$(lexer.lpeg.home)/../../lexers/lexlpeg.dll
+# lexerpath.*.lpeg=$(lexer.lpeg.home)/lexers/lexlpeg.dll
if PLAT_GTK
- lexerpath.*.lpeg=$(lexer.lpeg.home)/liblexlpeg.so
+ lexerpath.*.lpeg=$(lexer.lpeg.home)/../../lexers/liblexlpeg.so
+# lexerpath.*.lpeg=$(lexer.lpeg.home)/lexers/liblexlpeg.so
+
+# the variable lexer.name is automatically set but I'm not sure what the following
+# one is supposed to do, so we keep it around (same as in lpeg.properties, which we
+# don't load)
lexer.*.lpeg=lpeg
-file.patterns.cweb=*.h;*.c;*.w;*.hh;*.cc;*.ww;*.hpp;*.cpp;*.hxx;*.cxx;
+# in principle you can do the following, as we're mostly compatible with the
+# default lexers, but for a regular context setup the lexers built into scite are
+# just fine, so we only need the dll/so
+#
+# import lexers/lpeg
+
+# patterns should be original (i.e. not clash with the built-in ones)
+
+file.patterns.cweb=*.w;*.ww;
+file.patterns.cpp=*.h;*.c;*.hh;*.cc;*.hpp;*.cpp;*.hxx;*.cxx;
+file.patterns.bib=*.bib
lexer.$(file.patterns.metapost)=lpeg_scite-context-lexer-mps
lexer.$(file.patterns.metafun)=lpeg_scite-context-lexer-mps
@@ -40,18 +50,19 @@ lexer.$(file.patterns.example)=lpeg_scite-context-lexer-xml
lexer.$(file.patterns.text)=lpeg_scite-context-lexer-txt
lexer.$(file.patterns.pdf)=lpeg_scite-context-lexer-pdf
lexer.$(file.patterns.cweb)=lpeg_scite-context-lexer-web
+lexer.$(file.patterns.cpp)=lpeg_scite-context-lexer-cpp
+lexer.$(file.patterns.bib)=lpeg_scite-context-lexer-bibtex
lexer.$(file.patterns.tex)=lpeg_scite-context-lexer-tex
lexer.$(file.patterns.xml)=lpeg_scite-context-lexer-xml
lexer.$(file.patterns.html)=lpeg_scite-context-lexer-xml
-lexer.$(file.patterns.cpp)=lpeg_scite-context-lexer-web
# It's a real pity that we cannot overload the errorlist lexer. That would
# make scite even more interesting. Add to that including lpeg and the lpeg
# lexer and thereby providing an interface to properties.
-# lexer.errorlist=lpeg_scite-context-lexer-txt
-# lexer.output=lpeg_scite-context-lexer-txt
+#~ lexer.errorlist=lpeg_scite-context-lexer-txt
+#~ lexer.output=lpeg_scite-context-lexer-txt
comment.block.lpeg_scite-context-lexer-tex=%
comment.block.at.line.start.lpeg_scite-context-lexer-tex=1
diff --git a/context/data/scite/scite-context-internal.properties b/context/data/scite/context/scite-context-internal.properties
index 130e64f1e..038381dc7 100644
--- a/context/data/scite/scite-context-internal.properties
+++ b/context/data/scite/context/scite-context-internal.properties
@@ -8,8 +8,8 @@
#
# % interface=none|metapost|mp|metafun
-import scite-context-data-metapost
-import scite-context-data-metafun
+import context/scite-context-data-metapost
+import context/scite-context-data-metafun
keywordclass.metapost.all=$(keywordclass.metapost.tex) $(keywordclass.metapost.plain) $(keywordclass.metapost.primitives)
keywordclass.metafun.all=$(keywordclass.metafun.constants) $(keywordclass.metafun.helpers)
@@ -44,9 +44,9 @@ comment.block.at.line.start.metapost=1
#
# % interface=all|nl|en|de|cz|it|ro|latex
-import scite-context-data-tex
-import scite-context-data-context
-import scite-context-data-interfaces
+import context/scite-context-data-tex
+import context/scite-context-data-context
+import context/scite-context-data-interfaces
word.characters.$(file.patterns.context)=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@!?_\\
diff --git a/context/data/scite/context/scite-context-user.properties b/context/data/scite/context/scite-context-user.properties
new file mode 100644
index 000000000..b6fc34282
--- /dev/null
+++ b/context/data/scite/context/scite-context-user.properties
@@ -0,0 +1,15 @@
+# this loads the basics
+
+import context/scite-context
+
+# internal lexing
+
+import context/scite-context-internal
+
+# external lexing (tex, mps, cld/lua, xml)
+
+import context/scite-context-external
+
+# this does some tuning
+
+import context/scite-pragma
diff --git a/context/data/scite/scite-context.properties b/context/data/scite/context/scite-context.properties
index bc1af717c..78850ef0d 100644
--- a/context/data/scite/scite-context.properties
+++ b/context/data/scite/context/scite-context.properties
@@ -66,7 +66,7 @@ open.suffix.$(file.patterns.context)=.tex
# Example : patterns
file.patterns.xml=
-file.patterns.example=*.xml;*.xsl;*.xsd;*.fo;*.exa;*.rlb;*.rlg;*.rlv;*.rng;*.xfdf;*.xslt;*.dtd;*.lmx;*.htm;*.html;*.xhtml*.ctx;*.export;
+file.patterns.example=*.xml;*.xsl;*.xsd;*.fo;*.exa;*.rlb;*.rlg;*.rlv;*.rng;*.xfdf;*.xslt;*.dtd;*.lmx;*.htm;*.html;*.xhtml;*.ctx;*.export;*.svg;*.xul
open.suffix.$(file.patterns.example)=.xml
filter.example=eXaMpLe|$(file.patterns.example)|
#~ lexer.$(file.patterns.example)=xml
@@ -160,7 +160,7 @@ xml.auto.close.tags=1
# extensions
-import scite-ctx
+import context/scite-ctx
# hard coded compile / build / go
@@ -229,14 +229,14 @@ command.groupundo.29.*=yes
command.save.before.29.*=2
command.shortcut.29.*=Alt+F12
-command.name.30.*=Run with jit
-command.subsystem.30.*=1
-command.30.$(file.patterns.context)=$(name.context.runjit) $(FileNameExt)
-command.30.$(file.patterns.metafun)=$(name.context.runjit) $(FileNameExt) --metapost
-command.30.$(file.patterns.exmaple)=$(name.context.runjit) $(FileNameExt) --xml
-command.groupundo.30.*=yes
-command.save.before.30.*=2
-command.shortcut.30.*=Alt+F7
+#~ command.name.30.*=Run with jit
+#~ command.subsystem.30.*=1
+#~ command.30.$(file.patterns.context)=$(name.context.runjit) $(FileNameExt)
+#~ command.30.$(file.patterns.metafun)=$(name.context.runjit) $(FileNameExt) --metapost
+#~ command.30.$(file.patterns.exmaple)=$(name.context.runjit) $(FileNameExt) --xml
+#~ command.groupundo.30.*=yes
+#~ command.save.before.30.*=2
+#~ command.shortcut.30.*=Alt+F7
# 2 : pdf viewing
diff --git a/context/data/scite/scite-ctx-context.properties b/context/data/scite/context/scite-ctx-context.properties
index a1d5800e6..a1d5800e6 100644
--- a/context/data/scite/scite-ctx-context.properties
+++ b/context/data/scite/context/scite-ctx-context.properties
diff --git a/context/data/scite/scite-ctx-example.properties b/context/data/scite/context/scite-ctx-example.properties
index 78b2f2859..78b2f2859 100644
--- a/context/data/scite/scite-ctx-example.properties
+++ b/context/data/scite/context/scite-ctx-example.properties
diff --git a/context/data/scite/scite-ctx.lua b/context/data/scite/context/scite-ctx.lua
index 421e9cd89..24f5b34b8 100644
--- a/context/data/scite/scite-ctx.lua
+++ b/context/data/scite/context/scite-ctx.lua
@@ -1383,3 +1383,13 @@ function toggle_strip(name)
OnStrip = ignore_strip
end
end
+
+-- this way we get proper lexing for lexers that do more extensive
+-- parsing
+
+function OnOpen(filename)
+ -- print("opening: " .. filename .. " (size: " .. editor.TextLength .. ")")
+ editor:Colourise(1,editor.TextLength)
+end
+
+-- output.LexerLanguage = ""
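
The OnOpen hook added above recolourises the whole buffer unconditionally. A minimal
sketch (my own assumption, not part of this commit) would guard that call with a size
threshold, in the spirit of the 512 * 1024 fold thresholds seen in the archived lexer
code below, so that opening a very large file does not trigger a full recolourise in
one go:

-- hypothetical variant: only force a full recolourise for smallish files;
-- the 512K cut-off is an arbitrary assumption
local colourise_threshold = 512 * 1024

function OnOpen(filename)
    if editor.TextLength <= colourise_threshold then
        editor:Colourise(1,editor.TextLength)
    end
end

Whether such a cut-off is needed at all depends on how slow Colourise turns out to be
for the lexers that do extensive parsing; the commit itself keeps things simple and
always recolourises.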
diff --git a/context/data/scite/scite-ctx.properties b/context/data/scite/context/scite-ctx.properties
index acbb33c0b..874a381e3 100644
--- a/context/data/scite/scite-ctx.properties
+++ b/context/data/scite/context/scite-ctx.properties
@@ -12,7 +12,7 @@
# <?xml version='1.0' language='uk' ?>
ext.lua.auto.reload=1
-ext.lua.startup.script=$(SciteDefaultHome)/scite-ctx.lua
+ext.lua.startup.script=$(SciteDefaultHome)/context/scite-ctx.lua
#~ extension.$(file.patterns.context)=scite-ctx.lua
#~ extension.$(file.patterns.example)=scite-ctx.lua
@@ -150,8 +150,8 @@ command.save.before.26.*=2
command.groupundo.26.*=yes
command.shortcut.26.*=Ctrl+E
-import scite-ctx-context
-import scite-ctx-example
+import context/scite-ctx-context
+import context/scite-ctx-example
ctx.template.scan=yes
ctx.template.rescan=no
diff --git a/context/data/scite/scite-metapost.properties b/context/data/scite/context/scite-metapost.properties
index e3ac25244..fc06dcaa2 100644
--- a/context/data/scite/scite-metapost.properties
+++ b/context/data/scite/context/scite-metapost.properties
@@ -69,7 +69,7 @@ lexer.metapost.comment.process=0
# Metapost: keywords
-import scite-context-data-metapost.properties
+import context/scite-context-data-metapost.properties
keywords.$(file.patterns.metapost)=$(keywordclass.metapost.all)
diff --git a/context/data/scite/scite-pragma.properties b/context/data/scite/context/scite-pragma.properties
index 7308f1fb6..2dea18bad 100644
--- a/context/data/scite/scite-pragma.properties
+++ b/context/data/scite/context/scite-pragma.properties
@@ -25,7 +25,9 @@ $(filter.metafun)\
$(filter.example)\
$(filter.lua)\
$(filter.text)\
-$(filter.pdf)
+$(filter.pdf)\
+$(filter.cweb)\
+$(filter.txt)
# Editor: menus
@@ -36,5 +38,4 @@ XML|xml||\
Lua|lua||\
Text|txt||\
PDF|pdf||\
-CWeb|web||\
-Text|txt||
+CWeb|cweb||
diff --git a/context/data/scite/scite-tex.properties b/context/data/scite/context/scite-tex.properties
index 6933971e2..7d271eaf1 100644
--- a/context/data/scite/scite-tex.properties
+++ b/context/data/scite/context/scite-tex.properties
@@ -89,7 +89,7 @@ lexer.tex.auto.if=1
# only the macros that make sense:
-import scite-context-data-tex.properties
+import context/scite-context-data-tex.properties
# collections
diff --git a/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua b/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua
deleted file mode 100644
index 7883177b4..000000000
--- a/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua
+++ /dev/null
@@ -1,1100 +0,0 @@
-local info = {
- version = 1.324,
- comment = "basics for scintilla lpeg lexer for context/metafun",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
- comment = "contains copyrighted code from mitchell.att.foicica.com",
-
-}
-
--- todo: move all code here
--- todo: explore adapted dll ... properties + init
-
--- The fold and lex functions are copied and patched from original code by Mitchell (see
--- lexer.lua). All errors are mine.
---
--- Starting with SciTE version 3.20 there is an issue with coloring. As we still lack
--- a connection with scite itself (properties as well as printing to the log pane) we
--- cannot trace this (on windows). As far as I can see, there are no fundamental
--- changes in lexer.lua or LexLPeg.cxx so it must be in scintilla itself. So for the
--- moment I stick to 3.10. Indicators are: no lexing of 'next' and 'goto <label>' in the
--- Lua lexer and no brace highlighting either. Interesting is that it does work ok in
--- the cld lexer (so the Lua code is okay). Also the fact that char-def.lua lexes fast
--- is a signal that the lexer quits somewhere halfway.
---
--- After checking 3.24 and adapting to the new lexer tables things are okay again. So,
--- this version assumes 3.24 or higher. In 3.24 we have a different token result, i.e. no
--- longer a { tag, pattern } but just two return values. I didn't check other changes but
--- will do that when I run into issues.
---
--- I've considered making a whole copy and patch the other functions too as we need
--- an extra nesting model. However, I don't want to maintain too much. An unfortunate
--- change in 3.03 is that no longer a script can be specified. This means that instead
--- of loading the extensions via the properties file, we now need to load them in our
--- own lexers, unless of course we replace lexer.lua completely (which adds another
--- installation issue).
---
--- Another change has been that _LEXERHOME is no longer available. It looks like more and
--- more functionality gets dropped so maybe at some point we need to ship our own dll/so
--- files. For instance, I'd like to have access to the current filename and other scite
--- properties. For instance, we could cache some info with each file, if only we had
--- knowledge of what file we're dealing with.
---
--- For huge files folding can be pretty slow and I do have some large ones that I keep
--- open all the time. Loading is normally no ussue, unless one has remembered the status
--- and the cursor is at the last line of a 200K line file. Optimizing the fold function
--- brought down loading of char-def.lua from 14 sec => 8 sec. Replacing the word_match
--- function and optimizing the lex function gained another 2+ seconds. A 6 second load
--- is quite ok for me. The changed lexer table structure (no subtables) brings loading
--- down to a few seconds.
---
--- When the lexer path is copied to the textadept lexer path, and the theme definition to
--- theme path (as lexer.lua), the lexer works there as well. When I have time and motive
--- I will make a proper setup file to tune the look and feel a bit and associate suffixes
--- with the context lexer. The textadept editor has a nice style tracing option but lacks
--- the tabs for selecting files that scite has. It also has no integrated run that pipes
--- to the log pane (I wonder if it could borrow code from the console2 project). Interesting
--- is that the jit version of textadept crashes on lexing large files (and does not feel
--- faster either).
---
--- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which
--- means that we need to have it frozen at the moment we load another lexer. Because spacing
--- is used to revert to a parent lexer we need to make sure that we load children as late
--- as possible in order not to get the wrong whitespace trigger. This took me quite a while
--- to figure out (not being that familiar with the internals). The lex and fold functions
--- have been optimized. It is a pitty that there is no proper print available. Another thing
--- needed is a default style in ourown theme style definition, as otherwise we get wrong
--- nested lexers, especially if they are larger than a view. This is the hardest part of
--- getting things right.
---
--- Eventually it might be safer to copy the other methods from lexer.lua here as well so
--- that we have no dependencies, apart from the c library (for which at some point the api
--- will be stable I hope).
---
--- It's a pitty that there is no scintillua library for the OSX version of scite. Even
--- better would be to have the scintillua library as integral part of scite as that way I
--- could use OSX alongside windows and linux (depending on needs). Also nice would be to
--- have a proper interface to scite then because currently the lexer is rather isolated and the
--- lua version does not provide all standard libraries. It would also be good to have lpeg
--- support in the regular scite lua extension (currently you need to pick it up from someplace
--- else).
-
-local lpeg = require 'lpeg'
-
-local R, P, S, C, V, Cp, Cs, Ct, Cmt, Cc, Cf, Cg, Carg = lpeg.R, lpeg.P, lpeg.S, lpeg.C, lpeg.V, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc, lpeg.Cf, lpeg.Cg, lpeg.Carg
-local lpegmatch = lpeg.match
-local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub
-local concat = table.concat
-local global = _G
-local type, next, setmetatable, rawset = type, next, setmetatable, rawset
-
-if lexer then
- -- in recent c++ code the lexername and loading is hard coded
-elseif _LEXERHOME then
- dofile(_LEXERHOME .. '/lexer.lua') -- pre 3.03 situation
-else
- dofile('lexer.lua') -- whatever
-end
-
-lexer.context = lexer.context or { }
-local context = lexer.context
-
-context.patterns = context.patterns or { }
-local patterns = context.patterns
-
-lexer._CONTEXTEXTENSIONS = true
-
-local locations = {
- -- lexer.context.path,
- "data", -- optional data directory
- "..", -- regular scite directory
-}
-
-local function collect(name)
--- local definitions = loadfile(name .. ".luc") or loadfile(name .. ".lua")
- local okay, definitions = pcall(function () return require(name) end)
- if okay then
- if type(definitions) == "function" then
- definitions = definitions()
- end
- if type(definitions) == "table" then
- return definitions
- end
- end
-end
-
-function context.loaddefinitions(name)
- for i=1,#locations do
- local data = collect(locations[i] .. "/" .. name)
- if data then
- return data
- end
- end
-end
-
--- maybe more efficient:
-
-function context.word_match(words,word_chars,case_insensitive)
- local chars = '%w_' -- maybe just "" when word_chars
- if word_chars then
- chars = '^([' .. chars .. gsub(word_chars,'([%^%]%-])', '%%%1') ..']+)'
- else
- chars = '^([' .. chars ..']+)'
- end
- if case_insensitive then
- local word_list = { }
- for i=1,#words do
- word_list[lower(words[i])] = true
- end
- return P(function(input, index)
- local s, e, word = find(input,chars,index)
- return word and word_list[lower(word)] and e + 1 or nil
- end)
- else
- local word_list = { }
- for i=1,#words do
- word_list[words[i]] = true
- end
- return P(function(input, index)
- local s, e, word = find(input,chars,index)
- return word and word_list[word] and e + 1 or nil
- end)
- end
-end
-
-local idtoken = R("az","AZ","\127\255","__")
-local digit = R("09")
-local sign = S("+-")
-local period = P(".")
-local space = S(" \n\r\t\f\v")
-
-patterns.idtoken = idtoken
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.period = period
-
-patterns.cardinal = digit^1
-patterns.integer = sign^-1 * digit^1
-
-patterns.real =
- sign^-1 * ( -- at most one
- digit^1 * period * digit^0 -- 10.0 10.
- + digit^0 * period * digit^1 -- 0.10 .10
- + digit^1 -- 10
- )
-
-patterns.restofline = (1-S("\n\r"))^1
-patterns.space = space
-patterns.spacing = space^1
-patterns.nospacing = (1-space)^1
-patterns.anything = P(1)
-
-local endof = S("\n\r\f")
-
-patterns.startofline = P(function(input,index)
- return (index == 1 or lpegmatch(endof,input,index-1)) and index
-end)
-
-function context.exact_match(words,word_chars,case_insensitive)
- local characters = concat(words)
- local pattern -- the concat catches _ etc
- if word_chars == true or word_chars == false or word_chars == nil then
- word_chars = ""
- end
- if type(word_chars) == "string" then
- pattern = S(characters) + idtoken
- if case_insensitive then
- pattern = pattern + S(upper(characters)) + S(lower(characters))
- end
- if word_chars ~= "" then
- pattern = pattern + S(word_chars)
- end
- elseif word_chars then
- pattern = word_chars
- end
- if case_insensitive then
- local list = { }
- for i=1,#words do
- list[lower(words[i])] = true
- end
- return Cmt(pattern^1, function(_,i,s)
- return list[lower(s)] -- and i or nil
- end)
- else
- local list = { }
- for i=1,#words do
- list[words[i]] = true
- end
- return Cmt(pattern^1, function(_,i,s)
- return list[s] -- and i or nil
- end)
- end
-end
-
--- spell checking (we can only load lua files)
---
--- return {
--- min = 3,
--- max = 40,
--- n = 12345,
--- words = {
--- ["someword"] = "someword",
--- ["anotherword"] = "Anotherword",
--- },
--- }
-
-local lists = { }
-
-function context.setwordlist(tag,limit) -- returns hash (lowercase keys and original values)
- if not tag or tag == "" then
- return false, 3
- end
- local list = lists[tag]
- if not list then
- list = context.loaddefinitions("spell-" .. tag)
- if not list or type(list) ~= "table" then
- list = { words = false, min = 3 }
- else
- list.words = list.words or false
- list.min = list.min or 3
- end
- lists[tag] = list
- end
- return list.words, list.min
-end
-
-patterns.wordtoken = R("az","AZ","\127\255")
-patterns.wordpattern = patterns.wordtoken^3 -- todo: if limit and #s < limit then
-
--- -- pre 3.24:
---
--- function context.checkedword(validwords,validminimum,s,i) -- ,limit
--- if not validwords then -- or #s < validminimum then
--- return true, { "text", i } -- { "default", i }
--- else
--- -- keys are lower
--- local word = validwords[s]
--- if word == s then
--- return true, { "okay", i } -- exact match
--- elseif word then
--- return true, { "warning", i } -- case issue
--- else
--- local word = validwords[lower(s)]
--- if word == s then
--- return true, { "okay", i } -- exact match
--- elseif word then
--- return true, { "warning", i } -- case issue
--- elseif upper(s) == s then
--- return true, { "warning", i } -- probably a logo or acronym
--- else
--- return true, { "error", i }
--- end
--- end
--- end
--- end
-
-function context.checkedword(validwords,validminimum,s,i) -- ,limit
- if not validwords then -- or #s < validminimum then
- return true, "text", i -- { "default", i }
- else
- -- keys are lower
- local word = validwords[s]
- if word == s then
- return true, "okay", i -- exact match
- elseif word then
- return true, "warning", i -- case issue
- else
- local word = validwords[lower(s)]
- if word == s then
- return true, "okay", i -- exact match
- elseif word then
- return true, "warning", i -- case issue
- elseif upper(s) == s then
- return true, "warning", i -- probably a logo or acronym
- else
- return true, "error", i
- end
- end
- end
-end
-
-function context.styleofword(validwords,validminimum,s) -- ,limit
- if not validwords or #s < validminimum then
- return "text"
- else
- -- keys are lower
- local word = validwords[s]
- if word == s then
- return "okay" -- exact match
- elseif word then
- return "warning" -- case issue
- else
- local word = validwords[lower(s)]
- if word == s then
- return "okay" -- exact match
- elseif word then
- return "warning" -- case issue
- elseif upper(s) == s then
- return "warning" -- probably a logo or acronym
- else
- return "error"
- end
- end
- end
-end
-
--- overloaded functions
-
-local FOLD_BASE = SC_FOLDLEVELBASE
-local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
-local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
-
-local get_style_at = GetStyleAt
-local get_property = GetProperty
-local get_indent_amount = GetIndentAmount
-
-local h_table, b_table, n_table = { }, { }, { }
-
-setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end })
-setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
-setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
-
--- -- todo: move the local functions outside (see below) .. old variant < 3.24
---
--- local newline = P("\r\n") + S("\r\n")
--- local p_yes = Cp() * Cs((1-newline)^1) * newline^-1
--- local p_nop = newline
---
--- local function fold_by_parsing(text,start_pos,start_line,start_level,lexer)
--- local foldsymbols = lexer._foldsymbols
--- if not foldsymbols then
--- return { }
--- end
--- local patterns = foldsymbols._patterns
--- if not patterns then
--- return { }
--- end
--- local nofpatterns = #patterns
--- if nofpatterns == 0 then
--- return { }
--- end
--- local folds = { }
--- local line_num = start_line
--- local prev_level = start_level
--- local current_level = prev_level
--- local validmatches = foldsymbols._validmatches
--- if not validmatches then
--- validmatches = { }
--- for symbol, matches in next, foldsymbols do -- whatever = { start = 1, stop = -1 }
--- if not find(symbol,"^_") then -- brrr
--- for s, _ in next, matches do
--- validmatches[s] = true
--- end
--- end
--- end
--- foldsymbols._validmatches = validmatches
--- end
--- -- of course we could instead build a nice lpeg checker .. something for
--- -- a rainy day with a stack of new cd's at hand
--- local function action_y(pos,line)
--- for i=1,nofpatterns do
--- for s, m in gmatch(line,patterns[i]) do
--- if validmatches[m] then
--- local symbols = foldsymbols[get_style_at(start_pos + pos + s - 1)]
--- if symbols then
--- local action = symbols[m]
--- if action then
--- if type(action) == 'number' then -- we could store this in validmatches if there was only one symbol category
--- current_level = current_level + action
--- else
--- current_level = current_level + action(text,pos,line,s,m)
--- end
--- if current_level < FOLD_BASE then
--- current_level = FOLD_BASE
--- end
--- end
--- end
--- end
--- end
--- end
--- if current_level > prev_level then
--- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
--- else
--- folds[line_num] = n_table[prev_level] -- { prev_level }
--- end
--- prev_level = current_level
--- line_num = line_num + 1
--- end
--- local function action_n()
--- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
--- line_num = line_num + 1
--- end
--- if lexer._reset_parser then
--- lexer._reset_parser()
--- end
--- local lpegpattern = (p_yes/action_y + p_nop/action_n)^0 -- not too efficient but indirect function calls are neither but
--- lpegmatch(lpegpattern,text) -- keys are not pressed that fast ... large files are slow anyway
--- return folds
--- end
-
--- The 3.24 variant; no longer subtable optimization is needed:
-
-local newline = P("\r\n") + S("\r\n")
-local p_yes = Cp() * Cs((1-newline)^1) * newline^-1
-local p_nop = newline
-
-local folders = { }
-
-local function fold_by_parsing(text,start_pos,start_line,start_level,lexer)
- local folder = folders[lexer]
- if not folder then
- --
- local pattern, folds, text, start_pos, line_num, prev_level, current_level
- --
- local fold_symbols = lexer._foldsymbols
- local fold_pattern = lexer._foldpattern -- use lpeg instead (context extension)
- --
- if fold_pattern then
- -- if no functions are found then we could have a faster one
-
- -- fold_pattern = Cp() * C(fold_pattern) * Carg(1) / function(s,match,pos)
- -- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
- -- local l = symbols and symbols[match]
- -- if l then
- -- local t = type(l)
- -- if t == 'number' then
- -- current_level = current_level + l
- -- elseif t == 'function' then
- -- current_level = current_level + l(text, pos, line, s, match)
- -- end
- -- end
- -- end
- -- fold_pattern = (fold_pattern + P(1))^0
- -- local action_y = function(pos,line)
- -- lpegmatch(fold_pattern,line,1,pos)
- -- folds[line_num] = prev_level
- -- if current_level > prev_level then
- -- folds[line_num] = prev_level + FOLD_HEADER
- -- end
- -- if current_level < FOLD_BASE then
- -- current_level = FOLD_BASE
- -- end
- -- prev_level = current_level
- -- line_num = line_num + 1
- -- end
- -- local action_n = function()
- -- folds[line_num] = prev_level + FOLD_BLANK
- -- line_num = line_num + 1
- -- end
- -- pattern = (p_yes/action_y + p_nop/action_n)^0
-
- fold_pattern = Cp() * C(fold_pattern) / function(s,match)
- local symbols = fold_symbols[get_style_at(start_pos + s)]
- if symbols then
- local l = symbols[match]
- if l then
- current_level = current_level + l
- end
- end
- end
- local action_y = function()
- folds[line_num] = prev_level
- if current_level > prev_level then
- folds[line_num] = prev_level + FOLD_HEADER
- end
- if current_level < FOLD_BASE then
- current_level = FOLD_BASE
- end
- prev_level = current_level
- line_num = line_num + 1
- end
- local action_n = function()
- folds[line_num] = prev_level + FOLD_BLANK
- line_num = line_num + 1
- end
- pattern = ((fold_pattern + (1-newline))^1 * newline / action_y + newline/action_n)^0
-
- else
- -- the traditional one but a bit optimized
- local fold_symbols_patterns = fold_symbols._patterns
- local action_y = function(pos,line)
- for j = 1, #fold_symbols_patterns do
- for s, match in gmatch(line,fold_symbols_patterns[j]) do -- '()('..patterns[i]..')'
- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
- local l = symbols and symbols[match]
- local t = type(l)
- if t == 'number' then
- current_level = current_level + l
- elseif t == 'function' then
- current_level = current_level + l(text, pos, line, s, match)
- end
- end
- end
- folds[line_num] = prev_level
- if current_level > prev_level then
- folds[line_num] = prev_level + FOLD_HEADER
- end
- if current_level < FOLD_BASE then
- current_level = FOLD_BASE
- end
- prev_level = current_level
- line_num = line_num + 1
- end
- local action_n = function()
- folds[line_num] = prev_level + FOLD_BLANK
- line_num = line_num + 1
- end
- pattern = (p_yes/action_y + p_nop/action_n)^0
- end
- --
- local reset_parser = lexer._reset_parser
- --
- folder = function(_text_,_start_pos_,_start_line_,_start_level_)
- if reset_parser then
- reset_parser()
- end
- folds = { }
- text = _text_
- start_pos = _start_pos_
- line_num = _start_line_
- prev_level = _start_level_
- current_level = prev_level
- lpegmatch(pattern,text)
--- return folds
-local t = folds
-folds = nil
-return t -- so folds can be collected
- end
- folders[lexer] = folder
- end
- return folder(text,start_pos,start_line,start_level,lexer)
-end
-
--- local function fold_by_indentation(text,start_pos,start_line,start_level)
--- local folds = { }
--- local current_line = start_line
--- local prev_level = start_level
--- for line in gmatch(text,'[\t ]*(.-)\r?\n') do
--- if line ~= "" then
--- local current_level = FOLD_BASE + get_indent_amount(current_line)
--- if current_level > prev_level then -- next level
--- local i = current_line - 1
--- while true do
--- local f = folds[i]
--- if f and f[2] == FOLD_BLANK then
--- i = i - 1
--- else
--- break
--- end
--- end
--- local f = folds[i]
--- if f then
--- f[2] = FOLD_HEADER
--- end -- low indent
--- folds[current_line] = n_table[current_level] -- { current_level } -- high indent
--- elseif current_level < prev_level then -- prev level
--- local f = folds[current_line - 1]
--- if f then
--- f[1] = prev_level -- high indent
--- end
--- folds[current_line] = n_table[current_level] -- { current_level } -- low indent
--- else -- same level
--- folds[current_line] = n_table[prev_level] -- { prev_level }
--- end
--- prev_level = current_level
--- else
--- folds[current_line] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
--- end
--- current_line = current_line + 1
--- end
--- return folds
--- end
-
--- local function fold_by_indentation(text,start_pos,start_line,start_level)
--- local folds = { }
--- local current_line = start_line
--- local prev_level = start_level
--- for line in gmatch(text,'[\t ]*(.-)\r?\n') do
--- if line ~= '' then
--- local current_level = FOLD_BASE + get_indent_amount(current_line)
--- if current_level > prev_level then -- next level
--- local i = current_line - 1
--- local f
--- while true do
--- f = folds[i]
--- if not f then
--- break
--- elseif f[2] == FOLD_BLANK then
--- i = i - 1
--- else
--- f[2] = FOLD_HEADER -- low indent
--- break
--- end
--- end
--- folds[current_line] = { current_level } -- high indent
--- elseif current_level < prev_level then -- prev level
--- local f = folds[current_line - 1]
--- if f then
--- f[1] = prev_level -- high indent
--- end
--- folds[current_line] = { current_level } -- low indent
--- else -- same level
--- folds[current_line] = { prev_level }
--- end
--- prev_level = current_level
--- else
--- folds[current_line] = { prev_level, FOLD_BLANK }
--- end
--- current_line = current_line + 1
--- end
--- for line, level in next, folds do
--- folds[line] = level[1] + (level[2] or 0)
--- end
--- return folds
--- end
-
-local folds, current_line, prev_level
-
-local function action_y()
- local current_level = FOLD_BASE + get_indent_amount(current_line)
- if current_level > prev_level then -- next level
- local i = current_line - 1
- local f
- while true do
- f = folds[i]
- if not f then
- break
- elseif f[2] == FOLD_BLANK then
- i = i - 1
- else
- f[2] = FOLD_HEADER -- low indent
- break
- end
- end
- folds[current_line] = { current_level } -- high indent
- elseif current_level < prev_level then -- prev level
- local f = folds[current_line - 1]
- if f then
- f[1] = prev_level -- high indent
- end
- folds[current_line] = { current_level } -- low indent
- else -- same level
- folds[current_line] = { prev_level }
- end
- prev_level = current_level
- current_line = current_line + 1
-end
-
-local function action_n()
- folds[current_line] = { prev_level, FOLD_BLANK }
- current_line = current_line + 1
-end
-
-local pattern = ( S("\t ")^0 * ( (1-S("\n\r"))^1 / action_y + P(true) / action_n) * newline )^0
-
-local function fold_by_indentation(text,start_pos,start_line,start_level)
- -- initialize
- folds = { }
- current_line = start_line
- prev_level = start_level
- -- define
- -- -- not here .. pattern binds and local functions are not frozen
- -- analyze
- lpegmatch(pattern,text)
- -- flatten
- for line, level in next, folds do
- folds[line] = level[1] + (level[2] or 0)
- end
- -- done
--- return folds
-local t = folds
-folds = nil
-return t -- so folds can be collected
-end
-
-local function fold_by_line(text,start_pos,start_line,start_level)
- local folds = { }
- -- can also be lpeg'd
- for _ in gmatch(text,".-\r?\n") do
- folds[start_line] = n_table[start_level] -- { start_level }
- start_line = start_line + 1
- end
- return folds
-end
-
-local threshold_by_lexer = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_parsing = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_indentation = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_line = 512 * 1024 -- we don't know the filesize yet
-
-function context.fold(text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go
- if text == '' then
- return { }
- end
- local lexer = global._LEXER
- local fold_by_lexer = lexer._fold
- local fold_by_symbols = lexer._foldsymbols
- local filesize = 0 -- we don't know that
- if fold_by_lexer then
- if filesize <= threshold_by_lexer then
- return fold_by_lexer(text,start_pos,start_line,start_level,lexer)
- end
- elseif fold_by_symbols then -- and get_property('fold.by.parsing',1) > 0 then
- if filesize <= threshold_by_parsing then
- return fold_by_parsing(text,start_pos,start_line,start_level,lexer)
- end
- elseif get_property('fold.by.indentation',1) > 0 then
- if filesize <= threshold_by_indentation then
- return fold_by_indentation(text,start_pos,start_line,start_level,lexer)
- end
- elseif get_property('fold.by.line',1) > 0 then
- if filesize <= threshold_by_line then
- return fold_by_line(text,start_pos,start_line,start_level,lexer)
- end
- end
- return { }
-end
-
--- The following code is mostly unchanged:
-
-local function add_rule(lexer, id, rule)
- if not lexer._RULES then
- lexer._RULES = {}
- lexer._RULEORDER = {}
- end
- lexer._RULES[id] = rule
- lexer._RULEORDER[#lexer._RULEORDER + 1] = id
-end
-
-local function add_style(lexer, token_name, style)
- local len = lexer._STYLES.len
- if len == 32 then
- len = len + 8
- end
- if len >= 128 then
- print('Too many styles defined (128 MAX)')
- end
- lexer._TOKENS[token_name] = len
- lexer._STYLES[len] = style
- lexer._STYLES.len = len + 1
-end
-
-local function join_tokens(lexer)
- local patterns, order = lexer._RULES, lexer._RULEORDER
- local token_rule = patterns[order[1]]
- for i=2,#order do
- token_rule = token_rule + patterns[order[i]]
- end
- lexer._TOKENRULE = token_rule
- return lexer._TOKENRULE
-end
-
-local function add_lexer(grammar, lexer, token_rule)
- local token_rule = join_tokens(lexer)
- local lexer_name = lexer._NAME
- local children = lexer._CHILDREN
- for i=1,#children do
- local child = children[i]
- if child._CHILDREN then
- add_lexer(grammar, child)
- end
- local child_name = child._NAME
- local rules = child._EMBEDDEDRULES[lexer_name]
- local rules_token_rule = grammar['__'..child_name] or rules.token_rule
- grammar[child_name] = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name)
- local embedded_child = '_' .. child_name
- grammar[embedded_child] = rules.start_rule * (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1
- token_rule = V(embedded_child) + token_rule
- end
- grammar['__' .. lexer_name] = token_rule
- grammar[lexer_name] = token_rule^0
-end
-
-local function build_grammar(lexer, initial_rule)
- local children = lexer._CHILDREN
- if children then
- local lexer_name = lexer._NAME
- if not initial_rule then
- initial_rule = lexer_name
- end
- local grammar = { initial_rule }
- add_lexer(grammar, lexer)
- lexer._INITIALRULE = initial_rule
- lexer._GRAMMAR = Ct(P(grammar))
- else
- lexer._GRAMMAR = Ct(join_tokens(lexer)^0)
- end
-end
-
--- so far. We need these local functions in the next one.
---
--- Before 3.24 we had tokens[..] = { category, position }, now it's a two values.
-
-local lineparsers = { }
-
-function context.lex(text,init_style)
- local lexer = global._LEXER
- local grammar = lexer._GRAMMAR
- if not grammar then
- return { }
- elseif lexer._LEXBYLINE then -- we could keep token
- local tokens = { }
- local offset = 0
- local noftokens = 0
- -- -- pre 3.24
- --
- -- for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
- -- local line_tokens = lpegmatch(grammar,line)
- -- if line_tokens then
- -- for i=1,#line_tokens do
- -- local token = line_tokens[i]
- -- token[2] = token[2] + offset
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = token
- -- end
- -- end
- -- offset = offset + #line
- -- if noftokens > 0 and tokens[noftokens][2] ~= offset then
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = { 'default', offset + 1 }
- -- end
- -- end
-
- -- for line in gmatch(text,'[^\r\n]*\r?\n?') do
- -- local line_tokens = lpegmatch(grammar,line)
- -- if line_tokens then
- -- for i=1,#line_tokens,2 do
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = line_tokens[i]
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = line_tokens[i + 1] + offset
- -- end
- -- end
- -- offset = offset + #line
- -- if noftokens > 0 and tokens[noftokens] ~= offset then
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = 'default'
- -- noftokens = noftokens + 1
- -- tokens[noftokens] = offset + 1
- -- end
- -- end
-
- local lineparser = lineparsers[lexer]
- if not lineparser then -- probably a cmt is more efficient
- lineparser = C((1-newline)^0 * newline) / function(line)
- local length = #line
- local line_tokens = length > 0 and lpegmatch(grammar,line)
- if line_tokens then
- for i=1,#line_tokens,2 do
- noftokens = noftokens + 1
- tokens[noftokens] = line_tokens[i]
- noftokens = noftokens + 1
- tokens[noftokens] = line_tokens[i + 1] + offset
- end
- end
- offset = offset + length
- if noftokens > 0 and tokens[noftokens] ~= offset then
- noftokens = noftokens + 1
- tokens[noftokens] = 'default'
- noftokens = noftokens + 1
- tokens[noftokens] = offset + 1
- end
- end
- lineparser = lineparser^0
- lineparsers[lexer] = lineparser
- end
- lpegmatch(lineparser,text)
- return tokens
-
- elseif lexer._CHILDREN then
- -- as we cannot print, tracing is not possible ... this might change, as we could
- -- just as well generate them all in one go (sharing as much as possible)
- local hash = lexer._HASH -- hm, was _hash
- if not hash then
- hash = { }
- lexer._HASH = hash
- end
- grammar = hash[init_style]
- if grammar then
- lexer._GRAMMAR = grammar
- else
- for style, style_num in next, lexer._TOKENS do
- if style_num == init_style then
- -- the name of the lexer is filtered from the whitespace
- -- specification
- local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
- if lexer._INITIALRULE ~= lexer_name then
- grammar = hash[lexer_name]
- if not grammar then
- build_grammar(lexer,lexer_name)
- grammar = lexer._GRAMMAR
- hash[lexer_name] = grammar
- end
- end
- break
- end
- end
- grammar = grammar or lexer._GRAMMAR
- hash[init_style] = grammar
- end
- return lpegmatch(grammar,text)
- else
- return lpegmatch(grammar,text)
- end
-end
-
--- todo: keywords: one lookup and multiple matches
-
--- function context.token(name, patt)
--- return Ct(patt * Cc(name) * Cp())
--- end
---
--- -- hm, changed in 3.24 .. no longer a table
-
-function context.token(name, patt)
- return patt * Cc(name) * Cp()
-end
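
So a lex pass now returns a flat array with alternating style names and end positions rather than an array of pairs. A small self-contained sketch of that shape (the token names are made up for illustration):

    local lpeg = require("lpeg")
    local P, R, Cc, Cp, Ct = lpeg.P, lpeg.R, lpeg.Cc, lpeg.Cp, lpeg.Ct

    local function token(name, patt) -- same shape as context.token above
        return patt * Cc(name) * Cp()
    end

    local whitespace = token("whitespace", P(" ")^1)
    local number     = token("number", R("09")^1)
    local word       = token("word", R("az")^1)

    local t = lpeg.match(Ct((whitespace + number + word)^0), "foo 123")
    -- t = { "word", 4, "whitespace", 5, "number", 8 }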
-
-lexer.fold = context.fold
-lexer.lex = context.lex
-lexer.token = context.token
-lexer.exact_match = context.exact_match
-
--- helper .. alas ... the lexer's lua instance is rather crippled .. not even
--- math is part of it
-
-local floor = math and math.floor
-local char = string.char
-
-if not floor then
-
- floor = function(n)
- return tonumber(string.format("%d",n))
- end
-
- math = math or { }
-
- math.floor = floor
-
-end
-
-local function utfchar(n)
- if n < 0x80 then
- return char(n)
- elseif n < 0x800 then
- return char(
- 0xC0 + floor(n/0x40),
- 0x80 + (n % 0x40)
- )
- elseif n < 0x10000 then
- return char(
- 0xE0 + floor(n/0x1000),
- 0x80 + (floor(n/0x40) % 0x40),
- 0x80 + (n % 0x40)
- )
- elseif n < 0x40000 then
- return char(
- 0xF0 + floor(n/0x40000),
- 0x80 + floor(n/0x1000),
- 0x80 + (floor(n/0x40) % 0x40),
- 0x80 + (n % 0x40)
- )
- else
- -- return char(
- -- 0xF1 + floor(n/0x1000000),
- -- 0x80 + floor(n/0x40000),
- -- 0x80 + floor(n/0x1000),
- -- 0x80 + (floor(n/0x40) % 0x40),
- -- 0x80 + (n % 0x40)
- -- )
- return "?"
- end
-end
-
-context.utfchar = utfchar
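
A quick sanity check of this encoder (a sketch, assuming a stock string.char):

    assert(utfchar(0x41)   == "A")            -- one byte
    assert(utfchar(0x00A0) == "\194\160")     -- two bytes (nbsp)
    assert(utfchar(0x20AC) == "\226\130\172") -- three bytes (euro)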
-
--- a helper from l-lpeg:
-
-local gmatch = string.gmatch
-
-local function make(t)
- local p
- for k, v in next, t do
- if not p then
- if next(v) then
- p = P(k) * make(v)
- else
- p = P(k)
- end
- else
- if next(v) then
- p = p + P(k) * make(v)
- else
- p = p + P(k)
- end
- end
- end
- return p
-end
-
-function lpeg.utfchartabletopattern(list)
- local tree = { }
- for i=1,#list do
- local t = tree
- for c in gmatch(list[i],".") do
- if not t[c] then
- t[c] = { }
- end
- t = t[c]
- end
- end
- return make(tree)
-end
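
The list collapses into a prefix tree, so alternatives that share leading bytes are tested only once. A usage sketch (assuming the function above is in scope):

    local p = lpeg.utfchartabletopattern { "abc", "abd", "xy" }
    assert(lpeg.match(p, "abd") == 4) -- shared prefix "ab", then "d"
    assert(lpeg.match(p, "xy")  == 3)
    assert(not lpeg.match(p, "ab")) -- incomplete entries do not match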
-
--- patterns.invisibles =
--- P(utfchar(0x00A0)) -- nbsp
--- + P(utfchar(0x2000)) -- enquad
--- + P(utfchar(0x2001)) -- emquad
--- + P(utfchar(0x2002)) -- enspace
--- + P(utfchar(0x2003)) -- emspace
--- + P(utfchar(0x2004)) -- threeperemspace
--- + P(utfchar(0x2005)) -- fourperemspace
--- + P(utfchar(0x2006)) -- sixperemspace
--- + P(utfchar(0x2007)) -- figurespace
--- + P(utfchar(0x2008)) -- punctuationspace
--- + P(utfchar(0x2009)) -- breakablethinspace
--- + P(utfchar(0x200A)) -- hairspace
--- + P(utfchar(0x200B)) -- zerowidthspace
--- + P(utfchar(0x202F)) -- narrownobreakspace
--- + P(utfchar(0x205F)) -- math thinspace
-
-patterns.invisibles = lpeg.utfchartabletopattern {
- utfchar(0x00A0), -- nbsp
- utfchar(0x2000), -- enquad
- utfchar(0x2001), -- emquad
- utfchar(0x2002), -- enspace
- utfchar(0x2003), -- emspace
- utfchar(0x2004), -- threeperemspace
- utfchar(0x2005), -- fourperemspace
- utfchar(0x2006), -- sixperemspace
- utfchar(0x2007), -- figurespace
- utfchar(0x2008), -- punctuationspace
- utfchar(0x2009), -- breakablethinspace
- utfchar(0x200A), -- hairspace
- utfchar(0x200B), -- zerowidthspace
- utfchar(0x202F), -- narrownobreakspace
- utfchar(0x205F), -- math thinspace
-}
-
--- now we can make:
-
-patterns.iwordtoken = patterns.wordtoken - patterns.invisibles
-patterns.iwordpattern = patterns.iwordtoken^3
-
--- require("themes/scite-context-theme")
-
--- In order to deal with some bug in additional styles (I have no clue what is
--- wrong, but additional styles get ignored and clash somehow) I just copy the
--- original lexer code ... see original for comments.
diff --git a/context/data/scite/lexers/data/scite-context-data-context.lua b/context/data/scite/lexers/data/scite-context-data-context.lua
deleted file mode 100644
index 0d577c8da..000000000
--- a/context/data/scite/lexers/data/scite-context-data-context.lua
+++ /dev/null
@@ -1,4 +0,0 @@
-return {
- ["constants"]={ "zerocount", "minusone", "minustwo", "plusone", "plustwo", "plusthree", "plusfour", "plusfive", "plussix", "plusseven", "pluseight", "plusnine", "plusten", "plussixteen", "plushundred", "plusthousand", "plustenthousand", "plustwentythousand", "medcard", "maxcard", "zeropoint", "onepoint", "halfapoint", "onebasepoint", "maxdimen", "scaledpoint", "thousandpoint", "points", "halfpoint", "zeroskip", "zeromuskip", "onemuskip", "pluscxxvii", "pluscxxviii", "pluscclv", "pluscclvi", "normalpagebox", "endoflinetoken", "outputnewlinechar", "emptytoks", "empty", "undefined", "voidbox", "emptybox", "emptyvbox", "emptyhbox", "bigskipamount", "medskipamount", "smallskipamount", "fmtname", "fmtversion", "texengine", "texenginename", "texengineversion", "luatexengine", "pdftexengine", "xetexengine", "unknownengine", "etexversion", "pdftexversion", "xetexversion", "xetexrevision", "activecatcode", "bgroup", "egroup", "endline", "conditionaltrue", "conditionalfalse", "attributeunsetvalue", "uprotationangle", "rightrotationangle", "downrotationangle", "leftrotationangle", "inicatcodes", "ctxcatcodes", "texcatcodes", "notcatcodes", "txtcatcodes", "vrbcatcodes", "prtcatcodes", "nilcatcodes", "luacatcodes", "tpacatcodes", "tpbcatcodes", "xmlcatcodes", "escapecatcode", "begingroupcatcode", "endgroupcatcode", "mathshiftcatcode", "alignmentcatcode", "endoflinecatcode", "parametercatcode", "superscriptcatcode", "subscriptcatcode", "ignorecatcode", "spacecatcode", "lettercatcode", "othercatcode", "activecatcode", "commentcatcode", "invalidcatcode", "tabasciicode", "newlineasciicode", "formfeedasciicode", "endoflineasciicode", "endoffileasciicode", "spaceasciicode", "hashasciicode", "dollarasciicode", "commentasciicode", "ampersandasciicode", "colonasciicode", "backslashasciicode", "circumflexasciicode", "underscoreasciicode", "leftbraceasciicode", "barasciicode", "rightbraceasciicode", "tildeasciicode", "delasciicode", "lessthanasciicode", "morethanasciicode", "doublecommentsignal", "atsignasciicode", "exclamationmarkasciicode", "questionmarkasciicode", "doublequoteasciicode", "singlequoteasciicode", "forwardslashasciicode", "primeasciicode", "activemathcharcode", "activetabtoken", "activeformfeedtoken", "activeendoflinetoken", "batchmodecode", "nonstopmodecode", "scrollmodecode", "errorstopmodecode", "bottomlevelgroupcode", "simplegroupcode", "hboxgroupcode", "adjustedhboxgroupcode", "vboxgroupcode", "vtopgroupcode", "aligngroupcode", "noaligngroupcode", "outputgroupcode", "mathgroupcode", "discretionarygroupcode", "insertgroupcode", "vcentergroupcode", "mathchoicegroupcode", "semisimplegroupcode", "mathshiftgroupcode", "mathleftgroupcode", "vadjustgroupcode", "charnodecode", "hlistnodecode", "vlistnodecode", "rulenodecode", "insertnodecode", "marknodecode", "adjustnodecode", "ligaturenodecode", "discretionarynodecode", "whatsitnodecode", "mathnodecode", "gluenodecode", "kernnodecode", "penaltynodecode", "unsetnodecode", "mathsnodecode", "charifcode", "catifcode", "numifcode", "dimifcode", "oddifcode", "vmodeifcode", "hmodeifcode", "mmodeifcode", "innerifcode", "voidifcode", "hboxifcode", "vboxifcode", "xifcode", "eofifcode", "trueifcode", "falseifcode", "caseifcode", "definedifcode", "csnameifcode", "fontcharifcode", "fontslantperpoint", "fontinterwordspace", "fontinterwordstretch", "fontinterwordshrink", "fontexheight", "fontemwidth", "fontextraspace", "slantperpoint", "interwordspace", "interwordstretch", "interwordshrink", "exheight", "emwidth", "extraspace", "mathsupdisplay", "mathsupnormal", 
"mathsupcramped", "mathsubnormal", "mathsubcombined", "mathaxisheight", "startmode", "stopmode", "startnotmode", "stopnotmode", "startmodeset", "stopmodeset", "doifmode", "doifmodeelse", "doifnotmode", "startallmodes", "stopallmodes", "startnotallmodes", "stopnotallmodes", "doifallmodes", "doifallmodeselse", "doifnotallmodes", "startenvironment", "stopenvironment", "environment", "startcomponent", "stopcomponent", "component", "startproduct", "stopproduct", "product", "startproject", "stopproject", "project", "starttext", "stoptext", "startnotext", "stopnotext", "startdocument", "stopdocument", "documentvariable", "setupdocument", "startmodule", "stopmodule", "usemodule", "usetexmodule", "useluamodule", "setupmodule", "currentmoduleparameter", "moduleparameter", "startTEXpage", "stopTEXpage", "enablemode", "disablemode", "preventmode", "globalenablemode", "globaldisablemode", "globalpreventmode", "pushmode", "popmode", "typescriptone", "typescripttwo", "typescriptthree", "mathsizesuffix", "mathordcode", "mathopcode", "mathbincode", "mathrelcode", "mathopencode", "mathclosecode", "mathpunctcode", "mathalphacode", "mathinnercode", "mathnothingcode", "mathlimopcode", "mathnolopcode", "mathboxcode", "mathchoicecode", "mathaccentcode", "mathradicalcode", "constantnumber", "constantnumberargument", "constantdimen", "constantdimenargument", "constantemptyargument", "continueifinputfile", "luastringsep", "!!bs", "!!es", "lefttorightmark", "righttoleftmark", "breakablethinspace", "nobreakspace", "narrownobreakspace", "zerowidthnobreakspace", "ideographicspace", "ideographichalffillspace", "twoperemspace", "threeperemspace", "fourperemspace", "fiveperemspace", "sixperemspace", "figurespace", "punctuationspace", "hairspace", "zerowidthspace", "zerowidthnonjoiner", "zerowidthjoiner", "zwnj", "zwj" },
- ["helpers"]={ "startsetups", "stopsetups", "startxmlsetups", "stopxmlsetups", "startluasetups", "stopluasetups", "starttexsetups", "stoptexsetups", "startrawsetups", "stoprawsetups", "startlocalsetups", "stoplocalsetups", "starttexdefinition", "stoptexdefinition", "starttexcode", "stoptexcode", "startcontextcode", "stopcontextcode", "doifsetupselse", "doifsetups", "doifnotsetups", "setup", "setups", "texsetup", "xmlsetup", "luasetup", "directsetup", "doifelsecommandhandler", "doifnotcommandhandler", "doifcommandhandler", "newmode", "setmode", "resetmode", "newsystemmode", "setsystemmode", "resetsystemmode", "pushsystemmode", "popsystemmode", "booleanmodevalue", "newcount", "newdimen", "newskip", "newmuskip", "newbox", "newtoks", "newread", "newwrite", "newmarks", "newinsert", "newattribute", "newif", "newlanguage", "newfamily", "newfam", "newhelp", "then", "begcsname", "strippedcsname", "firstargumentfalse", "firstargumenttrue", "secondargumentfalse", "secondargumenttrue", "thirdargumentfalse", "thirdargumenttrue", "fourthargumentfalse", "fourthargumenttrue", "fifthargumentfalse", "fifthsargumenttrue", "sixthargumentfalse", "sixtsargumenttrue", "doglobal", "dodoglobal", "redoglobal", "resetglobal", "donothing", "dontcomplain", "forgetall", "donetrue", "donefalse", "htdp", "unvoidbox", "hfilll", "vfilll", "mathbox", "mathlimop", "mathnolop", "mathnothing", "mathalpha", "currentcatcodetable", "defaultcatcodetable", "catcodetablename", "newcatcodetable", "startcatcodetable", "stopcatcodetable", "startextendcatcodetable", "stopextendcatcodetable", "pushcatcodetable", "popcatcodetable", "restorecatcodes", "setcatcodetable", "letcatcodecommand", "defcatcodecommand", "uedcatcodecommand", "hglue", "vglue", "hfillneg", "vfillneg", "hfilllneg", "vfilllneg", "ruledhss", "ruledhfil", "ruledhfill", "ruledhfilneg", "ruledhfillneg", "normalhfillneg", "ruledvss", "ruledvfil", "ruledvfill", "ruledvfilneg", "ruledvfillneg", "normalvfillneg", "ruledhbox", "ruledvbox", "ruledvtop", "ruledvcenter", "ruledmbox", "ruledhskip", "ruledvskip", "ruledkern", "ruledmskip", "ruledmkern", "ruledhglue", "ruledvglue", "normalhglue", "normalvglue", "ruledpenalty", "filledhboxb", "filledhboxr", "filledhboxg", "filledhboxc", "filledhboxm", "filledhboxy", "filledhboxk", "scratchcounter", "globalscratchcounter", "scratchdimen", "globalscratchdimen", "scratchskip", "globalscratchskip", "scratchmuskip", "globalscratchmuskip", "scratchtoks", "globalscratchtoks", "scratchbox", "globalscratchbox", "normalbaselineskip", "normallineskip", "normallineskiplimit", "availablehsize", "localhsize", "setlocalhsize", "nextbox", "dowithnextbox", "dowithnextboxcs", "dowithnextboxcontent", "dowithnextboxcontentcs", "scratchwidth", "scratchheight", "scratchdepth", "scratchoffset", "scratchdistance", "scratchhsize", "scratchvsize", "scratchxoffset", "scratchyoffset", "scratchhoffset", "scratchvoffset", "scratchxposition", "scratchyposition", "scratchtopoffset", "scratchbottomoffset", "scratchleftoffset", "scratchrightoffset", "scratchcounterone", "scratchcountertwo", "scratchcounterthree", "scratchdimenone", "scratchdimentwo", "scratchdimenthree", "scratchskipone", "scratchskiptwo", "scratchskipthree", "scratchmuskipone", "scratchmuskiptwo", "scratchmuskipthree", "scratchtoksone", "scratchtokstwo", "scratchtoksthree", "scratchboxone", "scratchboxtwo", "scratchboxthree", "scratchnx", "scratchny", "scratchmx", "scratchmy", "scratchunicode", "scratchleftskip", "scratchrightskip", "scratchtopskip", "scratchbottomskip", "doif", "doifnot", 
"doifelse", "doifinset", "doifnotinset", "doifinsetelse", "doifnextcharelse", "doifnextoptionalelse", "doifnextbgroupelse", "doifnextparenthesiselse", "doiffastoptionalcheckelse", "doifundefinedelse", "doifdefinedelse", "doifundefined", "doifdefined", "doifelsevalue", "doifvalue", "doifnotvalue", "doifnothing", "doifsomething", "doifelsenothing", "doifsomethingelse", "doifvaluenothing", "doifvaluesomething", "doifelsevaluenothing", "doifdimensionelse", "doifnumberelse", "doifnumber", "doifnotnumber", "doifcommonelse", "doifcommon", "doifnotcommon", "doifinstring", "doifnotinstring", "doifinstringelse", "doifassignmentelse", "docheckassignment", "tracingall", "tracingnone", "loggingall", "removetoks", "appendtoks", "prependtoks", "appendtotoks", "prependtotoks", "to", "endgraf", "endpar", "everyendpar", "reseteverypar", "finishpar", "empty", "null", "space", "quad", "enspace", "obeyspaces", "obeylines", "obeyedspace", "obeyedline", "normalspace", "executeifdefined", "singleexpandafter", "doubleexpandafter", "tripleexpandafter", "dontleavehmode", "removelastspace", "removeunwantedspaces", "keepunwantedspaces", "wait", "writestatus", "define", "defineexpandable", "redefine", "setmeasure", "setemeasure", "setgmeasure", "setxmeasure", "definemeasure", "freezemeasure", "measure", "measured", "installcorenamespace", "getvalue", "getuvalue", "setvalue", "setevalue", "setgvalue", "setxvalue", "letvalue", "letgvalue", "resetvalue", "undefinevalue", "ignorevalue", "setuvalue", "setuevalue", "setugvalue", "setuxvalue", "globallet", "glet", "udef", "ugdef", "uedef", "uxdef", "checked", "unique", "getparameters", "geteparameters", "getgparameters", "getxparameters", "forgetparameters", "copyparameters", "getdummyparameters", "dummyparameter", "directdummyparameter", "setdummyparameter", "letdummyparameter", "usedummystyleandcolor", "usedummystyleparameter", "usedummycolorparameter", "processcommalist", "processcommacommand", "quitcommalist", "quitprevcommalist", "processaction", "processallactions", "processfirstactioninset", "processallactionsinset", "unexpanded", "expanded", "startexpanded", "stopexpanded", "protected", "protect", "unprotect", "firstofoneargument", "firstoftwoarguments", "secondoftwoarguments", "firstofthreearguments", "secondofthreearguments", "thirdofthreearguments", "firstoffourarguments", "secondoffourarguments", "thirdoffourarguments", "fourthoffourarguments", "firstoffivearguments", "secondoffivearguments", "thirdoffivearguments", "fourthoffivearguments", "fifthoffivearguments", "firstofsixarguments", "secondofsixarguments", "thirdofsixarguments", "fourthofsixarguments", "fifthofsixarguments", "sixthofsixarguments", "firstofoneunexpanded", "gobbleoneargument", "gobbletwoarguments", "gobblethreearguments", "gobblefourarguments", "gobblefivearguments", "gobblesixarguments", "gobblesevenarguments", "gobbleeightarguments", "gobbleninearguments", "gobbletenarguments", "gobbleoneoptional", "gobbletwooptionals", "gobblethreeoptionals", "gobblefouroptionals", "gobblefiveoptionals", "dorecurse", "doloop", "exitloop", "dostepwiserecurse", "recurselevel", "recursedepth", "dofastloopcs", "dowith", "newconstant", "setnewconstant", "setconstant", "setconstantvalue", "newconditional", "settrue", "setfalse", "settruevalue", "setfalsevalue", "newmacro", "setnewmacro", "newfraction", "newsignal", "dosingleempty", "dodoubleempty", "dotripleempty", "doquadrupleempty", "doquintupleempty", "dosixtupleempty", "doseventupleempty", "dosingleargument", "dodoubleargument", "dotripleargument", 
"doquadrupleargument", "doquintupleargument", "dosixtupleargument", "doseventupleargument", "dosinglegroupempty", "dodoublegroupempty", "dotriplegroupempty", "doquadruplegroupempty", "doquintuplegroupempty", "permitspacesbetweengroups", "dontpermitspacesbetweengroups", "nopdfcompression", "maximumpdfcompression", "normalpdfcompression", "modulonumber", "dividenumber", "getfirstcharacter", "doiffirstcharelse", "startnointerference", "stopnointerference", "twodigits", "threedigits", "leftorright", "strut", "setstrut", "strutbox", "strutht", "strutdp", "strutwd", "struthtdp", "begstrut", "endstrut", "lineheight", "ordordspacing", "ordopspacing", "ordbinspacing", "ordrelspacing", "ordopenspacing", "ordclosespacing", "ordpunctspacing", "ordinnerspacing", "opordspacing", "opopspacing", "opbinspacing", "oprelspacing", "opopenspacing", "opclosespacing", "oppunctspacing", "opinnerspacing", "binordspacing", "binopspacing", "binbinspacing", "binrelspacing", "binopenspacing", "binclosespacing", "binpunctspacing", "bininnerspacing", "relordspacing", "relopspacing", "relbinspacing", "relrelspacing", "relopenspacing", "relclosespacing", "relpunctspacing", "relinnerspacing", "openordspacing", "openopspacing", "openbinspacing", "openrelspacing", "openopenspacing", "openclosespacing", "openpunctspacing", "openinnerspacing", "closeordspacing", "closeopspacing", "closebinspacing", "closerelspacing", "closeopenspacing", "closeclosespacing", "closepunctspacing", "closeinnerspacing", "punctordspacing", "punctopspacing", "punctbinspacing", "punctrelspacing", "punctopenspacing", "punctclosespacing", "punctpunctspacing", "punctinnerspacing", "innerordspacing", "inneropspacing", "innerbinspacing", "innerrelspacing", "inneropenspacing", "innerclosespacing", "innerpunctspacing", "innerinnerspacing", "normalreqno", "startimath", "stopimath", "normalstartimath", "normalstopimath", "startdmath", "stopdmath", "normalstartdmath", "normalstopdmath", "uncramped", "cramped", "triggermathstyle", "mathstylefont", "mathsmallstylefont", "mathstyleface", "mathsmallstyleface", "mathstylecommand", "mathpalette", "mathstylehbox", "mathstylevbox", "mathstylevcenter", "mathstylevcenteredhbox", "mathstylevcenteredvbox", "mathtext", "setmathsmalltextbox", "setmathtextbox", "triggerdisplaystyle", "triggertextstyle", "triggerscriptstyle", "triggerscriptscriptstyle", "triggeruncrampedstyle", "triggercrampedstyle", "triggersmallstyle", "triggeruncrampedsmallstyle", "triggercrampedsmallstyle", "triggerbigstyle", "triggeruncrampedbigstyle", "triggercrampedbigstyle", "luaexpr", "expdoifelse", "expdoif", "expdoifnot", "expdoifcommonelse", "expdoifinsetelse", "ctxdirectlua", "ctxlatelua", "ctxsprint", "ctxwrite", "ctxcommand", "ctxdirectcommand", "ctxlatecommand", "ctxreport", "ctxlua", "luacode", "lateluacode", "directluacode", "registerctxluafile", "ctxloadluafile", "luaversion", "luamajorversion", "luaminorversion", "ctxluacode", "luaconditional", "luaexpanded", "startluaparameterset", "stopluaparameterset", "luaparameterset", "definenamedlua", "obeylualines", "obeyluatokens", "startluacode", "stopluacode", "startlua", "stoplua", "carryoverpar", "assumelongusagecs", "Umathbotaccent", "righttolefthbox", "lefttorighthbox", "righttoleftvbox", "lefttorightvbox", "righttoleftvtop", "lefttorightvtop", "rtlhbox", "ltrhbox", "rtlvbox", "ltrvbox", "rtlvtop", "ltrvtop", "autodirhbox", "autodirvbox", "autodirvtop", "lefttoright", "righttoleft", "synchronizelayoutdirection", "synchronizedisplaydirection", "synchronizeinlinedirection", "lesshyphens", 
"morehyphens", "nohyphens", "dohyphens", "Ucheckedstartdisplaymath", "Ucheckedstopdisplaymath" },
-}
\ No newline at end of file
diff --git a/context/data/scite/lexers/scite-context-lexer-mps.lua b/context/data/scite/lexers/scite-context-lexer-mps.lua
deleted file mode 100644
index f0d88eb3b..000000000
--- a/context/data/scite/lexers/scite-context-lexer-mps.lua
+++ /dev/null
@@ -1,155 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for metafun",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
-local global, string, table, lpeg = _G, string, table, lpeg
-local token, exact_match = lexer.token, lexer.exact_match
-local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
-local type = type
-
-local metafunlexer = { _NAME = "mps", _FILENAME = "scite-context-lexer-mps" }
-local whitespace = lexer.WHITESPACE
-local context = lexer.context
-
-local metapostprimitives = { }
-local metapostinternals = { }
-local metapostshortcuts = { }
-local metapostcommands = { }
-
-local metafuninternals = { }
-local metafunshortcuts = { }
-local metafuncommands = { }
-
-local mergedshortcuts = { }
-local mergedinternals = { }
-
-do
-
- local definitions = context.loaddefinitions("scite-context-data-metapost")
-
- if definitions then
- metapostprimitives = definitions.primitives or { }
- metapostinternals = definitions.internals or { }
- metapostshortcuts = definitions.shortcuts or { }
- metapostcommands = definitions.commands or { }
- end
-
- local definitions = context.loaddefinitions("scite-context-data-metafun")
-
- if definitions then
- metafuninternals = definitions.internals or { }
- metafunshortcuts = definitions.shortcuts or { }
- metafuncommands = definitions.commands or { }
- end
-
- for i=1,#metapostshortcuts do
- mergedshortcuts[#mergedshortcuts+1] = metapostshortcuts[i]
- end
- for i=1,#metafunshortcuts do
- mergedshortcuts[#mergedshortcuts+1] = metafunshortcuts[i]
- end
-
- for i=1,#metapostinternals do
- mergedinternals[#mergedinternals+1] = metapostinternals[i]
- end
- for i=1,#metafuninternals do
- mergedinternals[#mergedinternals+1] = metafuninternals[i]
- end
-
-end
-
-local space = lexer.space -- S(" \n\r\t\f\v")
-local any = lexer.any
-
-local dquote = P('"')
-local cstoken = R("az","AZ") + P("_")
-local mptoken = R("az","AZ")
-local leftbrace = P("{")
-local rightbrace = P("}")
-local number = context.patterns.real
-
-local cstokentex = R("az","AZ","\127\255") + S("@!?_")
-
--- we could collapse as in tex
-
-local spacing = token(whitespace, space^1)
-local rest = token('default', any)
-local comment = token('comment', P('%') * (1-S("\n\r"))^0)
-local internal = token('reserved', exact_match(mergedshortcuts,false))
-local shortcut = token('data', exact_match(mergedinternals))
-local helper = token('command', exact_match(metafuncommands))
-local plain = token('plain', exact_match(metapostcommands))
-local quoted = token('quote', dquote)
- * token('string', P(1-dquote)^0)
- * token('quote', dquote)
-local texstuff = token('quote', P("btex ") + P("verbatimtex "))
- * token('string', P(1-P(" etex"))^0)
- * token('quote', P(" etex"))
-local primitive = token('primitive', exact_match(metapostprimitives))
-local identifier = token('default', cstoken^1)
-local number = token('number', number)
-local grouping = token('grouping', S("()[]{}")) -- can be an option
-local special = token('special', S("#()[]{}<>=:\"")) -- or else := <> etc split
-local texlike = token('warning', P("\\") * cstokentex^1)
-local extra = token('extra', P("+-+") + P("++") + S("`~%^&_-+*/\'|\\"))
-
-local nested = P { leftbrace * (V(1) + (1-rightbrace))^0 * rightbrace }
-local texlike = token('embedded', P("\\") * (P("MP") + P("mp")) * mptoken^1)
- * spacing^0
- * token('grouping', leftbrace)
- * token('rest', (nested + (1-rightbrace))^0 )
- * token('grouping', rightbrace)
- + token('warning', P("\\") * cstokentex^1)
-
-metafunlexer._rules = {
- { 'whitespace', spacing },
- { 'comment', comment },
- { 'internal', internal },
- { 'shortcut', shortcut },
- { 'helper', helper },
- { 'plain', plain },
- { 'primitive', primitive },
- { 'texstuff', texstuff },
- { 'identifier', identifier },
- { 'number', number },
- { 'quoted', quoted },
- -- { 'grouping', grouping }, -- can be an option
- { 'special', special },
- { 'texlike', texlike },
- { 'extra', extra },
- { 'rest', rest },
-}
-
-metafunlexer._tokenstyles = context.styleset
-
-metafunlexer._foldpattern = R("az")^2 -- separate entry else interference
-
-metafunlexer._foldsymbols = {
- _patterns = {
- '[a-z][a-z]+',
- },
- ["primitive"] = {
- ["beginfig"] = 1,
- ["endfig"] = -1,
- ["def"] = 1,
- ["vardef"] = 1,
- ["primarydef"] = 1,
- ["secondarydef" ] = 1,
- ["tertiarydef"] = 1,
- ["enddef"] = -1,
- ["if"] = 1,
- ["fi"] = -1,
- ["for"] = 1,
- ["forever"] = 1,
- ["endfor"] = -1,
- }
-}
-
-return metafunlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf-object.lua b/context/data/scite/lexers/scite-context-lexer-pdf-object.lua
deleted file mode 100644
index 6d0b6d8da..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf-object.lua
+++ /dev/null
@@ -1,117 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local lexer = lexer
-local token = lexer.token
-local P, R, S, C, V = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.V
-
-local pdfobjectlexer = { _NAME = "pdf-object", _FILENAME = "scite-context-lexer-pdf-object" }
-local whitespace = lexer.WHITESPACE -- triggers states
-local context = lexer.context
-local patterns = context.patterns
-
-local space = lexer.space
-local somespace = space^1
-
-local newline = S("\n\r")
-local real = patterns.real
-local cardinal = patterns.cardinal
-
-local lparent = P("(")
-local rparent = P(")")
-local langle = P("<")
-local rangle = P(">")
-local escape = P("\\")
-local anything = P(1)
-local unicodetrigger = P("feff")
-
-local nametoken = 1 - space - S("<>/[]()")
-local name = P("/") * nametoken^1
-
-local p_string = P { ( escape * anything + lparent * V(1) * rparent + (1 - rparent) )^0 }
-
-local t_spacing = token(whitespace, space^1)
-local t_spaces = token(whitespace, space^1)^0
-
-local p_stream = P("stream")
-local p_endstream = P("endstream")
------ p_obj = P("obj")
-local p_endobj = P("endobj")
-local p_reference = P("R")
-
-local p_objectnumber = patterns.cardinal
-local p_comment = P('%') * (1-S("\n\r"))^0
-
-local string = token("quote", lparent)
- * token("string", p_string)
- * token("quote", rparent)
-local unicode = token("quote", langle)
- * token("plain", unicodetrigger)
- * token("string", (1-rangle)^1)
- * token("quote", rangle)
-local whatsit = token("quote", langle)
- * token("string", (1-rangle)^1)
- * token("quote", rangle)
-local keyword = token("command", name)
-local constant = token("constant", name)
-local number = token('number', real)
--- local reference = token("number", cardinal)
--- * t_spacing
--- * token("number", cardinal)
-local reserved = token("number", P("true") + P("false") + P("NULL"))
-local reference = token("warning", cardinal)
- * t_spacing
- * token("warning", cardinal)
- * t_spacing
- * token("keyword", p_reference)
-local t_comment = token("comment", p_comment)
-
--- t_openobject = token("number", p_objectnumber)
--- * t_spacing
--- * token("number", p_objectnumber)
--- * t_spacing
--- * token("keyword", p_obj)
-local t_closeobject = token("keyword", p_endobj)
-
-local t_opendictionary = token("grouping", P("<<"))
-local t_closedictionary = token("grouping", P(">>"))
-
-local t_openarray = token("grouping", P("["))
-local t_closearray = token("grouping", P("]"))
-
-local t_stream = token("keyword", p_stream)
--- * token("default", newline * (1-newline*p_endstream*newline)^1 * newline)
- * token("default", (1 - p_endstream)^1)
- * token("keyword", p_endstream)
-
-local t_dictionary = { "dictionary",
- dictionary = t_opendictionary * (t_spaces * keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
- whatever = V("dictionary") + V("array") + constant + reference + string + unicode + number + whatsit,
- }
-
-local t_object = { "object", -- weird that we need to catch the end here (probably otherwise an invalid lpeg)
- object = t_spaces * (V("dictionary") * t_spaces * t_stream^-1 + V("array") + V("number") + t_spaces) * t_spaces * t_closeobject,
- dictionary = t_opendictionary * (t_spaces * keyword * t_spaces * V("whatever"))^0 * t_spaces * t_closedictionary,
- array = t_openarray * (t_spaces * V("whatever"))^0 * t_spaces * t_closearray,
- number = number,
- whatever = V("dictionary") + V("array") + constant + reference + string + unicode + number + reserved + whatsit,
- }
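
The recursion bottoms out in V("whatever"), which is what lets dictionaries nest inside arrays and vice versa. The same idea stripped of the token captures, as a standalone sketch:

    local lpeg = require("lpeg")
    local P, R, S, V = lpeg.P, lpeg.R, lpeg.S, lpeg.V

    local space  = S(" \n\r\t")^0
    local name   = P("/") * (1 - S(" \n\r\t<>/[]()"))^1
    local number = S("+-")^-1 * R("09")^1

    local dict = P { "dictionary",
        dictionary = P("<<") * (space * name * space * V("whatever"))^0 * space * P(">>"),
        array      = P("[") * (space * V("whatever"))^0 * space * P("]"),
        whatever   = V("dictionary") + V("array") + name + number,
    }

    assert(lpeg.match(dict, "<< /Type /Page /Kids [ 1 2 3 ] >>"))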
-
-pdfobjectlexer._shared = {
- dictionary = t_dictionary,
-}
-
-pdfobjectlexer._rules = {
- { 'whitespace', t_spacing },
- { 'object', t_object },
-}
-
-pdfobjectlexer._tokenstyles = context.styleset
-
-return pdfobjectlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua b/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua
deleted file mode 100644
index f205e9130..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf-xref.lua
+++ /dev/null
@@ -1,51 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf xref",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local lexer = lexer
-local token = lexer.token
-local P, R = lpeg.P, lpeg.R
-
--- xref
--- cardinal cardinal [character]
--- ..
--- %%EOF | startxref | trailer
-
-local pdfxreflexer = { _NAME = "pdf-xref", _FILENAME = "scite-context-lexer-pdf-xref" }
-local whitespace = lexer.WHITESPACE -- triggers states
-local context = lexer.context
-local patterns = context.patterns
-
-local pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
-
-local spacing = patterns.spacing
-
-local t_spacing = token(whitespace, spacing)
-
-local p_trailer = P("trailer")
-
-local t_number = token("number", R("09")^1)
- * t_spacing
- * token("number", R("09")^1)
- * t_spacing
- * (token("keyword", R("az","AZ")) * t_spacing)^-1
-
-local t_xref = t_number^1
-
--- local t_xref = token("default", (1-p_trailer)^1)
--- * token("keyword", p_trailer)
--- * t_spacing
--- * pdfobjectlexer._shared.dictionary
-
-pdfxreflexer._rules = {
- { 'whitespace', t_spacing },
- { 'xref', t_xref },
-}
-
-pdfxreflexer._tokenstyles = context.styleset
-
-return pdfxreflexer
diff --git a/context/data/scite/lexers/scite-context-lexer-pdf.lua b/context/data/scite/lexers/scite-context-lexer-pdf.lua
deleted file mode 100644
index 685fdb16e..000000000
--- a/context/data/scite/lexers/scite-context-lexer-pdf.lua
+++ /dev/null
@@ -1,80 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for pdf",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
-local token = lexer.token
-local P, R, S = lpeg.P, lpeg.R, lpeg.S
-
-local pdflexer = { _NAME = "pdf", _FILENAME = "scite-context-lexer-pdf" }
-local whitespace = lexer.WHITESPACE -- triggers states
-
-local pdfobjectlexer = lexer.load("scite-context-lexer-pdf-object")
-local pdfxreflexer = lexer.load("scite-context-lexer-pdf-xref")
-
-local context = lexer.context
-local patterns = context.patterns
-
-local space = patterns.space
-local spacing = patterns.spacing
-local nospacing = patterns.nospacing
-local anything = patterns.anything
-local restofline = patterns.restofline
-
-local t_spacing = token(whitespace, spacing)
-local t_rest = token("default", nospacing) -- anything
-
-local p_obj = P("obj")
-local p_endobj = P("endobj")
-local p_xref = P("xref")
-local p_startxref = P("startxref")
-local p_eof = P("%%EOF")
-local p_trailer = P("trailer")
-
-local p_objectnumber = patterns.cardinal
-local p_comment = P('%') * restofline
-
-local t_comment = token("comment", p_comment)
-local t_openobject = token("warning", p_objectnumber)
- * t_spacing
- * token("warning", p_objectnumber)
- * t_spacing
- * token("keyword", p_obj)
- * t_spacing^0
-local t_closeobject = token("keyword", p_endobj)
-
--- We could do clever xref parsing, but why should we (i.e. check for the
--- xref body)? As a pdf file is not edited, we can do without a nested
--- lexer anyway.
-
-local t_trailer = token("keyword", p_trailer)
- * t_spacing
- * pdfobjectlexer._shared.dictionary
-
-local t_openxref = token("plain", p_xref)
-local t_closexref = token("plain", p_startxref)
- + token("comment", p_eof)
- + t_trailer
-local t_startxref = token("plain", p_startxref)
- * t_spacing
- * token("number", R("09")^1)
-
-lexer.embed_lexer(pdflexer, pdfobjectlexer, t_openobject, t_closeobject)
-lexer.embed_lexer(pdflexer, pdfxreflexer, t_openxref, t_closexref)
-
-pdflexer._rules = {
- { 'whitespace', t_spacing },
- { 'comment', t_comment },
- { 'xref', t_startxref },
- { 'rest', t_rest },
-}
-
-pdflexer._tokenstyles = context.styleset
-
-return pdflexer
diff --git a/context/data/scite/lexers/scite-context-lexer-web.lua b/context/data/scite/lexers/scite-context-lexer-web.lua
deleted file mode 100644
index f59a3205d..000000000
--- a/context/data/scite/lexers/scite-context-lexer-web.lua
+++ /dev/null
@@ -1,155 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for w",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- this will be extended
-
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local lexer = lexer
-local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
-local P, R, S, C, Cg, Cb, Cs, Cmt, lpegmatch = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt, lpeg.match
-local setmetatable = setmetatable
-
-local weblexer = { _NAME = "web", _FILENAME = "scite-context-lexer-web" }
-local whitespace = lexer.WHITESPACE
-local context = lexer.context
-
-local keywords = { -- copied from cpp.lua
- -- c
- 'asm', 'auto', 'break', 'case', 'const', 'continue', 'default', 'do', 'else',
- 'extern', 'false', 'for', 'goto', 'if', 'inline', 'register', 'return',
- 'sizeof', 'static', 'switch', 'true', 'typedef', 'volatile', 'while',
- 'restrict',
- -- hm
- '_Bool', '_Complex', '_Pragma', '_Imaginary',
- -- c++.
- 'catch', 'class', 'const_cast', 'delete', 'dynamic_cast', 'explicit',
- 'export', 'friend', 'mutable', 'namespace', 'new', 'operator', 'private',
- 'protected', 'public', 'signals', 'slots', 'reinterpret_cast',
- 'static_assert', 'static_cast', 'template', 'this', 'throw', 'try', 'typeid',
- 'typename', 'using', 'virtual'
-}
-
-local datatypes = { -- copied from cpp.lua
- 'bool', 'char', 'double', 'enum', 'float', 'int', 'long', 'short', 'signed',
- 'struct', 'union', 'unsigned', 'void'
-}
-
-local macros = { -- copied from cpp.lua
- 'define', 'elif', 'else', 'endif', 'error', 'if', 'ifdef', 'ifndef', 'import',
- 'include', 'line', 'pragma', 'undef', 'using', 'warning'
-}
-
-local space = lexer.space -- S(" \n\r\t\f\v")
-local any = lexer.any
-local patterns = context.patterns
-local restofline = patterns.restofline
-local startofline = patterns.startofline
-
-local squote = P("'")
-local dquote = P('"')
-local escaped = P("\\") * P(1)
-local slashes = P('//')
-local begincomment = P("/*")
-local endcomment = P("*/")
-local percent = P("%")
-
-local spacing = token(whitespace, space^1)
-local rest = token("default", any)
-
-local shortcomment = token("comment", slashes * restofline^0)
-local longcomment = token("comment", begincomment * (1-endcomment)^0 * endcomment^-1)
-local texcomment = token("comment", percent * restofline^0)
-
-local shortstring = token("quote", dquote) -- can be shared
- * token("string", (escaped + (1-dquote))^0)
- * token("quote", dquote)
- + token("quote", squote)
- * token("string", (escaped + (1-squote))^0)
- * token("quote", squote)
-
-local integer = P("-")^-1 * (lexer.hex_num + lexer.dec_num)
-local number = token("number", lexer.float + integer)
-
-local validword = R("AZ","az","__") * R("AZ","az","__","09")^0
-
-local identifier = token("default",validword)
-
-local operator = token("special", S('+-*/%^!=<>;:{}[]().&|?~'))
-
------ optionalspace = spacing^0
-
-local p_keywords = exact_match(keywords )
-local p_datatypes = exact_match(datatypes)
-local p_macros = exact_match(macros)
-
-local keyword = token("keyword", p_keywords)
-local datatype = token("keyword", p_datatypes)
-local identifier = token("default", validword)
-
-local macro = token("data", #P('#') * startofline * P('#') * S('\t ')^0 * p_macros)
-
-local beginweb = P("@")
-local endweb = P("@c")
-
-local webcomment = token("comment", #beginweb * startofline * beginweb * (1-endweb)^0 * endweb)
-
-local texlexer = lexer.load('scite-context-lexer-tex')
-
-lexer.embed_lexer(weblexer, texlexer, #beginweb * startofline * token("comment",beginweb), token("comment",endweb))
-
-weblexer._rules = {
- { 'whitespace', spacing },
- { 'keyword', keyword },
- { 'type', datatype },
- { 'identifier', identifier },
- { 'string', shortstring },
- -- { 'webcomment', webcomment },
- { 'texcomment', texcomment },
- { 'longcomment', longcomment },
- { 'shortcomment', shortcomment },
- { 'number', number },
- { 'macro', macro },
- { 'operator', operator },
- { 'rest', rest },
-}
-
-weblexer._tokenstyles = context.styleset
-
-weblexer._foldpattern = P("/*") + P("*/") + S("{}") -- separate entry else interference
-
-weblexer._foldsymbols = {
- _patterns = {
- '[{}]',
- '/%*',
- '%*/',
- },
- -- ["data"] = { -- macro
- -- ['region'] = 1,
- -- ['endregion'] = -1,
- -- ['if'] = 1,
- -- ['ifdef'] = 1,
- -- ['ifndef'] = 1,
- -- ['endif'] = -1,
- -- },
- ["special"] = { -- operator
- ['{'] = 1,
- ['}'] = -1,
- },
- ["comment"] = {
- ['/*'] = 1,
- ['*/'] = -1,
- }
-}
-
--- fold by indentation instead:
---
-weblexer._foldpattern = nil
-weblexer._foldsymbols = nil
-
-return weblexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-comment.lua b/context/data/scite/lexers/scite-context-lexer-xml-comment.lua
deleted file mode 100644
index 104310f94..000000000
--- a/context/data/scite/lexers/scite-context-lexer-xml-comment.lua
+++ /dev/null
@@ -1,42 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for xml comments",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local lexer = lexer
-local token = lexer.token
-local P = lpeg.P
-
-local xmlcommentlexer = { _NAME = "xml-comment", _FILENAME = "scite-context-lexer-xml-comment" }
-local whitespace = lexer.WHITESPACE
-local context = lexer.context
-
-local space = lexer.space
-local nospace = 1 - space - P("-->")
-
-local p_spaces = token(whitespace, space^1)
-local p_comment = token("comment", nospace^1)
-
-xmlcommentlexer._rules = {
- { "whitespace", p_spaces },
- { "comment", p_comment },
-}
-
-xmlcommentlexer._tokenstyles = context.styleset
-
-xmlcommentlexer._foldpattern = P("<!--") + P("-->")
-
-xmlcommentlexer._foldsymbols = {
- _patterns = {
- "<%!%-%-", "%-%->", -- comments
- },
- ["comment"] = {
- ["<!--"] = 1,
- ["-->" ] = -1,
- }
-}
-
-return xmlcommentlexer
diff --git a/context/data/scite/lexers/scite-context-lexer-xml-script.lua b/context/data/scite/lexers/scite-context-lexer-xml-script.lua
deleted file mode 100644
index fd1aae7f7..000000000
--- a/context/data/scite/lexers/scite-context-lexer-xml-script.lua
+++ /dev/null
@@ -1,30 +0,0 @@
-local info = {
- version = 1.002,
- comment = "scintilla lpeg lexer for xml cdata",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
-local lexer = lexer
-local token = lexer.token
-local P = lpeg.P
-
-local xmlscriptlexer = { _NAME = "xml-script", _FILENAME = "scite-context-lexer-xml-script" }
-local whitespace = lexer.WHITESPACE -- triggers states
-local context = lexer.context
-
-local space = lexer.space
-local nospace = 1 - space - (P("</") * P("script") + P("SCRIPT")) * P(">")
-
-local p_spaces = token(whitespace, space^1)
-local p_cdata = token("default", nospace^1)
-
-xmlscriptlexer._rules = {
- { "whitespace", p_spaces },
- { "script", p_cdata },
-}
-
-xmlscriptlexer._tokenstyles = context.styleset
-
-return xmlscriptlexer
diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/lexers/scite-context-lexer.lua
deleted file mode 100644
index 5c7f40e7d..000000000
--- a/context/data/scite/lexers/scite-context-lexer.lua
+++ /dev/null
@@ -1,876 +0,0 @@
-local info = {
- version = 1.324,
- comment = "basics for scintilla lpeg lexer for context/metafun"
- .. " (contains copyrighted code from mitchell.att.foicica.com)",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- todo: move all code here
--- todo: explore adapted dll ... properties + init
--- todo: play with hotspot and other properties
-
--- wish: replace errorlist lexer (per language!)
--- wish: access to all scite properties
-
--- The fold and lex functions are copied and patched from original code by Mitchell (see
--- lexer.lua). All errors are mine. The ability to use lpeg is a really nice addition and a
--- brilliant move. The code is a byproduct of the (mainly Lua based) textadept (still a
--- rapidly moving target) that unfortunately misses a realtime output pane. On the other
--- hand, SciTE is somewhat crippled by the fact that we cannot plug our own (language
--- dependent) lexer into the output pane (somehow the errorlist lexer is hard coded into
--- the editor). Hopefully that will change some day.
---
--- Starting with SciTE version 3.20 there is an issue with coloring. As we still lack
--- a connection with scite itself (properties as well as printing to the log pane) we
--- cannot trace this (on windows). As far as I can see, there are no fundamental
--- changes in lexer.lua or LexLPeg.cxx so it must be in scintilla itself. So for the
--- moment I stick to 3.10. Indicators are: no lexing of 'next' and 'goto <label>' in the
--- Lua lexer and no brace highlighting either. Interestingly, it does work ok in
--- the cld lexer (so the Lua code is okay). Also the fact that char-def.lua lexes fast
--- is a signal that the lexer quits somewhere halfway.
---
--- After checking 3.24 and adapting to the new lexer tables things are okay again. So,
--- this version assumes 3.24 or higher. In 3.24 we have a different token result, i.e. no
--- longer a { tag, pattern } but just two return values. I didn't check other changes but
--- will do that when I run into issues. I had optimized these small tables by hashing which
--- was more efficient but this is no longer needed.
---
--- In 3.3.1 another major change took place: some helper constants (maybe they're no
--- longer constants) and functions were moved into the lexer modules namespace but the
--- functions are assigned to the Lua module afterward so we cannot alias them beforehand.
--- We're probably getting close to a stable interface now.
---
--- I've considered making a whole copy and patch the other functions too as we need
--- an extra nesting model. However, I don't want to maintain too much. An unfortunate
--- change in 3.03 is that no longer a script can be specified. This means that instead
--- of loading the extensions via the properties file, we now need to load them in our
--- own lexers, unless of course we replace lexer.lua completely (which adds another
--- installation issue).
---
--- Another change has been that _LEXERHOME is no longer available. It looks like more and
--- more functionality gets dropped so maybe at some point we need to ship our own dll/so
--- files. For instance, I'd like to have access to the current filename and other scite
--- properties. For instance, we could cache some info with each file, if only we had
--- knowledge of what file we're dealing with.
---
--- For huge files folding can be pretty slow and I do have some large ones that I keep
--- open all the time. Loading is normally no issue, unless one has remembered the status
--- and the cursor is at the last line of a 200K line file. Optimizing the fold function
--- brought down loading of char-def.lua from 14 sec => 8 sec. Replacing the word_match
--- function and optimizing the lex function gained another 2+ seconds. A 6 second load
--- is quite ok for me. The changed lexer table structure (no subtables) brings loading
--- down to a few seconds.
---
--- When the lexer path is copied to the textadept lexer path, and the theme definition to
--- theme path (as lexer.lua), the lexer works there as well. When I have time and motive
--- I will make a proper setup file to tune the look and feel a bit and associate suffixes
--- with the context lexer. The textadept editor has a nice style tracing option but lacks
--- the tabs for selecting files that scite has. It also has no integrated run that pipes
--- to the log pane (I wonder if it could borrow code from the console2 project). Interesting
--- is that the jit version of textadept crashes on lexing large files (and does not feel
--- faster either).
---
--- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which
--- means that we need to have it frozen at the moment we load another lexer. Because spacing
--- is used to revert to a parent lexer we need to make sure that we load children as late
--- as possible in order not to get the wrong whitespace trigger. This took me quite a while
--- to figure out (not being that familiar with the internals). The lex and fold functions
--- have been optimized. It is a pity that there is no proper print available. Another thing
--- needed is a default style in our own theme style definition, as otherwise we get wrong
--- nested lexers, especially if they are larger than a view. This is the hardest part of
--- getting things right.
---
--- Eventually it might be safer to copy the other methods from lexer.lua here as well so
--- that we have no dependencies, apart from the c library (for which at some point the api
--- will be stable I hope).
---
--- It's a pity that there is no scintillua library for the OSX version of scite. Even
--- better would be to have the scintillua library as an integral part of scite, as that way I
--- could use OSX alongside windows and linux (depending on needs). Also nice would be to
--- have a proper interface to scite then because currently the lexer is rather isolated and the
--- lua version does not provide all standard libraries. It would also be good to have lpeg
--- support in the regular scite lua extension (currently you need to pick it up from someplace
--- else).
-
-local lpeg = require 'lpeg'
-
-local R, P, S, C, V, Cp, Cs, Ct, Cmt, Cc, Cf, Cg, Carg = lpeg.R, lpeg.P, lpeg.S, lpeg.C, lpeg.V, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc, lpeg.Cf, lpeg.Cg, lpeg.Carg
-local lpegmatch = lpeg.match
-local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub
-local concat = table.concat
-local global = _G
-local type, next, setmetatable, rawset = type, next, setmetatable, rawset
-
--- less confusing as we also use lexer for the current lexer and local _M = lexer is just ugly
-
-local lexers = lexer or { } -- + fallback for syntax check
-
--- ok, let's also move helpers here (todo: all go here)
-
-local sign = S("+-")
-local digit = R("09")
-local octdigit = R("07")
-local hexdigit = R("09","AF","af")
-
-lexers.sign = sign
-lexers.digit = digit
-lexers.octdigit = octdigit
-lexers.hexdigit = hexdigit
-lexers.xdigit = hexdigit
-
-lexers.dec_num = digit^1
-lexers.oct_num = P("0")
- * octdigit^1
-lexers.hex_num = P("0") * S("xX")
- * (hexdigit^0 * '.' * hexdigit^1 + hexdigit^1 * '.' * hexdigit^0 + hexdigit^1)
- * (S("pP") * sign^-1 * hexdigit^1)^-1
-lexers.float = sign^-1
- * (digit^0 * '.' * digit^1 + digit^1 * '.' * digit^0 + digit^1)
- * S("eE") * sign^-1 * digit^1
-
-lexers.dec_int = sign^-1 * lexers.dec_num
-lexers.oct_int = sign^-1 * lexers.oct_num
-lexers.hex_int = sign^-1 * lexers.hex_num
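
Quick checks of these numeric helpers (a sketch; note that float as written here demands an exponent, plain numbers are covered by the other patterns):

    assert(lpeg.match(lexers.float, "1.5e-3") == 7)
    assert(lpeg.match(lexers.hex_num, "0x1p4") == 6)
    assert(lpeg.match(lexers.dec_int, "-42") == 4)
    assert(not lpeg.match(lexers.float, "10")) -- no exponent, no float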
-
--- these helpers are set afterwards, so we delay their initialization ... there is no
--- need to alias them each time again, and this way we can more easily adapt to updates
-
-local get_style_at, get_indent_amount, get_property, get_fold_level, FOLD_BASE, FOLD_HEADER, FOLD_BLANK, initialize
-
-initialize = function()
- FOLD_BASE = lexers.FOLD_BASE or SC_FOLDLEVELBASE
- FOLD_HEADER = lexers.FOLD_HEADER or SC_FOLDLEVELHEADERFLAG
- FOLD_BLANK = lexers.FOLD_BLANK or SC_FOLDLEVELWHITEFLAG
- get_style_at = lexers.get_style_at or GetStyleAt
- get_indent_amount = lexers.get_indent_amount or GetIndentAmount
- get_property = lexers.get_property or GetProperty
- get_fold_level = lexers.get_fold_level or GetFoldLevel
- --
- initialize = nil
-end
-
--- we create our own extra namespace for extensions and helpers
-
-lexers.context = lexers.context or { }
-local context = lexers.context
-
-context.patterns = context.patterns or { }
-local patterns = context.patterns
-
-lexers._CONTEXTEXTENSIONS = true
-
-local locations = {
- -- lexers.context.path,
- "data", -- optional data directory
- "..", -- regular scite directory
-}
-
-local function collect(name)
--- local definitions = loadfile(name .. ".luc") or loadfile(name .. ".lua")
- local okay, definitions = pcall(function () return require(name) end)
- if okay then
- if type(definitions) == "function" then
- definitions = definitions()
- end
- if type(definitions) == "table" then
- return definitions
- end
- end
-end
-
-function context.loaddefinitions(name)
- for i=1,#locations do
- local data = collect(locations[i] .. "/" .. name)
- if data then
- return data
- end
- end
-end
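
The loaded file is expected to return a table of word lists. A minimal definitions file in the shape the metafun lexer earlier in this diff consumes (field names taken from that usage, the words themselves are just examples):

    -- e.g. data/scite-context-data-metapost.lua (sketch)
    return {
        primitives = { "beginfig", "endfig", "def", "enddef" },
        internals  = { "pausing", "tracingonline" },
        shortcuts  = { "..", "--" },
        commands   = { "drawarrow", "drawdblarrow" },
    }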
-
-function context.word_match(words,word_chars,case_insensitive)
- local chars = '%w_' -- maybe just "" when word_chars
- if word_chars then
- chars = '^([' .. chars .. gsub(word_chars,'([%^%]%-])', '%%%1') ..']+)'
- else
- chars = '^([' .. chars ..']+)'
- end
- if case_insensitive then
- local word_list = { }
- for i=1,#words do
- word_list[lower(words[i])] = true
- end
- return P(function(input, index)
- local s, e, word = find(input,chars,index)
- return word and word_list[lower(word)] and e + 1 or nil
- end)
- else
- local word_list = { }
- for i=1,#words do
- word_list[words[i]] = true
- end
- return P(function(input, index)
- local s, e, word = find(input,chars,index)
- return word and word_list[word] and e + 1 or nil
- end)
- end
-end
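
A usage sketch of this replacement (the word list is illustrative):

    local p = context.word_match({ "begin", "end" }, nil, true) -- case insensitive
    assert(lpeg.match(p, "BEGIN") == 6) -- succeeds, position just past the word
    assert(not lpeg.match(p, "middle"))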
-
-local idtoken = R("az","AZ","\127\255","__")
-local digit = R("09")
-local sign = S("+-")
-local period = P(".")
-local space = S(" \n\r\t\f\v")
-
-patterns.idtoken = idtoken
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.period = period
-
-patterns.cardinal = digit^1
-patterns.integer = sign^-1 * digit^1
-
-patterns.real =
- sign^-1 * ( -- at most one
- digit^1 * period * digit^0 -- 10.0 10.
- + digit^0 * period * digit^1 -- 0.10 .10
- + digit^1 -- 10
- )
-
-patterns.restofline = (1-S("\n\r"))^1
-patterns.space = space
-patterns.spacing = space^1
-patterns.nospacing = (1-space)^1
-patterns.anything = P(1)
-
-local endof = S("\n\r\f")
-
-patterns.startofline = P(function(input,index)
- return (index == 1 or lpegmatch(endof,input,index-1)) and index
-end)
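
The match-time function consumes nothing; it succeeds only at offset 1 or right after a line ending. Sketch:

    assert(lpeg.match(patterns.startofline, "x") == 1)
    assert(not lpeg.match(lpeg.P(1) * patterns.startofline, "xy"))
    assert(lpeg.match(lpeg.P(1) * patterns.startofline, "\ny") == 2)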
-
-function context.exact_match(words,word_chars,case_insensitive)
- local characters = concat(words)
- local pattern -- the concat catches _ etc
- if word_chars == true or word_chars == false or word_chars == nil then
- word_chars = ""
- end
- if type(word_chars) == "string" then
- pattern = S(characters) + idtoken
- if case_insensitive then
- pattern = pattern + S(upper(characters)) + S(lower(characters))
- end
- if word_chars ~= "" then
- pattern = pattern + S(word_chars)
- end
- elseif word_chars then
- pattern = word_chars
- end
- if case_insensitive then
- local list = { }
- for i=1,#words do
- list[lower(words[i])] = true
- end
- return Cmt(pattern^1, function(_,i,s)
- return list[lower(s)] -- and i or nil
- end)
- else
- local list = { }
- for i=1,#words do
- list[words[i]] = true
- end
- return Cmt(pattern^1, function(_,i,s)
- return list[s] -- and i or nil
- end)
- end
-end
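
A usage sketch of the intended behaviour, assuming the Cmt callback indeed receives the matched run as its capture value: the pattern consumes a maximal run of idtoken characters and looks the whole run up in a hash, so a keyword that is merely a prefix of a longer identifier does not fire.

    local p_keyword = context.exact_match { "for", "forever" }
    -- "forever" : the full run is in the hash, so it matches
    -- "forward" : the full run is absent, so it fails, even though
    --             "for" itself is a listed keyword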
-
--- spell checking (we can only load lua files)
---
--- return {
--- min = 3,
--- max = 40,
--- n = 12345,
--- words = {
--- ["someword"] = "someword",
--- ["anotherword"] = "Anotherword",
--- },
--- }
-
-local lists = { }
-
-function context.setwordlist(tag,limit) -- returns hash (lowercase keys and original values)
- if not tag or tag == "" then
- return false, 3
- end
- local list = lists[tag]
- if not list then
- list = context.loaddefinitions("spell-" .. tag)
- if not list or type(list) ~= "table" then
- list = { words = false, min = 3 }
- else
- list.words = list.words or false
- list.min = list.min or 3
- end
- lists[tag] = list
- end
- return list.words, list.min
-end
-
-patterns.wordtoken = R("az","AZ","\127\255")
-patterns.wordpattern = patterns.wordtoken^3 -- todo: if limit and #s < limit then
-
-function context.checkedword(validwords,validminimum,s,i) -- ,limit
- if not validwords then -- or #s < validminimum then
- return true, "text", i -- true, "default", i
- else
- -- keys are lower
- local word = validwords[s]
- if word == s then
- return true, "okay", i -- exact match
- elseif word then
- return true, "warning", i -- case issue
- else
- local word = validwords[lower(s)]
- if word == s then
- return true, "okay", i -- exact match
- elseif word then
- return true, "warning", i -- case issue
- elseif upper(s) == s then
- return true, "warning", i -- probably a logo or acronym
- else
- return true, "error", i
- end
- end
- end
-end
-
-function context.styleofword(validwords,validminimum,s) -- ,limit
- if not validwords or #s < validminimum then
- return "text"
- else
- -- keys are lower
- local word = validwords[s]
- if word == s then
- return "okay" -- exact match
- elseif word then
- return "warning" -- case issue
- else
- local word = validwords[lower(s)]
- if word == s then
- return "okay" -- exact match
- elseif word then
- return "warning" -- case issue
- elseif upper(s) == s then
- return "warning" -- probably a logo or acronym
- else
- return "error"
- end
- end
- end
-end
-
--- overloaded functions
-
-local h_table, b_table, n_table = { }, { }, { } -- from the time small tables were used (optimization)
-
-setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end })
-setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
-setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
-
-local newline = P("\r\n") + S("\r\n")
-local p_yes = Cp() * Cs((1-newline)^1) * newline^-1
-local p_nop = newline
-
-local folders = { }
-
-local function fold_by_parsing(text,start_pos,start_line,start_level,lexer)
- local folder = folders[lexer]
- if not folder then
- --
- local pattern, folds, text, start_pos, line_num, prev_level, current_level
- --
- local fold_symbols = lexer._foldsymbols
- local fold_pattern = lexer._foldpattern -- use lpeg instead (context extension)
- --
- if fold_pattern then
- -- if no functions are found then we could have a faster one
- fold_pattern = Cp() * C(fold_pattern) / function(s,match)
- local symbols = fold_symbols[get_style_at(start_pos + s)]
- if symbols then
- local l = symbols[match]
- if l then
- current_level = current_level + l
- end
- end
- end
- local action_y = function()
- folds[line_num] = prev_level
- if current_level > prev_level then
- folds[line_num] = prev_level + FOLD_HEADER
- end
- if current_level < FOLD_BASE then
- current_level = FOLD_BASE
- end
- prev_level = current_level
- line_num = line_num + 1
- end
- local action_n = function()
- folds[line_num] = prev_level + FOLD_BLANK
- line_num = line_num + 1
- end
- pattern = ((fold_pattern + (1-newline))^1 * newline / action_y + newline/action_n)^0
-
- else
- -- the traditional one but a bit optimized
- local fold_symbols_patterns = fold_symbols._patterns
- local action_y = function(pos,line)
- for j = 1, #fold_symbols_patterns do
- for s, match in gmatch(line,fold_symbols_patterns[j]) do -- '()('..patterns[i]..')'
- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
- local l = symbols and symbols[match]
- local t = type(l)
- if t == 'number' then
- current_level = current_level + l
- elseif t == 'function' then
- current_level = current_level + l(text, pos, line, s, match)
- end
- end
- end
- folds[line_num] = prev_level
- if current_level > prev_level then
- folds[line_num] = prev_level + FOLD_HEADER
- end
- if current_level < FOLD_BASE then
- current_level = FOLD_BASE
- end
- prev_level = current_level
- line_num = line_num + 1
- end
- local action_n = function()
- folds[line_num] = prev_level + FOLD_BLANK
- line_num = line_num + 1
- end
- pattern = (p_yes/action_y + p_nop/action_n)^0
- end
- --
- local reset_parser = lexer._reset_parser
- --
- folder = function(_text_,_start_pos_,_start_line_,_start_level_)
- if reset_parser then
- reset_parser()
- end
- folds = { }
- text = _text_
- start_pos = _start_pos_
- line_num = _start_line_
- prev_level = _start_level_
- current_level = prev_level
- lpegmatch(pattern,text)
- -- make folds collectable
- local t = folds
- folds = nil
- return t
- end
- folders[lexer] = folder
- end
- return folder(text,start_pos,start_line,start_level,lexer)
-end
-
-local folds, current_line, prev_level
-
-local function action_y()
- local current_level = FOLD_BASE + get_indent_amount(current_line)
- if current_level > prev_level then -- next level
- local i = current_line - 1
- local f
- while true do
- f = folds[i]
- if not f then
- break
- elseif f[2] == FOLD_BLANK then
- i = i - 1
- else
- f[2] = FOLD_HEADER -- low indent
- break
- end
- end
- folds[current_line] = { current_level } -- high indent
- elseif current_level < prev_level then -- prev level
- local f = folds[current_line - 1]
- if f then
- f[1] = prev_level -- high indent
- end
- folds[current_line] = { current_level } -- low indent
- else -- same level
- folds[current_line] = { prev_level }
- end
- prev_level = current_level
- current_line = current_line + 1
-end
-
-local function action_n()
- folds[current_line] = { prev_level, FOLD_BLANK }
- current_line = current_line + 1
-end
-
-local pattern = ( S("\t ")^0 * ( (1-S("\n\r"))^1 / action_y + P(true) / action_n) * newline )^0
-
-local function fold_by_indentation(text,start_pos,start_line,start_level)
- -- initialize
- folds = { }
- current_line = start_line
- prev_level = start_level
- -- define
- -- -- not here .. pattern binds and local functions are not frozen
- -- analyze
- lpegmatch(pattern,text)
- -- flatten
- for line, level in next, folds do
- folds[line] = level[1] + (level[2] or 0)
- end
- -- done, make folds collectable
- local t = folds
- folds = nil
- return t
-end
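-
--- A sketch of the flattening step (not in the original): level and flag are
--- combined by addition, which works because scintilla encodes the flags in
--- separate bits (the concrete values below are an assumption about the usual
--- scintilla constants):
-
-local function flatten_demo() -- illustrative only, never called
- local entry = { FOLD_BASE, FOLD_BLANK } -- a blank line at the base level
- return entry[1] + (entry[2] or 0) -- e.g. 0x400 + 0x1000
-end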
-
-local function fold_by_line(text,start_pos,start_line,start_level)
- local folds = { }
- -- can also be lpeg'd
- for _ in gmatch(text,".-\r?\n") do
- folds[start_line] = n_table[start_level] -- { start_level } -- style tables ? needs checking
- start_line = start_line + 1
- end
- return folds
-end
-
-local threshold_by_lexer = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_parsing = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_indentation = 512 * 1024 -- we don't know the filesize yet
-local threshold_by_line = 512 * 1024 -- we don't know the filesize yet
-
-function context.fold(text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go
- if text == '' then
- return { }
- end
- if initialize then
- initialize()
- end
- local lexer = global._LEXER
- local fold_by_lexer = lexer._fold
- local fold_by_symbols = lexer._foldsymbols
- local filesize = 0 -- we don't know that
- if fold_by_lexer then
- if filesize <= threshold_by_lexer then
- return fold_by_lexer(text,start_pos,start_line,start_level,lexer)
- end
- elseif fold_by_symbols then -- and get_property('fold.by.parsing',1) > 0 then
- if filesize <= threshold_by_parsing then
- return fold_by_parsing(text,start_pos,start_line,start_level,lexer)
- end
- elseif get_property('fold.by.indentation',1) > 0 then
- if filesize <= threshold_by_indentation then
- return fold_by_indentation(text,start_pos,start_line,start_level,lexer)
- end
- elseif get_property('fold.by.line',1) > 0 then
- if filesize <= threshold_by_line then
- return fold_by_line(text,start_pos,start_line,start_level,lexer)
- end
- end
- return { }
-end
-
--- The following code is mostly unchanged:
-
-local function add_rule(lexer,id,rule)
- if not lexer._RULES then
- lexer._RULES = { }
- lexer._RULEORDER = { }
- end
- lexer._RULES[id] = rule
- lexer._RULEORDER[#lexer._RULEORDER + 1] = id
-end
-
-local function add_style(lexer,token_name,style)
- local len = lexer._STYLES.len
- if len == 32 then
- len = len + 8
- end
- if len >= 128 then
- print('Too many styles defined (128 MAX)')
- end
- lexer._TOKENS[token_name] = len
- lexer._STYLES[len] = style
- lexer._STYLES.len = len + 1
-end
-
-local function join_tokens(lexer)
- local patterns = lexer._RULES
- local order = lexer._RULEORDER
- local token_rule = patterns[order[1]]
- for i=2,#order do
- token_rule = token_rule + patterns[order[i]]
- end
- lexer._TOKENRULE = token_rule
- return token_rule
-end
-
-local function add_lexer(grammar, lexer)
- local token_rule = join_tokens(lexer)
- local lexer_name = lexer._NAME
- local children = lexer._CHILDREN
- for i=1,#children do
- local child = children[i]
- if child._CHILDREN then
- add_lexer(grammar, child)
- end
- local child_name = child._NAME
- local rules = child._EMBEDDEDRULES[lexer_name]
- local rules_token_rule = grammar['__'..child_name] or rules.token_rule
- grammar[child_name] = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name)
- local embedded_child = '_' .. child_name
- grammar[embedded_child] = rules.start_rule * (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1
- token_rule = V(embedded_child) + token_rule
- end
- grammar['__' .. lexer_name] = token_rule
- grammar[lexer_name] = token_rule^0
-end
-
-local function build_grammar(lexer, initial_rule)
- local children = lexer._CHILDREN
- if children then
- local lexer_name = lexer._NAME
- if not initial_rule then
- initial_rule = lexer_name
- end
- local grammar = { initial_rule }
- add_lexer(grammar, lexer)
- lexer._INITIALRULE = initial_rule
- lexer._GRAMMAR = Ct(P(grammar))
- else
- lexer._GRAMMAR = Ct(join_tokens(lexer)^0)
- end
-end
-
--- so far for the mostly unchanged code; the next function uses the locals above.
-
-local lineparsers = { }
-
-function context.lex(text,init_style)
- local lexer = global._LEXER
- local grammar = lexer._GRAMMAR
- if initialize then
- initialize()
- end
- if not grammar then
- return { }
- elseif lexer._LEXBYLINE then -- we could keep token
- local tokens = { }
- local offset = 0
- local noftokens = 0
- local lineparser = lineparsers[lexer]
- if not lineparser then -- probably a cmt is more efficient
- lineparser = C((1-newline)^0 * newline) / function(line)
- local length = #line
- local line_tokens = length > 0 and lpegmatch(grammar,line)
- if line_tokens then
- for i=1,#line_tokens,2 do
- noftokens = noftokens + 1
- tokens[noftokens] = line_tokens[i]
- noftokens = noftokens + 1
- tokens[noftokens] = line_tokens[i + 1] + offset
- end
- end
- offset = offset + length
- if noftokens > 0 and tokens[noftokens] ~= offset then
- noftokens = noftokens + 1
- tokens[noftokens] = 'default'
- noftokens = noftokens + 1
- tokens[noftokens] = offset + 1
- end
- end
- lineparser = lineparser^0
- lineparsers[lexer] = lineparser
- end
- lpegmatch(lineparser,text)
- return tokens
-
- elseif lexer._CHILDREN then
- -- as we cannot print, tracing is not possible ... this might change, as we could
- -- just as well generate them all in one go (sharing as much as possible)
- local hash = lexer._HASH -- hm, was _hash
- if not hash then
- hash = { }
- lexer._HASH = hash
- end
- grammar = hash[init_style]
- if grammar then
- lexer._GRAMMAR = grammar
- else
- for style, style_num in next, lexer._TOKENS do
- if style_num == init_style then
- -- the lexer name is filtered from the whitespace
- -- specification
- local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
- if lexer._INITIALRULE ~= lexer_name then
- grammar = hash[lexer_name]
- if not grammar then
- build_grammar(lexer,lexer_name)
- grammar = lexer._GRAMMAR
- hash[lexer_name] = grammar
- end
- end
- break
- end
- end
- grammar = grammar or lexer._GRAMMAR
- hash[init_style] = grammar
- end
- return lpegmatch(grammar,text)
- else
- return lpegmatch(grammar,text)
- end
-end
-
--- todo: keywords: one lookup and multiple matches
-
--- function context.token(name, patt)
--- return Ct(patt * Cc(name) * Cp())
--- end
---
--- -- hm, changed in 3.24 .. no longer a table
-
-function context.token(name, patt)
- return patt * Cc(name) * Cp()
-end
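-
--- A sketch of what such a token yields (not in the original); it relies on the
--- lpeg locals set up earlier in this file:
-
-local function token_demo() -- illustrative only, never called
- local t = context.token("number", R("09")^1)
- -- a token is now a flat pattern: style name plus position after the match
- return lpegmatch(Ct(t^0), "123") -- { "number", 4 }
-end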
-
-lexers.fold = context.fold
-lexers.lex = context.lex
-lexers.token = context.token
-lexers.exact_match = context.exact_match
-
--- helper .. alas ... the lexer's lua instance is rather crippled .. not even
--- math is part of it
-
-local floor = math and math.floor
-local char = string.char
-
-if not floor then
-
- floor = function(n)
- return tonumber(string.format("%d",n))
- end
-
- math = math or { }
-
- math.floor = floor
-
-end
-
-local function utfchar(n)
- if n < 0x80 then
- return char(n)
- elseif n < 0x800 then
- return char(
- 0xC0 + floor(n/0x40),
- 0x80 + (n % 0x40)
- )
- elseif n < 0x10000 then
- return char(
- 0xE0 + floor(n/0x1000),
- 0x80 + (floor(n/0x40) % 0x40),
- 0x80 + (n % 0x40)
- )
- elseif n < 0x40000 then
- return char(
- 0xF0 + floor(n/0x40000),
- 0x80 + floor(n/0x1000),
- 0x80 + (floor(n/0x40) % 0x40),
- 0x80 + (n % 0x40)
- )
- else
- -- return char(
- -- 0xF1 + floor(n/0x1000000),
- -- 0x80 + floor(n/0x40000),
- -- 0x80 + floor(n/0x1000),
- -- 0x80 + (floor(n/0x40) % 0x40),
- -- 0x80 + (n % 0x40)
- -- )
- return "?"
- end
-end
-
-context.utfchar = utfchar
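-
--- Two sanity checks (not in the original) showing the encoder at work on
--- characters that are used below:
-
-assert(utfchar(0x00A0) == "\194\160") -- nbsp, two bytes: C2 A0
-assert(utfchar(0x2003) == "\226\128\131") -- emspace, three bytes: E2 80 83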
-
--- a helper from l-lpeg:
-
-local gmatch = string.gmatch
-
-local function make(t)
- local p
- for k, v in next, t do
- if not p then
- if next(v) then
- p = P(k) * make(v)
- else
- p = P(k)
- end
- else
- if next(v) then
- p = p + P(k) * make(v)
- else
- p = p + P(k)
- end
- end
- end
- return p
-end
-
-function lpeg.utfchartabletopattern(list)
- local tree = { }
- for i=1,#list do
- local t = tree
- for c in gmatch(list[i],".") do
- if not t[c] then
- t[c] = { }
- end
- t = t[c]
- end
- end
- return make(tree)
-end
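-
--- A sketch of the result (not in the original): the list becomes a character
--- tree that make() folds into nested lpeg choices.
-
-local function trie_demo() -- illustrative only, never called
- -- { "ab", "ac" } gives the tree { a = { b = { }, c = { } } } and make()
- -- renders that as P("a") * (P("b") + P("c")) (the choice order may vary)
- local p = lpeg.utfchartabletopattern { "ab", "ac" }
- return lpegmatch(p, "ac") -- 3, the position after the match
-end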
-
-patterns.invisibles = lpeg.utfchartabletopattern {
- utfchar(0x00A0), -- nbsp
- utfchar(0x2000), -- enquad
- utfchar(0x2001), -- emquad
- utfchar(0x2002), -- enspace
- utfchar(0x2003), -- emspace
- utfchar(0x2004), -- threeperemspace
- utfchar(0x2005), -- fourperemspace
- utfchar(0x2006), -- sixperemspace
- utfchar(0x2007), -- figurespace
- utfchar(0x2008), -- punctuationspace
- utfchar(0x2009), -- breakablethinspace
- utfchar(0x200A), -- hairspace
- utfchar(0x200B), -- zerowidthspace
- utfchar(0x202F), -- narrownobreakspace
- utfchar(0x205F), -- math thinspace
-}
-
--- now we can make:
-
-patterns.iwordtoken = patterns.wordtoken - patterns.invisibles
-patterns.iwordpattern = patterns.iwordtoken^3
-
--- require("themes/scite-context-theme")
-
--- In order to deal with some bug in additional styles (I have no clue what is
--- wrong, but additional styles get ignored and clash somehow) I just copy the
--- original lexer code ... see original for comments.
-
-return lexers
diff --git a/context/data/scite/lexers/themes/scite-context-theme-keep.lua b/context/data/scite/lexers/themes/scite-context-theme-keep.lua
deleted file mode 100644
index 7f9423d9a..000000000
--- a/context/data/scite/lexers/themes/scite-context-theme-keep.lua
+++ /dev/null
@@ -1,233 +0,0 @@
-local info = {
- version = 1.002,
- comment = "theme for scintilla lpeg lexer for context/metafun",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
--- global.trace("OEPS") -- how do we get access to the regular lua extensions
-
--- The regular styles set the main lexer styles table but we avoid that in order not
--- to end up with updating issues. We just use another table.
-
--- if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base
-local font_name = 'Dejavu Sans Mono'
-local font_size = 14
-
-if not WIN32 then
- font_name = '!' .. font_name
-end
-
-local color = lexer.color
-local style = lexer.style
-
-lexer.context = lexer.context or { }
-local context = lexer.context
-
-context.path = context_path
-
-colors = {
- red = color('7F', '00', '00'),
- green = color('00', '7F', '00'),
- blue = color('00', '00', '7F'),
- cyan = color('00', '7F', '7F'),
- magenta = color('7F', '00', '7F'),
- yellow = color('7F', '7F', '00'),
- orange = color('B0', '7F', '00'),
- --
- white = color('FF', 'FF', 'FF'),
- light = color('CF', 'CF', 'CF'),
- grey = color('80', '80', '80'),
- dark = color('4F', '4F', '4F'),
- black = color('00', '00', '00'),
- --
- selection = color('F7', 'F7', 'F7'),
- logpanel = color('E7', 'E7', 'E7'),
- textpanel = color('CF', 'CF', 'CF'),
- linepanel = color('A7', 'A7', 'A7'),
- tippanel = color('44', '44', '44'),
- --
- right = color('00', '00', 'FF'),
- wrong = color('FF', '00', '00'),
-}
-
-colors.teal = colors.cyan
-colors.purple = colors.magenta
-
--- to be set:
---
--- style_nothing
--- style_class
--- style_comment
--- style_constant
--- style_definition
--- style_error
--- style_function
--- style_keyword
--- style_number
--- style_operator
--- style_string
--- style_preproc
--- style_tag
--- style_type
--- style_variable
--- style_embedded
--- style_label
--- style_regex
--- style_identifier
---
--- style_line_number
--- style_bracelight
--- style_bracebad
--- style_controlchar
--- style_indentguide
--- style_calltip
-
-style_default = style {
- font = font_name,
- size = font_size,
- fore = colors.black,
- back = colors.textpanel,
-}
-
-style_nothing = style {
- -- empty
-}
-
-style_number = style { fore = colors.cyan }
-style_comment = style { fore = colors.yellow }
-style_string = style { fore = colors.magenta }
-style_keyword = style { fore = colors.blue, bold = true }
-
-style_quote = style { fore = colors.blue, bold = true }
-style_special = style { fore = colors.blue }
-style_extra = style { fore = colors.yellow }
-
-style_embedded = style { fore = colors.black, bold = true }
-
-style_char = style { fore = colors.magenta }
-style_reserved = style { fore = colors.magenta, bold = true }
-style_class = style { fore = colors.black, bold = true }
-style_constant = style { fore = colors.cyan, bold = true }
-style_definition = style { fore = colors.black, bold = true }
-style_okay = style { fore = colors.dark }
-style_error = style { fore = colors.red }
-style_warning = style { fore = colors.orange }
-style_invisible = style { back = colors.orange }
-style_function = style { fore = colors.black, bold = true }
-style_operator = style { fore = colors.blue }
-style_preproc = style { fore = colors.yellow, bold = true }
-style_tag = style { fore = colors.cyan }
-style_type = style { fore = colors.blue }
-style_variable = style { fore = colors.black }
-style_identifier = style_nothing
-
-style_standout = style { fore = colors.orange, bold = true }
-
-style_line_number = style { back = colors.linepanel }
-style_bracelight = style_standout
-style_bracebad = style_standout
-style_indentguide = style { fore = colors.linepanel, back = colors.white }
-style_calltip = style { fore = colors.white, back = colors.tippanel }
-style_controlchar = style_nothing
-
-style_label = style { fore = colors.red, bold = true } -- style { fore = colors.cyan, bold = true }
-style_regex = style_string
-
-style_command = style { fore = colors.green, bold = true }
-
--- only bold seems to work
-
-lexer.style_nothing = style_nothing
-lexer.style_class = style_class
-lexer.style_comment = style_comment
-lexer.style_constant = style_constant
-lexer.style_definition = style_definition
-lexer.style_error = style_error
-lexer.style_function = style_function
-lexer.style_keyword = style_keyword
-lexer.style_number = style_number
-lexer.style_operator = style_operator
-lexer.style_string = style_string
-lexer.style_preproc = style_preproc
-lexer.style_tag = style_tag
-lexer.style_type = style_type
-lexer.style_variable = style_variable
-lexer.style_embedded = style_embedded
-lexer.style_label = style_label
-lexer.style_regex = style_regex
-lexer.style_identifier = style_nothing
-
-local styles = { -- as we have globals we could do with less
-
- -- ["whitespace"] = style_whitespace, -- not to be set!
-
-["default"] = style_nothing,
-["number"] = style_number,
-["comment"] = style_comment,
-["keyword"] = style_keyword,
-["string"] = style_string,
-["preproc"] = style_preproc,
-
- ["reserved"] = style_reserved,
- ["internal"] = style_standout,
-
- ["command"] = style_command,
- ["preamble"] = style_comment,
- ["embedded"] = style_embedded,
- ["grouping"] = style { fore = colors.red },
-["label"] = style_label,
- ["primitive"] = style_keyword,
- ["plain"] = style { fore = colors.dark, bold = true },
- ["user"] = style { fore = colors.green },
- ["data"] = style_constant,
- ["special"] = style_special,
- ["extra"] = style_extra,
- ["quote"] = style_quote,
-
- ["okay"] = style_okay,
- ["warning"] = style_warning,
- ["invisible"] = style_invisible,
-["error"] = style_error,
-
-}
-
--- Old method (still available):
-
-local styleset = { }
-
-for k, v in next, styles do
- styleset[#styleset+1] = { k, v }
-end
-
-context.styles = styles
-context.styleset = styleset
-
--- We need to be sparse due to some limitation (and the growing number of
--- built-in styles).
-
--- function context.newstyleset(list)
--- local t = { }
--- if list then
--- for i=1,#list do
--- t[list[i]] = true
--- end
--- end
--- return t
--- end
-
--- function context.usestyle(set,name)
--- set[name] = true
--- return name
--- end
-
--- function context.usestyleset(set)
--- local t = { }
--- for k, _ in next, set do
--- t[#t+1] = { k, styles[k] or styles.default }
--- end
--- end
diff --git a/context/data/scite/lexers/themes/scite-context-theme.lua b/context/data/scite/lexers/themes/scite-context-theme.lua
deleted file mode 100644
index 6e161b22f..000000000
--- a/context/data/scite/lexers/themes/scite-context-theme.lua
+++ /dev/null
@@ -1,226 +0,0 @@
-local info = {
- version = 1.002,
- comment = "theme for scintilla lpeg lexer for context/metafun",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files",
-}
-
--- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
--- global.trace("OEPS") -- how do we get access to the regular lua extensions
-
--- The regular styles set the main lexer styles table but we avoid that in order not
--- to end up with updating issues. We just use another table.
-
-if not lexer._CONTEXTEXTENSIONS then require("scite-context-lexer") end
-
-local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base
-local font_name = 'Dejavu Sans Mono'
-local font_size = 14
-
-if not WIN32 then
- font_name = '!' .. font_name
-end
-
-local color = lexer.color
-local style = lexer.style
-
-lexer.context = lexer.context or { }
-local context = lexer.context
-
-context.path = context_path
-
-local colors = {
- red = color('7F', '00', '00'),
- green = color('00', '7F', '00'),
- blue = color('00', '00', '7F'),
- cyan = color('00', '7F', '7F'),
- magenta = color('7F', '00', '7F'),
- yellow = color('7F', '7F', '00'),
- orange = color('B0', '7F', '00'),
- --
- white = color('FF', 'FF', 'FF'),
- light = color('CF', 'CF', 'CF'),
- grey = color('80', '80', '80'),
- dark = color('4F', '4F', '4F'),
- black = color('00', '00', '00'),
- --
- selection = color('F7', 'F7', 'F7'),
- logpanel = color('E7', 'E7', 'E7'),
- textpanel = color('CF', 'CF', 'CF'),
- linepanel = color('A7', 'A7', 'A7'),
- tippanel = color('44', '44', '44'),
- --
- right = color('00', '00', 'FF'),
- wrong = color('FF', '00', '00'),
-}
-
-colors.teal = colors.cyan
-colors.purple = colors.magenta
-
-lexer.colors = colors
-
--- defaults:
-
-local style_nothing = style { }
------ style_whitespace = style { }
-local style_comment = style { fore = colors.yellow }
-local style_string = style { fore = colors.magenta }
-local style_number = style { fore = colors.cyan }
-local style_keyword = style { fore = colors.blue, bold = true }
-local style_identifier = style_nothing
-local style_operator = style { fore = colors.blue }
-local style_error = style { fore = colors.red }
-local style_preproc = style { fore = colors.yellow, bold = true }
-local style_constant = style { fore = colors.cyan, bold = true }
-local style_variable = style { fore = colors.black }
-local style_function = style { fore = colors.black, bold = true }
-local style_class = style { fore = colors.black, bold = true }
-local style_type = style { fore = colors.blue }
-local style_label = style { fore = colors.red, bold = true }
-local style_regex = style { fore = colors.magenta }
-
--- reserved:
-
-local style_default = style { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel }
-local style_text = style { font = font_name, size = font_size, fore = colors.black, back = colors.textpanel }
-local style_line_number = style { back = colors.linepanel }
-local style_bracelight = style { fore = colors.orange, bold = true }
-local style_bracebad = style { fore = colors.orange, bold = true }
-local style_indentguide = style { fore = colors.linepanel, back = colors.white }
-local style_calltip = style { fore = colors.white, back = colors.tippanel }
-local style_controlchar = style_nothing
-
--- extras:
-
-local style_quote = style { fore = colors.blue, bold = true }
-local style_special = style { fore = colors.blue }
-local style_extra = style { fore = colors.yellow }
-local style_embedded = style { fore = colors.black, bold = true }
------ style_char = style { fore = colors.magenta }
-local style_reserved = style { fore = colors.magenta, bold = true }
-local style_definition = style { fore = colors.black, bold = true }
-local style_okay = style { fore = colors.dark }
-local style_warning = style { fore = colors.orange }
-local style_invisible = style { back = colors.orange }
-local style_tag = style { fore = colors.cyan }
------ style_standout = style { fore = colors.orange, bold = true }
-local style_command = style { fore = colors.green, bold = true }
-local style_internal = style { fore = colors.orange, bold = true }
-
-local style_preamble = style { fore = colors.yellow }
-local style_grouping = style { fore = colors.red }
-local style_primitive = style { fore = colors.blue, bold = true }
-local style_plain = style { fore = colors.dark, bold = true }
-local style_user = style { fore = colors.green }
-local style_data = style { fore = colors.cyan, bold = true }
-
--- used by the generic lexer:
-
-lexer.style_nothing = style_nothing -- 0
------.whitespace = style_whitespace -- 1
-lexer.style_comment = style_comment -- 2
-lexer.style_string = style_string -- 3
-lexer.style_number = style_number -- 4
-lexer.style_keyword = style_keyword -- 5
-lexer.style_identifier = style_nothing -- 6
-lexer.style_operator = style_operator -- 7
-lexer.style_error = style_error -- 8
-lexer.style_preproc = style_preproc -- 9
-lexer.style_constant = style_constant -- 10
-lexer.style_variable = style_variable -- 11
-lexer.style_function = style_function -- 12
-lexer.style_class = style_class -- 13
-lexer.style_type = style_type -- 14
-lexer.style_label = style_label -- 15
-lexer.style_regex = style_regex -- 16
-
-lexer.style_default = style_default -- 32
-lexer.style_line_number = style_line_number -- 33
-lexer.style_bracelight = style_bracelight -- 34
-lexer.style_bracebad = style_bracebad -- 35
-lexer.style_indentguide = style_indentguide -- 36
-lexer.style_calltip = style_calltip -- 37
-lexer.style_controlchar = style_controlchar -- 38
-
-local styles = { -- as we have globals we could do with less
-
- -- ["whitespace"] = style_whitespace, -- not to be set!
- ["default"] = style_nothing, -- else no good backtracking to start-of-child
- -- ["number"] = style_number,
- -- ["comment"] = style_comment,
- -- ["keyword"] = style_keyword,
- -- ["string"] = style_string,
- -- ["preproc"] = style_preproc,
- -- ["error"] = style_error,
- -- ["label"] = style_label,
-
- ["invisible"] = style_invisible,
- ["quote"] = style_quote,
- ["special"] = style_special,
- ["extra"] = style_extra,
- ["embedded"] = style_embedded,
- -- ["char"] = style_char,
- ["reserved"] = style_reserved,
- -- ["definition"] = style_definition,
- ["okay"] = style_okay,
- ["warning"] = style_warning,
- -- ["standout"] = style_standout,
- ["command"] = style_command,
- ["internal"] = style_internal,
- ["preamble"] = style_preamble,
- ["grouping"] = style_grouping,
- ["primitive"] = style_primitive,
- ["plain"] = style_plain,
- ["user"] = style_user,
- ["data"] = style_data,
-
- ["text"] = style_text, -- style_default
-
-}
-
-local styleset = { }
-
-for k, v in next, styles do
- styleset[#styleset+1] = { k, v }
-end
-
-context.styles = styles
-context.styleset = styleset
-
-function context.stylesetcopy()
- local t = { }
- for i=1,#styleset do
- local s = styleset[i]
- t[i] = s
- t[s[1]] = s[2] -- also make the style reachable by its name
- end
- t[#t+1] = { "whitespace", style_nothing }
- t.whitespace = style_nothing -- new style ?
- return t
-end
-
--- We can be sparse if needed:
-
--- function context.newstyleset(list)
--- local t = { }
--- if list then
--- for i=1,#list do
--- t[list[i]] = true
--- end
--- end
--- return t
--- end
-
--- function context.usestyle(set,name)
--- set[name] = true
--- return name
--- end
-
--- function context.usestyleset(set)
--- local t = { }
--- for k, _ in next, set do
--- t[#t+1] = { k, styles[k] or styles.default }
--- end
--- end
diff --git a/context/data/scite/metapost.properties b/context/data/scite/metapost.properties
deleted file mode 100644
index fe89b65eb..000000000
--- a/context/data/scite/metapost.properties
+++ /dev/null
@@ -1 +0,0 @@
-import scite-metapost
diff --git a/context/data/scite/scite-context-data-context.properties b/context/data/scite/scite-context-data-context.properties
deleted file mode 100644
index fbd958f8a..000000000
--- a/context/data/scite/scite-context-data-context.properties
+++ /dev/null
@@ -1,191 +0,0 @@
-keywordclass.context.constants=\
-zerocount minusone minustwo plusone \
-plustwo plusthree plusfour plusfive plussix \
-plusseven pluseight plusnine plusten plussixteen \
-plushundred plusthousand plustenthousand plustwentythousand medcard \
-maxcard zeropoint onepoint halfapoint onebasepoint \
-maxdimen scaledpoint thousandpoint points halfpoint \
-zeroskip zeromuskip onemuskip pluscxxvii pluscxxviii \
-pluscclv pluscclvi normalpagebox endoflinetoken outputnewlinechar \
-emptytoks empty undefined voidbox emptybox \
-emptyvbox emptyhbox bigskipamount medskipamount smallskipamount \
-fmtname fmtversion texengine texenginename texengineversion \
-luatexengine pdftexengine xetexengine unknownengine etexversion \
-pdftexversion xetexversion xetexrevision activecatcode bgroup \
-egroup endline conditionaltrue conditionalfalse attributeunsetvalue \
-uprotationangle rightrotationangle downrotationangle leftrotationangle inicatcodes \
-ctxcatcodes texcatcodes notcatcodes txtcatcodes vrbcatcodes \
-prtcatcodes nilcatcodes luacatcodes tpacatcodes tpbcatcodes \
-xmlcatcodes escapecatcode begingroupcatcode endgroupcatcode mathshiftcatcode \
-alignmentcatcode endoflinecatcode parametercatcode superscriptcatcode subscriptcatcode \
-ignorecatcode spacecatcode lettercatcode othercatcode activecatcode \
-commentcatcode invalidcatcode tabasciicode newlineasciicode formfeedasciicode \
-endoflineasciicode endoffileasciicode spaceasciicode hashasciicode dollarasciicode \
-commentasciicode ampersandasciicode colonasciicode backslashasciicode circumflexasciicode \
-underscoreasciicode leftbraceasciicode barasciicode rightbraceasciicode tildeasciicode \
-delasciicode lessthanasciicode morethanasciicode doublecommentsignal atsignasciicode \
-exclamationmarkasciicode questionmarkasciicode doublequoteasciicode singlequoteasciicode forwardslashasciicode \
-primeasciicode activemathcharcode activetabtoken activeformfeedtoken activeendoflinetoken \
-batchmodecode nonstopmodecode scrollmodecode errorstopmodecode bottomlevelgroupcode \
-simplegroupcode hboxgroupcode adjustedhboxgroupcode vboxgroupcode vtopgroupcode \
-aligngroupcode noaligngroupcode outputgroupcode mathgroupcode discretionarygroupcode \
-insertgroupcode vcentergroupcode mathchoicegroupcode semisimplegroupcode mathshiftgroupcode \
-mathleftgroupcode vadjustgroupcode charnodecode hlistnodecode vlistnodecode \
-rulenodecode insertnodecode marknodecode adjustnodecode ligaturenodecode \
-discretionarynodecode whatsitnodecode mathnodecode gluenodecode kernnodecode \
-penaltynodecode unsetnodecode mathsnodecode charifcode catifcode \
-numifcode dimifcode oddifcode vmodeifcode hmodeifcode \
-mmodeifcode innerifcode voidifcode hboxifcode vboxifcode \
-xifcode eofifcode trueifcode falseifcode caseifcode \
-definedifcode csnameifcode fontcharifcode fontslantperpoint fontinterwordspace \
-fontinterwordstretch fontinterwordshrink fontexheight fontemwidth fontextraspace \
-slantperpoint interwordspace interwordstretch interwordshrink exheight \
-emwidth extraspace mathsupdisplay mathsupnormal mathsupcramped \
-mathsubnormal mathsubcombined mathaxisheight startmode stopmode \
-startnotmode stopnotmode startmodeset stopmodeset doifmode \
-doifmodeelse doifnotmode startallmodes stopallmodes startnotallmodes \
-stopnotallmodes doifallmodes doifallmodeselse doifnotallmodes startenvironment \
-stopenvironment environment startcomponent stopcomponent component \
-startproduct stopproduct product startproject stopproject \
-project starttext stoptext startnotext stopnotext \
-startdocument stopdocument documentvariable setupdocument startmodule \
-stopmodule usemodule usetexmodule useluamodule setupmodule \
-currentmoduleparameter moduleparameter startTEXpage stopTEXpage enablemode \
-disablemode preventmode globalenablemode globaldisablemode globalpreventmode \
-pushmode popmode typescriptone typescripttwo typescriptthree \
-mathsizesuffix mathordcode mathopcode mathbincode mathrelcode \
-mathopencode mathclosecode mathpunctcode mathalphacode mathinnercode \
-mathnothingcode mathlimopcode mathnolopcode mathboxcode mathchoicecode \
-mathaccentcode mathradicalcode constantnumber constantnumberargument constantdimen \
-constantdimenargument constantemptyargument continueifinputfile luastringsep !!bs \
-!!es lefttorightmark righttoleftmark breakablethinspace nobreakspace \
-narrownobreakspace zerowidthnobreakspace ideographicspace ideographichalffillspace twoperemspace \
-threeperemspace fourperemspace fiveperemspace sixperemspace figurespace \
-punctuationspace hairspace zerowidthspace zerowidthnonjoiner zerowidthjoiner \
-zwnj zwj
-
-keywordclass.context.helpers=\
-startsetups stopsetups startxmlsetups stopxmlsetups \
-startluasetups stopluasetups starttexsetups stoptexsetups startrawsetups \
-stoprawsetups startlocalsetups stoplocalsetups starttexdefinition stoptexdefinition \
-starttexcode stoptexcode startcontextcode stopcontextcode doifsetupselse \
-doifsetups doifnotsetups setup setups texsetup \
-xmlsetup luasetup directsetup doifelsecommandhandler doifnotcommandhandler \
-doifcommandhandler newmode setmode resetmode newsystemmode \
-setsystemmode resetsystemmode pushsystemmode popsystemmode booleanmodevalue \
-newcount newdimen newskip newmuskip newbox \
-newtoks newread newwrite newmarks newinsert \
-newattribute newif newlanguage newfamily newfam \
-newhelp then begcsname strippedcsname firstargumentfalse \
-firstargumenttrue secondargumentfalse secondargumenttrue thirdargumentfalse thirdargumenttrue \
-fourthargumentfalse fourthargumenttrue fifthargumentfalse fifthsargumenttrue sixthargumentfalse \
-sixtsargumenttrue doglobal dodoglobal redoglobal resetglobal \
-donothing dontcomplain forgetall donetrue donefalse \
-htdp unvoidbox hfilll vfilll mathbox \
-mathlimop mathnolop mathnothing mathalpha currentcatcodetable \
-defaultcatcodetable catcodetablename newcatcodetable startcatcodetable stopcatcodetable \
-startextendcatcodetable stopextendcatcodetable pushcatcodetable popcatcodetable restorecatcodes \
-setcatcodetable letcatcodecommand defcatcodecommand uedcatcodecommand hglue \
-vglue hfillneg vfillneg hfilllneg vfilllneg \
-ruledhss ruledhfil ruledhfill ruledhfilneg ruledhfillneg \
-normalhfillneg ruledvss ruledvfil ruledvfill ruledvfilneg \
-ruledvfillneg normalvfillneg ruledhbox ruledvbox ruledvtop \
-ruledvcenter ruledmbox ruledhskip ruledvskip ruledkern \
-ruledmskip ruledmkern ruledhglue ruledvglue normalhglue \
-normalvglue ruledpenalty filledhboxb filledhboxr filledhboxg \
-filledhboxc filledhboxm filledhboxy filledhboxk scratchcounter \
-globalscratchcounter scratchdimen globalscratchdimen scratchskip globalscratchskip \
-scratchmuskip globalscratchmuskip scratchtoks globalscratchtoks scratchbox \
-globalscratchbox normalbaselineskip normallineskip normallineskiplimit availablehsize \
-localhsize setlocalhsize nextbox dowithnextbox dowithnextboxcs \
-dowithnextboxcontent dowithnextboxcontentcs scratchwidth scratchheight scratchdepth \
-scratchoffset scratchdistance scratchhsize scratchvsize scratchxoffset \
-scratchyoffset scratchhoffset scratchvoffset scratchxposition scratchyposition \
-scratchtopoffset scratchbottomoffset scratchleftoffset scratchrightoffset scratchcounterone \
-scratchcountertwo scratchcounterthree scratchdimenone scratchdimentwo scratchdimenthree \
-scratchskipone scratchskiptwo scratchskipthree scratchmuskipone scratchmuskiptwo \
-scratchmuskipthree scratchtoksone scratchtokstwo scratchtoksthree scratchboxone \
-scratchboxtwo scratchboxthree scratchnx scratchny scratchmx \
-scratchmy scratchunicode scratchleftskip scratchrightskip scratchtopskip \
-scratchbottomskip doif doifnot doifelse doifinset \
-doifnotinset doifinsetelse doifnextcharelse doifnextoptionalelse doifnextbgroupelse \
-doifnextparenthesiselse doiffastoptionalcheckelse doifundefinedelse doifdefinedelse doifundefined \
-doifdefined doifelsevalue doifvalue doifnotvalue doifnothing \
-doifsomething doifelsenothing doifsomethingelse doifvaluenothing doifvaluesomething \
-doifelsevaluenothing doifdimensionelse doifnumberelse doifnumber doifnotnumber \
-doifcommonelse doifcommon doifnotcommon doifinstring doifnotinstring \
-doifinstringelse doifassignmentelse docheckassignment tracingall tracingnone \
-loggingall removetoks appendtoks prependtoks appendtotoks \
-prependtotoks to endgraf endpar everyendpar \
-reseteverypar finishpar empty null space \
-quad enspace obeyspaces obeylines obeyedspace \
-obeyedline normalspace executeifdefined singleexpandafter doubleexpandafter \
-tripleexpandafter dontleavehmode removelastspace removeunwantedspaces keepunwantedspaces \
-wait writestatus define defineexpandable redefine \
-setmeasure setemeasure setgmeasure setxmeasure definemeasure \
-freezemeasure measure measured installcorenamespace getvalue \
-getuvalue setvalue setevalue setgvalue setxvalue \
-letvalue letgvalue resetvalue undefinevalue ignorevalue \
-setuvalue setuevalue setugvalue setuxvalue globallet \
-glet udef ugdef uedef uxdef \
-checked unique getparameters geteparameters getgparameters \
-getxparameters forgetparameters copyparameters getdummyparameters dummyparameter \
-directdummyparameter setdummyparameter letdummyparameter usedummystyleandcolor usedummystyleparameter \
-usedummycolorparameter processcommalist processcommacommand quitcommalist quitprevcommalist \
-processaction processallactions processfirstactioninset processallactionsinset unexpanded \
-expanded startexpanded stopexpanded protected protect \
-unprotect firstofoneargument firstoftwoarguments secondoftwoarguments firstofthreearguments \
-secondofthreearguments thirdofthreearguments firstoffourarguments secondoffourarguments thirdoffourarguments \
-fourthoffourarguments firstoffivearguments secondoffivearguments thirdoffivearguments fourthoffivearguments \
-fifthoffivearguments firstofsixarguments secondofsixarguments thirdofsixarguments fourthofsixarguments \
-fifthofsixarguments sixthofsixarguments firstofoneunexpanded gobbleoneargument gobbletwoarguments \
-gobblethreearguments gobblefourarguments gobblefivearguments gobblesixarguments gobblesevenarguments \
-gobbleeightarguments gobbleninearguments gobbletenarguments gobbleoneoptional gobbletwooptionals \
-gobblethreeoptionals gobblefouroptionals gobblefiveoptionals dorecurse doloop \
-exitloop dostepwiserecurse recurselevel recursedepth dofastloopcs \
-dowith newconstant setnewconstant setconstant setconstantvalue \
-newconditional settrue setfalse settruevalue setfalsevalue \
-newmacro setnewmacro newfraction newsignal dosingleempty \
-dodoubleempty dotripleempty doquadrupleempty doquintupleempty dosixtupleempty \
-doseventupleempty dosingleargument dodoubleargument dotripleargument doquadrupleargument \
-doquintupleargument dosixtupleargument doseventupleargument dosinglegroupempty dodoublegroupempty \
-dotriplegroupempty doquadruplegroupempty doquintuplegroupempty permitspacesbetweengroups dontpermitspacesbetweengroups \
-nopdfcompression maximumpdfcompression normalpdfcompression modulonumber dividenumber \
-getfirstcharacter doiffirstcharelse startnointerference stopnointerference twodigits \
-threedigits leftorright strut setstrut strutbox \
-strutht strutdp strutwd struthtdp begstrut \
-endstrut lineheight ordordspacing ordopspacing ordbinspacing \
-ordrelspacing ordopenspacing ordclosespacing ordpunctspacing ordinnerspacing \
-opordspacing opopspacing opbinspacing oprelspacing opopenspacing \
-opclosespacing oppunctspacing opinnerspacing binordspacing binopspacing \
-binbinspacing binrelspacing binopenspacing binclosespacing binpunctspacing \
-bininnerspacing relordspacing relopspacing relbinspacing relrelspacing \
-relopenspacing relclosespacing relpunctspacing relinnerspacing openordspacing \
-openopspacing openbinspacing openrelspacing openopenspacing openclosespacing \
-openpunctspacing openinnerspacing closeordspacing closeopspacing closebinspacing \
-closerelspacing closeopenspacing closeclosespacing closepunctspacing closeinnerspacing \
-punctordspacing punctopspacing punctbinspacing punctrelspacing punctopenspacing \
-punctclosespacing punctpunctspacing punctinnerspacing innerordspacing inneropspacing \
-innerbinspacing innerrelspacing inneropenspacing innerclosespacing innerpunctspacing \
-innerinnerspacing normalreqno startimath stopimath normalstartimath \
-normalstopimath startdmath stopdmath normalstartdmath normalstopdmath \
-uncramped cramped triggermathstyle mathstylefont mathsmallstylefont \
-mathstyleface mathsmallstyleface mathstylecommand mathpalette mathstylehbox \
-mathstylevbox mathstylevcenter mathstylevcenteredhbox mathstylevcenteredvbox mathtext \
-setmathsmalltextbox setmathtextbox triggerdisplaystyle triggertextstyle triggerscriptstyle \
-triggerscriptscriptstyle triggeruncrampedstyle triggercrampedstyle triggersmallstyle triggeruncrampedsmallstyle \
-triggercrampedsmallstyle triggerbigstyle triggeruncrampedbigstyle triggercrampedbigstyle luaexpr \
-expdoifelse expdoif expdoifnot expdoifcommonelse expdoifinsetelse \
-ctxdirectlua ctxlatelua ctxsprint ctxwrite ctxcommand \
-ctxdirectcommand ctxlatecommand ctxreport ctxlua luacode \
-lateluacode directluacode registerctxluafile ctxloadluafile luaversion \
-luamajorversion luaminorversion ctxluacode luaconditional luaexpanded \
-startluaparameterset stopluaparameterset luaparameterset definenamedlua obeylualines \
-obeyluatokens startluacode stopluacode startlua stoplua \
-carryoverpar assumelongusagecs Umathbotaccent righttolefthbox lefttorighthbox \
-righttoleftvbox lefttorightvbox righttoleftvtop lefttorightvtop rtlhbox \
-ltrhbox rtlvbox ltrvbox rtlvtop ltrvtop \
-autodirhbox autodirvbox autodirvtop lefttoright righttoleft \
-synchronizelayoutdirection synchronizedisplaydirection synchronizeinlinedirection lesshyphens morehyphens \
-nohyphens dohyphens Ucheckedstartdisplaymath Ucheckedstopdisplaymath
-
diff --git a/context/data/scite/scite-context-user.properties b/context/data/scite/scite-context-user.properties
deleted file mode 100644
index 88e803031..000000000
--- a/context/data/scite/scite-context-user.properties
+++ /dev/null
@@ -1,15 +0,0 @@
-# this loads the basics
-
-import scite-context
-
-# internal lexing
-
-import scite-context-internal
-
-# external lexing (tex, mps, cld/lua, xml)
-
-import scite-context-external
-
-# this does some tuning
-
-import scite-pragma
diff --git a/context/data/scite/scite-context-visual.tex b/context/data/scite/scite-context-visual.tex
deleted file mode 100644
index 0a1b8bb71..000000000
--- a/context/data/scite/scite-context-visual.tex
+++ /dev/null
@@ -1,52 +0,0 @@
-% language=uk
-
-\usemodule[art-01]
-
-\defineframedtext
- [entry]
-
-\starttext
-
-\startchapter[title=Some fancy title]
-
- \startluacode
- local entries = { -- there can be more
- { text = "The third entry!" },
- { text = "The fourth entry!" },
- }
-
- for i=1,#entries do
- context.startentry()
- context(entries[i].text)
- context.stopentry()
- end
- \stopluacode
-
- This is just some text to demonstrate the realtime spellchecker
- in combination with the embedded lua and metapost lexers, display
- as well as inline, as in \ctxlua{context("lua code")}.
-
- Non breakable spaces in for instance 10 mm and quads like here
- are shown as well.
-
- \startlinecorrection
- \startMPcode
- for i=1 upto 100 :
- draw fullcircle scaled (i*mm) ;
- endfor ;
- \stopMPcode
- \stoplinecorrection
-
- \iftrue
- \def\crap{some text} % who cares
- \else
- \def\crap{some crap} % about this
- \fi
-
- \blank[2*big]
-
- \crap
-
-\stopchapter
-
-\stoptext
diff --git a/context/data/scite/tex.properties b/context/data/scite/tex.properties
deleted file mode 100644
index 3fbad41cb..000000000
--- a/context/data/scite/tex.properties
+++ /dev/null
@@ -1 +0,0 @@
-import scite-tex