author    Context Git Mirror Bot <phg42.2a@gmail.com>  2016-08-01 16:40:14 +0200
committer Context Git Mirror Bot <phg42.2a@gmail.com>  2016-08-01 16:40:14 +0200
commit    96f283b0d4f0259b7d7d1c64d1d078c519fc84a6 (patch)
tree      e9673071aa75f22fee32d701d05f1fdc443ce09c /doc/context/sources/general/manuals/about
parent    c44a9d2f89620e439f335029689e7f0dff9516b7 (diff)
download  context-96f283b0d4f0259b7d7d1c64d1d078c519fc84a6.tar.gz
2016-08-01 14:21:00
Diffstat (limited to 'doc/context/sources/general/manuals/about')
-rw-r--r--  doc/context/sources/general/manuals/about/about-calls.tex  739
-rw-r--r--  doc/context/sources/general/manuals/about/about-contents.tex  17
-rw-r--r--  doc/context/sources/general/manuals/about/about-environment.tex  71
-rw-r--r--  doc/context/sources/general/manuals/about/about-expanding.tex  188
-rw-r--r--  doc/context/sources/general/manuals/about/about-hashing.tex  616
-rw-r--r--  doc/context/sources/general/manuals/about/about-hz.tex  96
-rw-r--r--  doc/context/sources/general/manuals/about/about-introduction.tex  38
-rw-r--r--  doc/context/sources/general/manuals/about/about-jitting-jit-2013-06-04.lua  148
-rw-r--r--  doc/context/sources/general/manuals/about/about-jitting-jit.lua  163
-rw-r--r--  doc/context/sources/general/manuals/about/about-jitting.tex  439
-rw-r--r--  doc/context/sources/general/manuals/about/about-luafunctions.tex  292
-rw-r--r--  doc/context/sources/general/manuals/about/about-mathstackers.tex  765
-rw-r--r--  doc/context/sources/general/manuals/about/about-mathstyles.tex  457
-rw-r--r--  doc/context/sources/general/manuals/about/about-metafun.tex  834
-rw-r--r--  doc/context/sources/general/manuals/about/about-mobility.tex  205
-rw-r--r--  doc/context/sources/general/manuals/about/about-nodes.tex  603
-rw-r--r--  doc/context/sources/general/manuals/about/about-nuts.tex  619
-rw-r--r--  doc/context/sources/general/manuals/about/about-properties.tex  209
-rw-r--r--  doc/context/sources/general/manuals/about/about-speed.tex  732
-rw-r--r--  doc/context/sources/general/manuals/about/about-threequarters.tex  330
-rw-r--r--  doc/context/sources/general/manuals/about/about-titlepage.tex  31
-rw-r--r--  doc/context/sources/general/manuals/about/about.tex  46
-rw-r--r--  doc/context/sources/general/manuals/about/demo-data.lua  10
-rw-r--r--  doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-JIT20.lua  406
-rw-r--r--  doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51-40-6.lua  406
-rw-r--r--  doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51.lua  406
-rw-r--r--  doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52-40-6.lua  406
-rw-r--r--  doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52.lua  406
-rw-r--r--  doc/context/sources/general/manuals/about/pi-speed-1.tex  3
-rw-r--r--  doc/context/sources/general/manuals/about/pi-speed-2.tex  3
-rw-r--r--  doc/context/sources/general/manuals/about/pi-speed-3.tex  10
-rw-r--r--  doc/context/sources/general/manuals/about/pi-speed-4.tex  10
-rw-r--r--  doc/context/sources/general/manuals/about/pi-speed-5.tex  16
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-1.png  bin 0 -> 15785 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-10.png  bin 0 -> 21898 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-2.png  bin 0 -> 20975 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-3.png  bin 0 -> 7745 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-4.png  bin 0 -> 11516 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-5.png  bin 0 -> 14316 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-6.png  bin 0 -> 12295 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-7.png  bin 0 -> 8126 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-8.png  bin 0 -> 14815 bytes
-rw-r--r--  doc/context/sources/general/manuals/about/still-expanding-9.png  bin 0 -> 16643 bytes
43 files changed, 9720 insertions, 0 deletions
diff --git a/doc/context/sources/general/manuals/about/about-calls.tex b/doc/context/sources/general/manuals/about/about-calls.tex
new file mode 100644
index 000000000..83bf89aad
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-calls.tex
@@ -0,0 +1,739 @@
+% language=uk
+
+\startcomponent about-calls
+
+\environment about-environment
+
+\startchapter[title={Calling Lua}]
+
+\startsection[title=Introduction]
+
+One evening, on Skype, Luigi and I were pondering the somewhat disappointing
+impact of jit in \LUAJITTEX, and one of the reasons we could come up with is
+that when you invoke \LUA\ from inside \TEX, each \type {\directlua} call gets
+an extensive treatment. Take the following:
+
+\starttyping
+\def\SomeValue#1%
+ {\directlua{tex.print(math.sin(#1)/math.cos(2*#1))}}
+\stoptyping
+
+Each time \type {\SomeValue} is expanded, the \TEX\ parser will do the following:
+
+\startitemize[packed]
+\startitem
+ It sees \type {\directlua} and will jump to the related scanner.
+\stopitem
+\startitem
+ There it will see a \type +{+ and enter a special mode in which it starts
+ collecting tokens.
+\stopitem
+\startitem
+ In the process, it will expand control sequences that are expandable.
+\stopitem
+\startitem
+ The scanning ends when a matching \type +}+ is seen.
+\stopitem
+\startitem
+ The collected tokens are converted into a regular (C) string.
+\stopitem
+\startitem
+ This string is passed to the \type {lua_load} function that compiles it into
+ bytecode.
+\stopitem
+\startitem
+ The bytecode is executed and characters that are printed to \TEX\ are
+ injected into the input buffer.
+\stopitem
+\stopitemize
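+
+The last two steps roughly boil down to the following at the \LUA\ end; consider
+it a sketch of what happens and not actual engine code:
+
+\starttyping
+-- compile the collected string into a function (bytecode) ...
+local chunk = load("tex.print(math.sin(1.23)/math.cos(2*1.23))")
+-- ... and execute it; whatever gets printed goes to the input buffer
+chunk()
+\stoptyping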
+
+In the process, some state information is set and reset and errors are dealt
+with. Although it looks like a lot of actions, this all happens very fast, so
+fast actually that for regular usage you don't need to bother about it.
+
+There are however applications where you might want to see a performance boost,
+for instance when you're crunching numbers that end up in tables or graphics
+while processing the document. Again, this is not that typical for jobs, but with
+the availability of \LUA\ more of that kind of usage will show up. And, as we now
+also have \LUAJITTEX\ its jitting capabilities could be an advantage.
+
+Back to the example: there are two calls to functions there and, apart from the
+fact that they need to be resolved in the \type {math} table, they are also
+executed as C functions. As \LUAJIT\ optimizes known functions like this, there
+can be a potential speed gain, but as \type {\directlua} is parsed and loaded
+each time, the jit machinery will not do that, unless the same code gets
+exercised lots of times. In fact, the jit related overhead would be a waste in
+this one|-|time usage.
+
+In the next sections we will show two variants that follow a different approach
+and as a consequence can speed up a bit. But, be warned: the impact is not as
+large as you might expect, and as the code might look less intuitive, the good
+old \type {\directlua} command is still the advised method.
+
+Before we move on it's important to realize that a \type {\directlua} call is
+in fact a function call. Say that we have this:
+
+\starttyping
+\def\SomeValue{1.23}
+\stoptyping
+
+This becomes:
+
+\starttyping
+\directlua{tex.print(math.sin(1.23)/math.cos(2*1.23))}
+\stoptyping
+
+Which in \LUA\ is wrapped up as:
+
+\starttyping
+function()
+ tex.print(math.sin(1.23)/math.cos(2*1.23))
+end
+\stoptyping
+
+that gets executed. So, the code is always wrapped in a function. Being a
+function it is also a closure and therefore local variables are local to this
+function and are invisible at the outer level.
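+
+A quick way to see this could be the following: the second call falls back on
+the string because the \type {n} set in the first call was local to that chunk:
+
+\starttyping
+\directlua{local n = 123}
+\directlua{tex.print(n or "n is not visible here")}
+\stoptyping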
+
+\stopsection
+
+\startsection[title=Indirect \LUA]
+
+The first variant is tagged as indirect \LUA. With indirect we mean that instead
+of directly parsing, compiling and executing the code, it is done in steps. This
+method is not as generic a the one discussed in the next section, but for cases
+where relatively constant calls are used it is fine. Consider the next call:
+
+\starttyping
+\def\NextValue
+ {\indirectlua{myfunctions.nextvalue()}}
+\stoptyping
+
+This macro does not pass values and always looks the same. Of course there can be
+much more code, for instance the following is equally valid:
+
+\starttyping
+\def\MoreValues {\indirectlua{
+ for i=1,100 do
+ myfunctions.nextvalue(i)
+ end
+}}
+\stoptyping
+
+Again, there is no variable information passed from \TEX. Even the next variant
+is relatively constant:
+
+\starttyping
+\def\SomeValues#1{\indirectlua{
+ for i=1,#1 do
+ myfunctions.nextvalue(i)
+ end
+}}
+\stoptyping
+
+especially when this macro is called many times with the same value. So how does
+\type {\indirectlua} work? Well, its behaviour is in fact undefined! It does,
+like \type {\directlua}, parse the argument and make the string, but instead of
+loading and calling it directly, it will pass the string to the \LUA\ function
+\type {lua.call}.
+
+\starttyping
+lua.call = function(s) load(s)() end
+\stoptyping
+
+The previous definition is quite okay and in fact makes \type {\indirectlua}
+behave like \type {\directlua}. This definition makes
+
+% \ctxlua{lua.savedcall = lua.call lua.call = function(s) load(s)() end}
+% \testfeatureonce{10000}{\directlua {math.sin(1.23)}}
+% \testfeatureonce{10000}{\indirectlua{math.sin(1.23)}}
+% \ctxlua{lua.call = lua.savedcall}
+
+\starttyping
+\directlua {tex.print(math.sin(1.23))}
+\indirectlua{tex.print(math.sin(1.23))}
+\stoptyping
+
+equivalent calls but the second one is slightly slower, which is to be expected
+due to the wrapping and indirect loading. But look at this:
+
+\starttyping
+local indirectcalls = { }
+
+function lua.call(code)
+ local fun = indirectcalls[code]
+ if not fun then
+ fun = load(code)
+ if type(fun) ~= "function" then
+ fun = function() end
+ end
+ indirectcalls[code] = fun
+ end
+ fun()
+end
+\stoptyping
+
+This time the code needs about one third of the runtime. How much we gain depends
+on the size of the code and its complexity, but on the average it's much faster.
+Of course, during a \TEX\ job only a small part of the time is spent on this, so
+the overall impact is much smaller, but it makes runtime number crunching more
+feasible.
+
+If we bring jit into the picture, the situation becomes somewhat more diffuse.
+When we use \LUAJITTEX\ the whole job is processed faster, including this part,
+but because loading and interpreting is more optimized the impact might be less.
+If you enable jit, in most cases a run is slower than normal. But as soon as you
+have millions of calls to e.g.\ \type {math.sin} it might make a difference.
+
+This variant of calling \LUA\ is quite intuitive and also permits us to implement
+specific solutions because the \type {lua.call} function can be defined as you
+wish. Of course macro package writers can decide to use this feature too, so you
+need to beware of unpleasant side effects if you redefine this function.
+
+% \testfeatureonce{100000}{\directlua {math.sin(1.23)}}
+% \testfeatureonce{100000}{\indirectlua{math.sin(1.23)}}
+
+\stopsection
+
+\startsection[title=Calling \LUA]
+
+In the process we did some tests with indirect calls in \CONTEXT\ core code and
+indeed some gain in speed could be noticed. However, many calls get variable
+input and therefore don't qualify. Also, as a mixture of \type {\directlua} and
+\type {\indirectlua} calls in the source can be confusing it only makes sense to
+use this feature in real time|-|critical cases, because even in moderately
+complex documents there are not that many calls anyway.
+
+The next method uses a slightly different approach. Here we stay at the \TEX\
+end, parse some basic type arguments, push them on the \LUA\ stack, and call a
+predefined function. The amount of parsing at the \TEX\ end is not less, but
+especially when we pass numbers stored in registers, no tokenization
+(serialization of a number value into the input stream) and no stringification
+(converting the tokens back to a \LUA\ number) take place.
+
+\starttyping
+\indirectluacall 123
+ {some string}
+ \scratchcounter
+ {another string}
+ true
+ \dimexpr 10pt\relax
+\relax
+\stoptyping
+
+Actually, an extension like this had been on the agenda for a while, but never
+really got much priority. The first number is a reference to a function to be
+called.
+
+\starttyping
+lua.calls = lua.calls or { }
+lua.calls[123] = function(s1,n1,s2,b,n2)
+ -- do something with
+ --
+ -- string s1
+ -- number n1
+ -- string s2
+ -- boolean b
+ -- number n2
+end
+\stoptyping
+
+The first number to \type {\indirectluacall} is mandatory. It had also better be a
+number that has a function associated with it in the \type {lua.calls} table.
+Following that number and before the also mandatory \type {\relax}, there can be
+any number of arguments: strings, numbers and booleans.
+
+Anything surrounded by \type {{}} becomes a string. The keywords \type {true} and
+\type {false} become boolean values. Spaces are skipped and everything else is
+assumed to be a number. This means that if you omit the final \type {\relax}, you
+get a error message mentioning a \quote {missing number}. The normal number
+parser applies, so when a dimension register is passed, it is turned into a
+number. The example shows that wrapping a more verbose dimension into a \type
+{\dimexpr} also works.
+
+Performance wise, each string goes from list of tokens to temporary C string to
+\LUA\ string, so that adds some overhead. A number is more efficient, especially
+when you pass it using a register. The booleans are simple sequences of character
+tokens so they are relatively efficient too. Because \LUA\ functions accept an
+arbitrary number of arguments, you can provide as many as you like, or even
+fewer than the function expects: it is all driven by the final \type {\relax}.
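+
+A function that deals with a variable number of arguments could for instance
+look as follows; the slot number 124 is just an arbitrary choice for this
+example:
+
+\starttyping
+lua.calls[124] = function(...)
+    local n = select("#",...)   -- how many arguments were actually passed
+    for i=1,n do
+        local v = select(i,...) -- a string, a number or a boolean
+        -- do something with v
+    end
+end
+\stoptyping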
+
+An important characteristic of this kind of call is that there is no \type {load}
+involved, which means that the functions in \type {lua.calls} can be subjected to
+jitting.
+
+\stopsection
+
+\startsection[title=Name spaces]
+
+As with \type {\indirectlua} there is a potential clash when users mess with the
+\type {lua.calls} table without taking the macro package usage into account. It is
+not that complex to define a variant that provides namespaces:
+
+\starttyping
+\newcount\indirectmain \indirectmain=1
+\newcount\indirectuser \indirectuser=2
+
+\indirectluacall \indirectmain
+ {function 1}
+ {some string}
+\relax
+
+\indirectluacall \indirectuser
+ {function 1}
+ {some string}
+\relax
+\stoptyping
+
+A matching implementation is this:
+
+\starttyping
+lua.calls = lua.calls or { }
+
+local main = { }
+
+lua.calls[1] = function(name,...)
+ main[name](...)
+end
+
+main["function 1"] = function(a,b,c)
+ -- do something with a,b,c
+end
+
+local user = { }
+
+lua.calls[2] = function(name,...)
+ user[name](...)
+end
+
+user["function 1"] = function(a,b,c)
+ -- do something with a,b,c
+end
+\stoptyping
+
+Of course this is also ok:
+
+\starttyping
+\indirectluacall \indirectmain 1
+ {some string}
+\relax
+
+\indirectluacall \indirectuser 1
+ {some string}
+\relax
+\stoptyping
+
+with:
+
+\starttyping
+main[1] = function(a,b,c)
+ -- do something with a,b,c
+end
+
+user[1] = function(a,b,c)
+ -- do something with a,b,c
+end
+\stoptyping
+
+Normally a macro package, if it wants to expose this mechanism, will provide a
+more abstract interface that hides the implementation details. In that case the
+user is not supposed to touch \type {lua.calls} but this is not much different
+from the limitations in redefining primitives, so users can learn to live with
+this.
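+
+Such a more abstract interface could boil down to something like the following
+sketch; the helper name used here is made up for the occasion and not part of
+any macro package:
+
+\starttyping
+-- a made-up registration helper on top of lua.calls
+lua.calls = lua.calls or { }
+
+local last = 0
+
+function registerluacall(f)
+    last = last + 1
+    lua.calls[last] = f
+    return last -- to be stored in a count register at the TeX end
+end
+\stoptyping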
+
+\stopsection
+
+\startsection[title=Practice]
+
+There are some limitations. For instance in \CONTEXT\ we often pass tables and
+this is not implemented. Providing a special interface for that is possible but
+does not really help. Often the data passed that way is far from constant, so it
+can as well be parsed by \LUA\ itself, which is quite efficient. We did some
+experiments with the more simple calls and the outcome is somewhat disputable. If
+we replace some of the \quote {critical} calls we can gain some 3\% on a run of
+for instance the \type {fonts-mkiv.pdf} manual and a bit more on the command
+reference \type {cont-en.pdf}. The first manual uses lots of position tracking
+(an unfortunate side effect of using a specific feature that triggers continuous
+tracking) and low level font switches and many of these can benefit from the
+indirect call variant. The command reference manual uses \XML\ processing and
+that involves many calls to the \XML\ mapper and also does quite some string
+manipulations so again there is something to gain there.
+
+The following numbers are just an indication, as only a subset of \type
+{\directlua} calls has been replaced. The 166 page font manual processes in about
+9~seconds which is not bad given its complexity. The timings are on a Dell
+Precision M6700 with Core i7 3840QM, 16 GB memory, a fast SSD and 64 bit Windows
+8. The binaries were cross compiled mingw 32 bit by Luigi. \footnote {While
+testing with several function definitions we noticed that \type {math.random} in
+our binaries made jit twice as slow as normal, while for instance \type
+{math.sin} was 100 times faster. As the font manual uses the random function for
+rendering random punk examples it might have some negative impact. Our experience
+is that binaries compiled with the ms compiler are somewhat faster but as long as
+the engines that we test are compiled similarly the numbers can be compared.}
+
+% old: 8.870 8.907 9.089 / jit: 6.948 6.966 7.009 / jiton: 7.449 7.586 7.609
+% new: 8.710 8.764 8.682 | 8.64 / jit: 6.935 6.969 6.967 | 6.82 / jiton: 7.412 7.223 7.481
+%
+% 3% on total, 6% on lua
+
+\starttabulate[|lT|cT|cT|cT|]
+\HL
+\NC \NC \LUATEX \NC \LUAJITTEX \NC \LUAJITTEX\ + jit \NC \NR
+\HL
+\NC direct \NC 8.90 \NC 6.95 \NC 7.50 \NC \NR
+\NC indirect \NC 8.65 \NC 6.80 \NC 7.30 \NC \NR
+\HL
+\stoptabulate
+
+So, we can gain some 3\% on such a document and given that we spend probably half
+the time in \LUA, this means that these new features can make \LUA\ run more than
+5\% faster which is not that bad for a couple of lines of extra code. For regular
+documents we can forget about jit which confirms earlier experiments. The
+command reference has these timings:
+
+\starttabulate[|lT|cT|cT|]
+\HL
+\NC \NC \LUATEX \NC \LUAJITTEX \NC \NR
+\HL
+\NC direct \NC 2.55 \NC 1.90 \NC \NR
+\NC indirect \NC 2.40 \NC 1.80 \NC \NR
+\HL
+\stoptabulate
+
+Here the differences are larger which is due to the fact that we can indirect
+most of the calls used in this processing. The document is rather simple but as
+mentioned is encoded in \XML\ and the \TEX||\XML\ interface qualifies for this
+kind of speedups.
+
+As Luigi is still trying to figure out why jitting doesn't work out so well, we
+also did some tests with (in itself useless) calculations. After all we need
+proof. The first test was a loop with 100.000 step doing a regular \type
+{\directlua}:
+
+\starttyping
+\directlua {
+ local t = { }
+    for i=1,10000 do
+        t[i] = math.sin(i/10000)
+ end
+}
+\stoptyping
+
+The second test is a bit optimized. When we use jit this kind of optimization
+happens automatically for known (!) functions so there is not much to be won.
+
+\starttyping
+\directlua {
+ local sin = math.sin
+ local t = { }
+    for i=1,10000 do
+        t[i] = sin(i/10000)
+ end
+}
+\stoptyping
+
+We also tested this with \type {\indirectlua} and therefore defined some
+functions to test the call variant:
+
+\starttyping
+lua.calls[1] = function()
+ -- overhead
+end
+
+lua.calls[2] = function()
+ local t = { }
+ for i=1,10000 do
+ t[i] = math.sin(i/10000) -- naive
+ end
+end
+
+lua.calls[3] = function()
+ local sin = math.sin
+ local t = { }
+ for i=1,10000 do
+ t[i] = sin(i/10000) -- normal
+ end
+end
+\stoptyping
+
+These are called with:
+
+\starttyping
+\indirectluacall1\relax
+\indirectluacall2\relax
+\indirectluacall3\relax
+\stoptyping
+
+The overhead variant demonstrated that there was hardly any: less than 0.1 second.
+
+\starttabulate[|lT|lT|cT|cT|cT|]
+\HL
+\NC \NC \NC \LUATEX \NC \LUAJITTEX \NC \LUAJITTEX\ + jit \NC \NR
+\HL
+\NC directlua \NC normal \NC 167 \NC 64 \NC 46 \NC \NR
+\NC \NC local \NC 122 \NC 57 \NC 46 \NC \NR
+\NC indirectlua \NC normal \NC 166 \NC 63 \NC 45 \NC \NR
+\NC \NC local \NC 121 \NC 56 \NC 45 \NC \NR
+\NC indirectluacall \NC normal \NC 165 \NC 66 \NC 48 \NC \NR
+\NC \NC local \NC 120 \NC 60 \NC 47 \NC \NR
+\HL
+\stoptabulate
+
+The results are somewhat disappointing but not that unexpected. We do see a speedup
+with \LUAJITTEX\ and in this case even jitting makes sense. However in a regular
+typesetting run jitting will never catch up with the costs it carries for the
+overall process. The \type {\indirectlua} call is somewhat faster than the direct
+call. Possible reasons are that hashing at the \LUA\ end also costs time and that
+the 100.000 calls from \TEX\ to \LUA\ are not that big a burden. The \type
+{\indirectluacall} variant is therefore also not much faster, because it has some
+additional parsing overhead at the \TEX\ end. That one only speeds up when we pass
+arguments, and even then not always by the same amount. It is therefore mostly a
+convenience feature.
+
+We left one aspect out and that is garbage collection. It might be that in large
+runs less loading has a positive impact on collecting garbage. We also need to
+keep in mind that careful application can have some real impact. Take the
+following example of \CONTEXT\ code:
+
+\startntyping
+\dorecurse {1000} {
+
+ \startsection[title=section #1]
+
+ \startitemize[n,columns]
+ \startitem test \stopitem
+ \startitem test \stopitem
+ \startitem test \stopitem
+ \startitem test \stopitem
+ \stopitemize
+
+ \starttabulate[|l|p|]
+ \NC test \NC test \NC \NR
+ \NC test \NC test \NC \NR
+ \NC test \NC test \NC \NR
+ \stoptabulate
+
+ test {\setfontfeature{smallcaps} abc} test
+ test {\setfontfeature{smallcaps} abc} test
+ test {\setfontfeature{smallcaps} abc} test
+ test {\setfontfeature{smallcaps} abc} test
+ test {\setfontfeature{smallcaps} abc} test
+ test {\setfontfeature{smallcaps} abc} test
+
+ \framed[align={lohi,middle}]{test}
+
+ \startembeddedxtable
+ \startxrow \startxcell x \stopxcell \startxcell x \stopxcell \stopxrow
+ \startxrow \startxcell x \stopxcell \startxcell x \stopxcell \stopxrow
+ \startxrow \startxcell x \stopxcell \startxcell x \stopxcell \stopxrow
+ \startxrow \startxcell x \stopxcell \startxcell x \stopxcell \stopxrow
+ \startxrow \startxcell x \stopxcell \startxcell x \stopxcell \stopxrow
+ \stopembeddedxtable
+
+ \stopsection
+
+ \page
+
+}
+\stopntyping
+
+These macros happen to use mechanisms that are candidates for indirectness.
+However, it doesn't happen often that you process thousands of pages with mostly
+tables and smallcaps (although tabular digits are a rather valid font feature in
+tables). For instance, in web services squeezing out a few tens of seconds might
+make sense if there is a large queue of documents.
+
+\starttabulate[|lT|cT|cT|cT|]
+\HL
+\NC \NC \LUATEX \NC \LUAJITTEX \NC \LUAJITTEX\ + jit \NC \NR
+\HL
+\NC direct \NC 19.1 \NC 15.9 \NC 15.8 \NC \NR
+\NC indirect \NC 18.0 \NC 15.2 \NC 15.0 \NC \NR
+\HL
+\stoptabulate
+
+Surprisingly, even jitting helps a bit here. Maybe it relates to the number of
+pages and the number of calls but we didn't investigate this. By default jitting
+is off anyway. The impact of indirectness is larger than in previous examples.
+
+For this test a file was loaded that redefines some core \CONTEXT\ code. This
+also has some overhead which means that numbers for the indirect case will be
+somewhat better if we decide to use these mechanisms in the core code. It is
+tempting to do that but it involves some work and it's always the question if a
+week of experimenting and coding will ever be compensated by the gain. After all, in
+this last test, a speed of 50 pages per second is not that bad a performance.
+
+When looking at these numbers, keep in mind that it is still not clear if we end
+up using this functionality, and when \CONTEXT\ will use it, it might be in a way
+that gives better or worse timings than mentioned above. For instance, storing \LUA\
+code in the format is possible, but these implementations force us to serialize
+the \type {lua.calls} mechanism and initialize them after format loading. For that
+reason alone, a more native solution is better.
+
+\stopsection
+
+\startsection[title=Exploration]
+
+In the early days of \LUATEX\ Taco and I discussed an approach similar to
+registers which means that there is some \type {\...def} command available. The
+biggest challenge there is to come up with a decent way to define the arguments.
+On the one hand, using a hash syntax is natural to \TEX, but using names is more
+natural to \LUA. So, when we picked up that thread, solutions like this came up
+in a Skype session with Taco:
+
+\starttyping
+\luadef\myfunction#1#2{ tex.print(arg[1]+arg[2]) }
+\stoptyping
+
+The \LUA\ snippet becomes a function with this body:
+
+\starttyping
+local arg = { #1, #2 } -- can be preallocated and reused
+-- the body as defined at the tex end
+tex.print(arg[1]+arg[2])
+\stoptyping
+
+Where \type {arg} is set each time. As we wrapped it in a function we can
+also put the arguments on the stack and use:
+
+\starttyping
+\luadef\myfunction#1#2{ tex.print((select(1,...))+(select(2,...))) }
+\stoptyping
+
+Given that we can make select work this way (whether or not by additional
+wrapping). Anyway, both these solutions are ugly and so we need to look further.
+Also, the \type {arg} variant mandates building a table. So, a natural next
+iteration is:
+
+\starttyping
+\luadef\myfunction a b { tex.print(a+b) }
+\stoptyping
+
+Here it becomes already more natural:
+
+\starttyping
+local a = #1
+local b = #2
+-- the body as defined at the tex end
+tex.print(a+b)
+\stoptyping
+
+But, as we don't want to reload the body we need to push \type {#1} into the
+closure. This is a more static definition equivalent:
+
+\starttyping
+local a = select(1,...)
+local b = select(2,...)
+tex.print(a+b)
+\stoptyping
+
+Keep in mind that we are not talking of some template that gets filled in and
+loaded, but about precompiled functions! So, a \type {#1} is not really put there
+but somehow pushed into the closure (we know the stack offsets).
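+
+For contrast, the template approach that is explicitly not meant here would look
+something like this, with the body being formatted and loaded again for each
+call:
+
+\starttyping
+-- not the idea: fill in a template and recompile it each time
+local body = "local a = %s local b = %s tex.print(a+b)"
+local fn   = load(string.format(body,1,2))
+fn()
+\stoptyping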
+
+Yet another issue is a more direct alias. Say that we define a function at the
+\LUA\ end and want to access it using this kind of interface.
+
+\starttyping
+function foo(a,b)
+ tex.print(a+b)
+end
+\stoptyping
+
+Given that we have something like this:
+
+\starttyping
+\luadef \myfunctiona a b { tex.print(a+b) }
+\stoptyping
+
+We can consider:
+
+\starttyping
+\luaref \myfunctionb 2 {foo}
+\stoptyping
+
+The explicit number is debatable as it can be interesting to permit
+an arbitrary number of arguments here.
+
+\starttyping
+\myfunctiona{1}{2}
+\myfunctionb{1}{2}
+\stoptyping
+
+So, if we go for:
+
+\starttyping
+\luaref \myfunctionb {foo}
+\stoptyping
+
+we can use \type {\relax} as terminator:
+
+\starttyping
+\myfunctiona{1}{2}
+\myfunctionb{1}{2}\relax
+\stoptyping
+
+In fact, the call method discussed in a previous section can be used here as well
+as it permits fewer arguments as well as mixed types. Think of this:
+
+\starttyping
+\luadef \myfunctiona a b c { tex.print((a or 0) + (b or 0) + (c or 0)) }
+\luaref \myfunctionb {foo}
+\stoptyping
+
+with
+
+\starttyping
+function foo(a,b,c)
+    tex.print((a or 0) + (b or 0) + (c or 0))
+end
+\stoptyping
+
+This could be all be valid:
+
+\starttyping
+\myfunctiona{1}{2}{3}\relax
+\myfunctiona{1}\relax
+\myfunctionb{1}{2}\relax
+\stoptyping
+
+or (as in practice we want numbers):
+
+\starttyping
+\myfunctiona 1 \scratchcounter 3\relax
+\myfunctiona 1 \relax
+\myfunctionb 1 2 \relax
+\stoptyping
+
+We basically get optional arguments for free, as long as we deal with them properly
+at the \LUA\ end. The only condition in the \type {\luadef} case is that there
+can be no more than the given number of arguments, because that's how the function
+body gets set up. In practice this is quite okay.
+
+% After this exploration we can move on to the final implementation and see what we
+% ended up with.
+
+\stopsection
+
+% \startsection[title=The final implementation]
+% {\em todo}
+% \stopsection
+
+\startsection[title=The follow up]
+
+We don't know what eventually will happen with \LUATEX. We might even (at least
+in \CONTEXT) stick to the current approach because there is not much to gain in
+terms of speed, convenience and (most of all) beauty.
+
+{\em Note:} From \LUATEX\ 0.79 onward \type {\indirectlua} has been implemented as
+\type {\luafunction} and the \type {lua.calls} table is available as \type
+{lua.get_functions_table()}. A decent token parser has been discussed at the
+\CONTEXT\ 2013 conference and will show up in due time. In addition, so called
+\type {latelua} nodes support function assignments and \type {user} nodes support
+a field for \LUA\ values. Additional information can be associated with any nodes
+using the properties subsystem.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-contents.tex b/doc/context/sources/general/manuals/about/about-contents.tex
new file mode 100644
index 000000000..0b8cdad97
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-contents.tex
@@ -0,0 +1,17 @@
+% language=uk
+
+\startcomponent about-contents
+
+\environment about-environment
+
+\starttitle[title={Contents}]
+
+\setuplist[chapter][aligntitle=yes,after=,before=]
+
+\placelist[chapter][criterium=frontpart:all] \blank
+\placelist[chapter][criterium=bodypart:all] \blank
+\placelist[chapter][criterium=backpart:all] \blank
+
+\stoptitle
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-environment.tex b/doc/context/sources/general/manuals/about/about-environment.tex
new file mode 100644
index 000000000..893039550
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-environment.tex
@@ -0,0 +1,71 @@
+\startenvironment about-environment
+
+\setupbodyfont
+ [pagella]
+
+\usemodule
+ [abr-02,narrowtt]
+
+\definecolor [maincolor] [.6(orange)]
+
+\setupwhitespace
+ [big]
+
+\setuptyping
+ [color=maincolor]
+
+\setuptype
+ [color=maincolor]
+
+\setupitemgroup
+ [itemize]
+ [each]
+ [color=maincolor,
+ symcolor=maincolor]
+
+\setupcaptions
+ [color=maincolor]
+
+\setuphead
+ [chapter]
+ [before=,
+ after={\blank[2*big]},
+ style=\bfc,
+ color=maincolor]
+
+\setuphead
+ [section]
+ [before={\blank[2*big]},
+ after=\blank,
+ style=\bfb,
+ color=maincolor]
+
+\setuphead
+ [subsection]
+ [before=\blank,
+ after=\blank,
+ style=\bfa,
+ color=maincolor]
+
+\setuplayout
+ [width=middle,
+ height=middle,
+ header=0cm,
+ topspace=2cm,
+ bottomspace=1cm,
+ footer=1cm,
+ footerdistance=.5cm]
+
+\setupfootertexts
+ [][{\getmarking[chapter]\hbox to 2em{\hss\pagenumber}}]
+ [{\hbox to 2em{\pagenumber\hss}\getmarking[chapter]}][]
+
+\setuppagenumbering
+ [alternative=doublesided]
+
+\startMPextensions
+ % color maincolor ; maincolor := \MPcolor{maincolor} ;
+ string maincolor ; maincolor := "maincolor" ;
+\stopMPextensions
+
+\stopenvironment
diff --git a/doc/context/sources/general/manuals/about/about-expanding.tex b/doc/context/sources/general/manuals/about/about-expanding.tex
new file mode 100644
index 000000000..6f643ffad
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-expanding.tex
@@ -0,0 +1,188 @@
+% engine=luatex macros=mkvi language=uk
+
+\startcomponent about-expanding
+
+\environment about-environment
+
+\startchapter[title=Still Expanding]
+
+In the beginning of October 2013 Luigi figured out that \LUAJITTEX\ could
+actually deal with \UTF\ identifiers. After we played a bit with this, a patch
+was made for stock \LUATEX\ to provide the same. In the process I found out that
+I needed to adapt the \SCITE\ lexer a bit and that some more characters had to
+get catcode~11 (letter). In the following text screendumps from the editor will
+be used instead of verbatim code. This also demonstrates how \SCITE\ deals with
+syntax highlighting.
+
+\starttexdefinition ShowExample #1
+ \startbaselinecorrection
+ \externalfigure[still-expanding-#1][scale=500]
+ \stopbaselinecorrection
+ \getbuffer
+\stoptexdefinition
+
+First we define a proper font to deal with \CJK\ characters and a helper
+macro that wraps an example using that font.
+
+\startbuffer
+\definefont
+ [GoodForJapanese]
+ [heiseiminstd-w3]
+ [script=kana,
+ language=jan]
+
+\definestartstop
+ [example]
+ [style=GoodForJapanese]
+\stopbuffer
+
+\ShowExample{1}
+
+According to the Google translator, \example {例題} means example and \example
+{数} means number. It doesn't matter much as we only use these characters as
+demo. Of course one can wonder if it makes sense to define functions, variables
+and keys in a script other than basic Latin, but at least it looks kind of
+modern.
+
+\startbuffer
+\startluacode
+ local function 例題(str)
+ context.formatted.example("例題 1.%s: 数 %s",str,str)
+ context.par()
+ end
+
+ for i=1,3 do
+ 例題(i)
+ end
+\stopluacode
+\stopbuffer
+
+We only show the first three lines. Because using the formatter gives nicer
+source code we operate in that subnamespace.
+
+\ShowExample{2}
+
+As \CONTEXT\ has been \UTF\ aware for a while already you can define macros with such
+characters. It was a sort of coincidence that this specific range of characters
+had not yet gotten the proper catcodes, but that is something users don't need to
+worry about. If your script doesn't work, we just need to initialize a few more
+characters.
+
+\startbuffer
+\def\例題#1{\example{例題 2: 数 #1}\par}
+
+\例題{2.1}
+\stopbuffer
+
+\ShowExample{3}
+
+Of course this command is now also present at the \LUA\ end:
+
+\startbuffer
+\startluacode
+ context.startexample()
+ context.例題(2.2)
+ context.stopexample()
+\stopluacode
+\stopbuffer
+
+\ShowExample{4}
+
+The \type {MKVI} parser has also been adapted to this phenomenon as have the
+alternative ways of defining macros. We could already do this:
+
+\startbuffer
+\starttexdefinition test #1
+ \startexample
+ 例題 3: 数 #1 \par
+ \stopexample
+\stoptexdefinition
+
+\test{3}
+\stopbuffer
+
+\ShowExample{5}
+
+But now we can also do this:
+
+\startbuffer
+\starttexdefinition 例題 #1
+ \startexample
+ 例題 4: 数 #1 \par
+ \stopexample
+\stoptexdefinition
+
+\例題{4}
+\stopbuffer
+
+\ShowExample{6}
+
+Named parameters support a wider range of characters too:
+
+\startbuffer
+\def\例題#数{\example{例題 5: 数 #数}\par}
+
+\例題{5}
+\stopbuffer
+
+\ShowExample{7}
+
+So, in the end we can have definitions like this:
+
+\startbuffer
+\starttexdefinition 例題 #数
+ \startexample
+ 例題 6: 数 #数 \par
+ \stopexample
+\stoptexdefinition
+
+\例題{6}
+\stopbuffer
+
+\ShowExample{8}
+
+Of course the optional (first) arguments are still supported but these stay
+Latin.
+
+\startbuffer
+\starttexdefinition unexpanded 例題 #数
+ \startexample
+ 例題 7: 数 #数 \par
+ \stopexample
+\stoptexdefinition
+
+\例題{7}
+\stopbuffer
+
+\ShowExample{9}
+
+Finally Luigi wondered if we could use math symbols too and of course there is no
+reason why not:
+
+\startbuffer
+\startluacode
+ function commands.∑(...)
+ local t = { ... }
+ local s = 0
+ for i=1,#t do
+ s = s + t[i]
+ end
+ context("% + t = %s",t,s)
+ end
+\stopluacode
+
+\ctxcommand{∑(1,3,5,7,9)}
+\stopbuffer
+
+\ShowExample{10}
+
+The \CONTEXT\ source code will of course stay \ASCII, although some of the
+multilingual user interfaces already use characters other than that, for instance
+accented characters or completely different scripts (like Persian). We just went
+a step further and supported it at the \LUA\ end which in turn introduced those
+characters into \MKVI.
+
+\stopchapter
+
+\stopcomponent
+
diff --git a/doc/context/sources/general/manuals/about/about-hashing.tex b/doc/context/sources/general/manuals/about/about-hashing.tex
new file mode 100644
index 000000000..3a9a74c61
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-hashing.tex
@@ -0,0 +1,616 @@
+% language=uk
+
+\startcomponent about-hashing
+
+\environment about-environment
+
+\usemodule[lua-hashing]
+
+\startchapter[title={Lua strings}]
+
+\startsection[title=Introduction]
+
+In the crited project \footnote {This is a project by Thomas Schmitz, Alan
+Braslau, Luigi Scarso and Hans Hagen funded by the Institut für Klassische und
+Romanische Philologie Universität Bonn.} we have to deal with large amounts of
+data. The sources are in \TEI\ \XML\ and processed directly in \CONTEXT\ \MKIV,
+and we have to filter content from different places in the \XML\ tree. Processing
+relies on \LUA\ a lot because we use \LUA\ for dealing with the \XML. We're
+talking about Latin and Greek texts so the demand for extensive font
+processing in \LUA\ is moderate. But as critical editions have lots of line
+specific referencing and notes there are some more complex layout elements
+involved, and again these use \LUA. There is also extensive use of bibliographies
+and it will be no surprise that \LUA\ comes to help too. \footnote {One of the
+objectives of the project is to update and enhance the bibliographic subsystem.}
+
+One secondary objective is to be able to process the complex documents at a speed
+of at least 20 pages per second on a modern 2014 workstation laptop. One way of
+achieving this is to use \LUAJITTEX\ which has a faster virtual \LUA\ machine.
+However, we ran into several issues with the \LUAJIT\ interpreter, which is fully
+\LUA\ language 5.1 and partly 5.2 compatible but definitely has a different low
+level implementation. In the next sections I will discuss two issues that Luigi
+and I ran into and for which we could come up with reasonable workarounds.
+
+\stopsection
+
+\startsection[title=The stacks]
+
+A \TEX\ job is normally a multi|-|pass experience. One run can produce information
+that is used in a successive one. The reason is that something can happen on page
+15 that influences the typesetting of page~9. There can even be a partial chain
+reaction: when you typeset a document the first time, the table of contents (and
+the pages it refers to) is not known yet, but information is saved that makes it
+possible the next time. That next run it gets included and takes for instance 4
+pages. This means that all page numbers shift up. This in turn will trigger a new
+run because all cross references might change too: two digit page numbers can
+become three digits, so paragraphs can run wider, and that again can trigger more
+pages. Normally an initial three runs is enough, and with minor updates of the
+source one or two runs are enough after that.
+
+The multi|-|pass information is saved in tables in the so called utility file and
+loaded the next run. Common subtables are shared in the process. In order to
+determine if there have been crucial changes that demand an extra run, we have to
+make sure that random order in these tables is eliminated. Normally we already
+sort keys in tables when writing them to file but some tables come out in the
+order the traversing \type {next} function delivers them. In the more recent 5.2
+versions \LUA\ has added some randomness to the order in which hashed tables are
+organized, so while in previous versions we could assume that for a specific
+binary the order was the same each time, we cannot rely on that any longer. This is
+not that important for normal cases, but we compare previous and current versions
+of the utility file and pack shared tables in them as well, which means that we
+are sensitive to a change in order. But, this could be dealt with at the cost of
+some extra sorting. \footnote {In \CONTEXT\ we also pack font tables which saves
+lots of memory and also some load time.}
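+
+The kind of order independent traversal meant here boils down to something like
+the following simplified sketch (not the actual \CONTEXT\ code):
+
+\starttyping
+-- a simplified sketch: traverse a (string keyed) table in a fixed order
+local function sortedpairs(t)
+    local keys = { }
+    for k in next, t do
+        keys[#keys+1] = k
+    end
+    table.sort(keys)
+    local i = 0
+    return function()
+        i = i + 1
+        local k = keys[i]
+        if k ~= nil then
+            return k, t[k]
+        end
+    end
+end
+\stoptyping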
+
+Anyway, this kind of change in the \LUA\ machinery is harmless apart from taking
+some time to adapt to it. It is also the reason why we cannot simply push a new
+update of \LUA\ into \LUATEX\ because low level changes can have a (yet unknown)
+impact. Of course performance is the biggest issue here: we don't want a slower
+\LUATEX.
+
+In the past we already reported on the benefits of \LUAJITTEX, especially its
+faster virtual machine. We don't benefit from jitting; on the contrary it slows
+us down. One reason is that we cross the \LUA||\CCODE\ boundary often and hardly
+use any of the optimized functions. Part of the speed is achieved by a different
+implementation deep down and one aspect of that is a different virtual machine
+instruction set. While \LUA\ can go real big in terms of memory and table
+construction, \LUAJIT\ limits us to at most 2G memory and poses some 64K
+limitations in functions and table constructors. The memory is not so much the
+issue in the crited project but the (nested) table constructor is. When we have a
+few tens of thousands of cross references, index entries and|/|or list entries we
+simply cannot load the multi|-|pass data. A few days of playing with splitting up
+nested tables didn't help much: it made the code look horrible and eventually we
+again ran into a maximum of 64K someplace as a \type {dofile} effectively makes a
+function that gets run and \LUAJIT\ doesn't like that size. For the record: we
+don't have such issues with large font tables probably because they are just one
+big table. The reason why we cannot use that approach is that serializing the
+potentially very large tables in the utility file also has limitations.
+
+Eventually this could be solved by assuming only forward referencing for certain
+registers. That way we only used the index entries collected in memory during the
+run and as long as we don't put a register before it's entries are defined we're
+okay. So here we have a typical case where one can set an option to circumvent
+an engine limitation. \footnote {A decade ago similar tricks had to be used to
+support hundreds of thousands of hyperlinks in \TEX\ engines with at that time
+limited memory capabilities.} Explaining this in a user manual is a challenge,
+because an error message like the following is not that helpful:
+
+\starttyping
+main function has more than 65536 constants
+\stoptyping
+
+But, once we could generate these indices again by posing some limitations,
+\LUAJITTEX\ had other issues. This time we got excessive runtime and we spent
+quite some time sorting that one out. More on that in the next section.
+
+\stopsection
+
+\startsection[title=Hashing]
+
+One of the reasons why (text processing with) \LUA\ is rather fast is that it
+hashes its strings so that a test for equality is real fast. This means that for
+each string that enters \LUA\ a hash value is calculated and that hash is used in
+comparisons. Of course hashing takes time, but especially when you work with lots
+of tables the advantage of a simple hash compare outweighs this one||time
+hashing. On the other hand, if you work with files and process lines, and maybe
+split these in words, you might end up with a lot of unneeded hashing. But, in
+\LUATEX\ and therefore \MKIV\ we benefit from hashing a lot. In \LUA\ 5.2 the
+hash function was adapted so that only strings up to (by default) 40 characters
+get hashed. In practice we're not affected much by this, as most keywords we use
+are shorter than this boundary. And in \CONTEXT\ we do quite some keyword checking.
+
+So, when we were conducting tests with these large registers, we were surprised
+that \LUAJITTEX\ performed significantly slower (ten times or more) that stock
+\LUATEX, while until then we had observed that a \LUAJITTEX\ run was normally
+some 20 to 40\% faster.
+
+The first impression was that it related to the large amount of strings that are
+written from \LUA\ to \TEX. After index entries are collected, they are sorted
+and the index is flushed to \TEX. This happens in one go, and \TEX\ code ends up
+in the \TEX\ input stack. Some actions are delayed and create callbacks to \LUA,
+so some wrapping in functions happens too. That means that some (\LUA) strings
+are only freed later on, but that proved not to be the main problem.
+
+When the entries are typeset, an interactive cross reference is kept track of and
+these exist till the document is closed and the referencing information is
+written to the \PDF\ file. Of course we could tweak this but once you start along
+that path there is no end to writing ugly hacks.
+
+Eventually we found that the slowdown relates to hashing, which took a while to
+figure out, especially because that is not the first area where you look. Why is
+this? The specific register concerned lots of small Greek words, pointing to
+locations in a text, where locations looked like \type {1.2.3}. In case you wonder
+why Greek is mentioned: in multi|-|byte \UTF\ sequences there is a lot of
+repetition:
+
+\startluacode
+local byte = string.byte
+function sample(s)
+ context.NC() context(s)
+ context.NC() context.ttx(false)
+ for b in string.utfvalues(s) do
+ context("%02X ",b)
+ end
+ context.NC() context.ttx(false)
+ for b in string.gmatch(s,".") do
+ context("%02X ",byte(b))
+ end
+ context.NC() context.NR()
+end
+
+context.starttabulate { "||||" }
+context.FL()
+context.NC() context.bold("word")
+context.NC() context.bold("unicode")
+context.NC() context.bold("bytes")
+context.NC() context.NR()
+context.FL()
+sample("βίον")
+sample("βίου")
+sample("βιοὺς")
+sample("βουλὴν")
+sample("βουλῆς")
+context.LL()
+context.stoptabulate()
+\stopluacode
+
+When cross referencing these index entries with their origin, you end up with
+reference identifiers like \type {foo:1.2.3} or, because \CONTEXT\ has automated
+internal references (which are rather efficient in the resulting \PDF), we get
+\type {aut:1}, \type {aut:2} up to, in this case, some 30.000 of them.
+
+The problem with hashing is as follows. When we write commands to \TEX\ or use
+data with a repetitive property, the similarity of these strings can be hard on
+the hasher as it can produce similar hash keys in which case collisions need to
+be dealt with. I'm no expert on hashing but looking at the code shows that in
+\LUAJIT\ (at least in the version we're talking about) the string is seen as
+chunks of 4 bytes. The first, last, middle and halfway middle chunks are
+consulted and after some bit juggling we get a hash value. In the case of strings
+like the following it is clear that the beginning and end look quite the same:
+
+\starttyping
+foo:000001 foo:010001 foo:100001
+\stoptyping
+
+or:
+
+\starttyping
+foo:1.2.12 foo:1.3.12 foo:1.4.12 foo:1.5.12
+\stoptyping
+
+It seems that the used method of hashing is somewhat arbitrary and maybe tuned
+for specific applications. In order to see what the impact is of hashing quite
+similar strings, some experiments were conducted: with \LUATEX\ 0.73 using \LUA\
+5.2 hashing, with \LUAJITTEX\ 0.73, and with the same \LUAJITTEX\ but using the
+hash variant of native \LUA\ 5.1. For each variant we ran tests where strings of
+increasing length were combined with a number (running from one to one million).
+
+\starttabulate[|||]
+\NC none \NC <string> \NC \NR
+\NC right \NC <string> <number> \NC \NR
+\NC left \NC <number> <string> \NC \NR
+\NC center \NC <string> <number> <string> \NC \NR
+\NC edges \NC <number> <string> <number> \NC \NR
+\stoptabulate
+
+The differences between engines can be seen in the tables on the next pages. In the
+fourth table we summarize which engine performs best. Keep in mind that
+\LUAJITTEX\ has the advantage of the faster virtual machine so it has an
+additional speed advantage.
+
+We show three tables with measurements. The \type {none} column shows the
+baseline of the test:
+
+\starttyping
+local t = { }
+for i=1,1000000 do
+ t[i] = i
+end
+\stoptyping
+
+The column tagged \quote {right} does this:
+
+\starttyping
+local t = { }
+for i=1,1000000 do
+ t[i] = text .. i
+end
+\stoptyping
+
+And \quote {left} does:
+
+\starttyping
+local t = { }
+for i=1,1000000 do
+ t[i] = i .. text
+end
+\stoptyping
+
+That leaves \quote {center}:
+
+\starttyping
+local t = { }
+for i=1,1000000 do
+ t[i] = text .. i .. text
+end
+\stoptyping
+
+and \quote {edges}:
+
+\starttyping
+local t = { }
+for i=1,1000000 do
+ t[i] = i .. text .. i
+end
+\stoptyping
+
+Of course there is also the loop and the concatenation involved so the last two
+variants have some more overhead. We show some measurements in \in {tables}
+[tab:torture-1], \in [tab:torture-2] \in {and} [tab:torture-3]. So, there we have
+strings like:
+
+\starttyping
+2abc
+222abc
+22222abc
+abc222222
+222222abc222222
+222222abc222222
+abc2222abc
+\stoptyping
+
+and so on. Of course a million such strings makes not much sense in practice but
+it serves our purpose of testing.
+
+\startplacetable[reference=tab:torture-1,location=page,title=\type{context test.tex}]
+ \scale
+ [height=\the\dimexpr\textheight-3\lineheight\relax]
+ % [width=\the\dimexpr\textwidth+.5\backspace\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing { filename = "luatest-hash-luatex-073-LUA52.lua" }}}}
+\stopplacetable
+
+\startplacetable[reference=tab:torture-2,location=page,title=\type{context --jit --jithash=luajit20 test.tex}]
+ \scale
+ [height=\the\dimexpr\textheight-3\lineheight\relax]
+ % [width=\the\dimexpr\textwidth+.5\backspace\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing { filename = "luatest-hash-luajittex-073-JIT20.lua" }}}}
+\stopplacetable
+
+\startplacetable[reference=tab:torture-3,location=page,title=\type{context --jit --jithash=lua51 test.tex}]
+ \scale
+ [height=\the\dimexpr\textheight-3\lineheight\relax]
+ % [width=\the\dimexpr\textwidth+.5\backspace\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing { filename = "luatest-hash-luajittex-073-LUA51.lua" }}}}
+\stopplacetable
+
+In these tables you can see some extremes. On the average \LUA\ 5.2 performs
+quite okay as does standard \LUAJIT. However, when we bring the 5.1 hash variant
+into \LUAJITTEX\ we get a more predictable average performance as it deals better
+with some of the extreme cases that make \LUAJITTEX\ crawl compared to \LUATEX.
+We have done more tests and it is interesting to see that in the 5.1 (and derived
+5.2) method there are sometimes cases where odd lengths perform much worse than
+even lengths. Red values are larger than two times the average, blue values
+larger than average while green values indicate a less than half average value.
+
+In \in {table} [tab:compare-1] we show which method performs best relative to each
+other. Of course in many applications there will be no such extreme cases, but
+we happened to run into them. But, even if \type {JIT20} is a winner in most cases,
+the fact that it has extremely slow exceptions makes it a bit of a gamble.
+
+\startplacetable[location=page,reference=tab:compare-1,title=The best performances per engine and hasher.]
+ \startcombination
+ \startcontent
+ \scale
+ [height=\the\dimexpr\textheight-4\lineheight\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing {
+ fileset = {
+ { tag = "JIT20", filename = "luatest-hash-luajittex-073-JIT20.lua" },
+ { tag = "JIT51", filename = "luatest-hash-luajittex-073-LUA51.lua" },
+ } } }}}
+ \stopcontent
+ \startcaption
+ \LUAJITTEX\ only
+ \stopcaption
+ \startcontent
+ \scale
+ [height=\the\dimexpr\textheight-4\lineheight\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing {
+ fileset = {
+ { tag = "LUA52", filename = "luatest-hash-luatex-073-LUA52.lua" },
+ { tag = "JIT20", filename = "luatest-hash-luajittex-073-JIT20.lua" },
+ { tag = "JIT51", filename = "luatest-hash-luajittex-073-LUA51.lua" },
+ } } }}}
+ \stopcontent
+ \startcaption
+ Both engines.
+ \stopcaption
+ \stopcombination
+\stopplacetable
+
+The 5.1 hasher runs over the string with a step that depends on the length of the
+string. We've seen that in 5.2 it doesn't hash strings larger than 40 characters.
+The step is calculated by shifting the length (by default) over 5 bits. This
+means that for strings of size 32 and more the step becomes 2 which is why we see
+this odd|/|even timing issue in the tables. Basically we hash at most 32
+characters of the 40. The next table shows that the fewer characters we take
+into account (first column) the fewer unique keys we get (second column).
+
+\starttabulate[|c|r|l|]
+\FL
+\NC \bf n \NC \bf unique \NC \bf text \NC \NR
+\FL
+\NC 3 \NC 22 \NC \tt\tx /Border [ 0 0 0 ] /F 4 /Subtype /Link /A * 0 R \NC \NR
+\NC 3 \NC 31 \NC \tt\tx << /D [ * 0 R /Fit ] /S /GoTo >> \NC \NR
+\NC 4 \NC 43 \NC \tt\tx /Border [ 0 0 0 ] /F 4 /Subtype /Link /A * 0 R \NC \NR
+\NC 4 \NC 51 \NC \tt\tx << /D [ * 0 R /Fit ] /S /GoTo >> \NC \NR
+\NC 5 \NC 410 \NC \tt\tx /Border [ 0 0 0 ] /F 4 /Subtype /Link /A * 0 R \NC \NR
+\NC 5 \NC 210 \NC \tt\tx << /D [ * 0 R /Fit ] /S /GoTo >> \NC \NR
+\NC 6 \NC 29947 \NC \tt\tx /Border [ 0 0 0 ] /F 4 /Subtype /Link /A * 0 R \NC \NR
+\NC 6 \NC 29823 \NC \tt\tx << /D [ * 0 R /Fit ] /S /GoTo >> \NC \NR
+\LL
+\stoptabulate
+
+In the next table we show a few cases. The characters that are taken into account
+are colored red. \footnote {Again the first column indicates the shift applied to
+the length in order to determine the step.}
+
+\starttabulate[|c|l|l|]
+\FL
+\NC \bf n \NC \bf text \NC \bf consulted \NC \NR
+\FL
+\NC 3\NC \tt\tx << /D [ 8 0 R /Fit ] /S /GoTo >> \NC \tt\tx <{\darkred <} /{\darkred D} [{\darkred \space }8 {\darkred 0} R{\darkred \space }/F{\darkred i}t {\darkred ]} /{\darkred S} /{\darkred G}oT{\darkred o} >{\darkred >} \NC \NR
+\NC 3\NC \tt\tx << /D [ 9 0 R /Fit ] /S /GoTo >> \NC \tt\tx <{\darkred <} /{\darkred D} [{\darkred \space }9 {\darkred 0} R{\darkred \space }/F{\darkred i}t {\darkred ]} /{\darkred S} /{\darkred G}oT{\darkred o} >{\darkred >} \NC \NR
+\NC 3\NC \tt\tx << /D [ 10 0 R /Fit ] /S /GoTo >> \NC \tt\tx <<{\darkred \space }/D{\darkred \space}[ {\darkred 1}0 {\darkred 0} R{\darkred \space }/F{\darkred i}t {\darkred ]} /{\darkred S} /{\darkred G}oT{\darkred o} >{\darkred >} \NC \NR
+\NC 3\NC \tt\tx << /D [ 11 0 R /Fit ] /S /GoTo >> \NC \tt\tx <<{\darkred \space }/D{\darkred \space}[ {\darkred 1}1 {\darkred 0} R{\darkred \space }/F{\darkred i}t {\darkred ]} /{\darkred S} /{\darkred G}oT{\darkred o} >{\darkred >} \NC \NR
+\NC 3\NC \tt\tx << /D [ 12 0 R /Fit ] /S /GoTo >> \NC \tt\tx <<{\darkred \space }/D{\darkred \space}[ {\darkred 1}2 {\darkred 0} R{\darkred \space }/F{\darkred i}t {\darkred ]} /{\darkred S} /{\darkred G}oT{\darkred o} >{\darkred >} \NC \NR
+\ML
+\NC 4\NC \tt\tx << /D [ 8 0 R /Fit ] /S /GoTo >> \NC \tt\tx <{\darkred <} {\darkred /}D{\darkred \space }[{\darkred \space }8{\darkred \space }0{\darkred \space }R{\darkred \space }/{\darkred F}i{\darkred t} {\darkred ]} {\darkred /}S{\darkred \space }/{\darkred G}o{\darkred T}o{\darkred \space }>{\darkred >} \NC \NR
+\NC 4\NC \tt\tx << /D [ 9 0 R /Fit ] /S /GoTo >> \NC \tt\tx <{\darkred <} {\darkred /}D{\darkred \space }[{\darkred \space }9{\darkred \space }0{\darkred \space }R{\darkred \space }/{\darkred F}i{\darkred t} {\darkred ]} {\darkred /}S{\darkred \space }/{\darkred G}o{\darkred T}o{\darkred \space }>{\darkred >} \NC \NR
+\NC 4\NC \tt\tx << /D [ 10 0 R /Fit ] /S /GoTo >> \NC \tt\tx {\darkred <}<{\darkred \space}/{\darkred D} {\darkred [} {\darkred 1}0{\darkred \space }0{\darkred \space }R{\darkred \space }/{\darkred F}i{\darkred t} {\darkred ]} {\darkred /}S{\darkred \space }/{\darkred G}o{\darkred T}o{\darkred \space }>{\darkred >} \NC \NR
+\NC 4\NC \tt\tx << /D [ 11 0 R /Fit ] /S /GoTo >> \NC \tt\tx {\darkred <}<{\darkred \space}/{\darkred D} {\darkred [} {\darkred 1}1{\darkred \space }0{\darkred \space }R{\darkred \space }/{\darkred F}i{\darkred t} {\darkred ]} {\darkred /}S{\darkred \space }/{\darkred G}o{\darkred T}o{\darkred \space }>{\darkred >} \NC \NR
+\NC 4\NC \tt\tx << /D [ 12 0 R /Fit ] /S /GoTo >> \NC \tt\tx {\darkred <}<{\darkred \space}/{\darkred D} {\darkred [} {\darkred 1}2{\darkred \space }0{\darkred \space }R{\darkred \space }/{\darkred F}i{\darkred t} {\darkred ]} {\darkred /}S{\darkred \space }/{\darkred G}o{\darkred T}o{\darkred \space }>{\darkred >} \NC \NR
+\LL
+\stoptabulate
+
+Of course, in practice, in \LUA\ 5.2 the longer string exceeds 40 characters so it
+is never hashed anyway. Apart from this maximum, the \LUA\ hash code looks like this:
+
+\starttyping
+/* Lua will use at most ~(2^LUAI_HASHLIMIT) bytes from
+a string to compute its hash */
+...
+h = cast(unsigned int,len) ;
+step = (len>>LUAI_HASHLIMIT) + 1 ;
+for (l1=len; l1>=step; l1-=step) {
+ h = h ^ ((h<<5) + (h>>2) + cast(unsigned char,str[l1-1])) ;
+}
+...
+\stoptyping
+
+This translates into a verbose \LUA\ function as follows:
+
+\starttyping
+function string.luahash(str,shift)
+ local len = #str
+ local hash = len
+ local step = bit32.rshift(len,shift or 5) + 1
+ for i=len,1,-step do
+ hash = bit32.bxor(hash, (
+ bit32.lshift(hash,5) +
+ bit32.rshift(hash,2) +
+ string.byte(string.sub(str,i,i))
+ ) )
+ end
+ return hash
+end
+\stoptyping
+
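+As a quick sanity check of how this sampling can bite, take the first two
+destination strings from the table above. With the default shift of~5 both are
+32 bytes long, so the step becomes 2 and the byte holding the digit is never
+looked at; assuming the \type {string.luahash} function just shown is loaded
+(and \LUA\ 5.2 for \type {bit32}), we get:
+
+\starttyping
+local a = string.luahash("<< /D [ 8 0 R /Fit ] /S /GoTo >>")
+local b = string.luahash("<< /D [ 9 0 R /Fit ] /S /GoTo >>")
+print(a == b) -- true: the digit sits in one of the skipped (odd) byte positions
+\stoptyping
+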
+The reader can argue that the following string would perform better:
+
+\starttyping
+/Subtype/Link/Border[0 0 0]/F 4/A 12 0 R
+\stoptyping
+
+but this is not the case. Also, here we use \PDF\ code, but similar cases can
+happen if we flush \TEX\ commands:
+
+\starttyping
+\dothisorthat{1}
+\dothisorthat{101}
+\dothisorthat{10101}
+\stoptyping
+
+And in the case of \UTF\ strings, it remains a fact that when characters need two
+bytes, a sequence can end up with every odd or even byte being the same. This is
+one more reason to support hashing of up to 64 bytes (or 40 in practice).
+
+Because of this we decided to experiment with a value of 64 instead. \footnote {Of
+course, in \LUATEX, the length limit kicks in before we get to 64.} We can do the
+same when we use the \LUA\ 5.1 method in \LUAJIT. In \in {table} [tab:torture-4]
+\in {and} [tab:torture-5] we show the timings. It is interesting that we now lost the
+extremes. The performance of the default settings is compared with the higher
+values in \in {table} [tab:compare-2]. Of course the numbers are just indications
+and there might be small differences between test runs. Therefore we use a threshold
+of 5\% when we compare two methods.
+
+\startplacetable[reference=tab:torture-4,location=page,title={\type{context test.tex} with len<=40 and hash<=64}]
+ \scale
+ [height=\the\dimexpr\textheight-3\lineheight\relax]
+ % [width=\the\dimexpr\textwidth+.5\backspace\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing { filename = "luatest-hash-luatex-073-LUA52-40-6.lua" }}}}
+\stopplacetable
+
+\startplacetable[reference=tab:torture-5,location=page,title={\type{context --jit test.tex} with hash<=64}]
+ \scale
+ [height=\the\dimexpr\textheight-3\lineheight\relax]
+ % [width=\the\dimexpr\textwidth+.5\backspace\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing { filename = "luatest-hash-luajittex-073-LUA51-40-6.lua" }}}}
+\stopplacetable
+
+\startplacetable[location=page,reference=tab:compare-2,title=More than 5\% difference between 32 byte or 64 byte hashing.]
+ \startcombination
+ \startcontent
+ \scale
+ [height=\the\dimexpr\textheight-4\lineheight\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing {
+ fileset = {
+ { tag = "40 / 32", filename = "luatest-hash-luatex-073-LUA52.lua" },
+ { tag = "40 / 64", filename = "luatest-hash-luatex-073-LUA52-40-6.lua" },
+ } } }}}
+
+ \stopcontent
+ \startcaption
+ \LUATEX\ (size limit 40)
+ \stopcaption
+ \startcontent
+ \scale
+ [height=\the\dimexpr\textheight-4\lineheight\relax]
+ {\vbox{\ctxlua{moduledata.luatests.showhashing {
+ fileset = {
+ { tag = "40 / 32", filename = "luatest-hash-luajittex-073-LUA51.lua" },
+ { tag = "40 / 64", filename = "luatest-hash-luajittex-073-LUA51-40-6.lua" },
+ } } }}}
+
+ \stopcontent
+ \startcaption
+ \LUAJITTEX\ (no size limit)
+ \stopcaption
+ \stopcombination
+\stopplacetable
+
+So how does this affect us in document production? It is not that hard to get a
+processing rate of a few dozen pages per second on a modern machine, even with
+somewhat complex documents, where \XML\ turns into \PDF. However, interactivity
+somehow comes with a price when we use \LUAJITTEX. In \CONTEXT\ \MKIV\ we do all
+\PDF\ annotations in \LUA\ and that involves assembling dictionaries. Here are
+two examples, a destination:
+
+\starttyping
+<< /D [ 15 0 R /Fit ] /S /GoTo >>
+\stoptyping
+
+and a reference:
+
+\starttyping
+/Subtype /Link /Border [ 0 0 0 ] /F 4 /A 16 0 R
+\stoptyping
+
+These strings are built with small variations and at some point end up in the \PDF\
+file. The same string can end up in the file several times, although sometimes we
+can create a reusable object. In the last case we keep them at the \LUA\ end as
+reference to such a shareable object, a key in an object reference hash. Now imagine
+that we have some 30K of such references and/or destinations, which indeed happens in
+crited documents. In the next two lines we use a \type {*} to show where the
+differences are:
+
+\starttyping
+<< /D [ * 0 R /Fit ] /S /GoTo >>
+/Subtype /Link /Border [ 0 0 0 ] /F 4 /A * 0 R
+\stoptyping
+
+If we replace these \type {*} by a number, there are big differences between the
+engines with respect to the time needed. This is summarized in the next table.
+\footnote {The numbers concern 30K hash creations. The time shown is the average
+over 30 runs.}
+
+\starttabulate[|c|c|c|l|]
+\FL
+\NC \bf \LUA\ 5.2 \NC \bf \LUAJIT\ 2.0 \NC \bf \LUAJIT\ 2.0+5.1 \NC \NR
+\FL
+\NC 0.096 \NC 0.046 \NC 0.047 \NC \ttx << /D [ * 0 R /Fit ] /S /GoTo >> \NC \NR
+\NC 0.054 \NC 6.017 \NC 0.055 \NC \ttx /Subtype /Link /Border [ 0 0 0 ] /F 4 /A * 0 R \NC \NR
+\LL
+\stoptabulate
+
+Especially the second case behaves badly in \LUAJIT. Say that a result comes out
+as:
+
+\starttyping
+/Subtype /Link /Border [ 0 0 0 ] /F 4 /A 12 0 R
+/Subtype /Link /Border [ 0 0 0 ] /F 4 /A 123 0 R
+/Subtype /Link /Border [ 0 0 0 ] /F 4 /A 1234 0 R
+\stoptyping
+
+The \LUAJIT\ hasher (more or less) looks at the first~4, last~4, middle~4 and
+somewhere a quarter along the string, and uses these sequences for the
+calculation, so you can imagine that there are clashes. The \LUA\ 5.1 hasher runs
+over part of the string and sees more of the difference. The 5.2 hasher has a
+threshold and doesn't hash at all when the length exceeds (by default) 40
+characters, which is the case with the second string. Looking at only specific
+parts of a string is somewhat arbitrary and what works for one kind of
+application is not always good for another.
+
+After these tests we decided that it makes sense to replace the \LUAJIT\ hash
+calculation by the traditional \LUA\ one (or at least give users a choice at
+startup). The choice of hash is a runtime option:
+
+\starttyping
+mtxrunjit --script context --jithash=lua51 ......
+mtxrunjit --script context --jithash=luajit20 ......
+\stoptyping
+
+For the moment we default to the traditional \LUA\ 5.1 hashing method. Although
+it can behave really badly on some large strings, we think that chances are low that
+this will happen in practice. An overall good performance on strings like the
+hyperlink examples is more important. Using the \LUA\ 5.2 method would be even
+better but it would require a change in the virtual machine and that is not something
+we have in mind.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
+
+% Luatex manual:
+%
+% In \LUA\ strings are hashed which makes a test for equality fast and in \LUATEX\
+% we benefit from that fact. Starting with \LUA\ 5.2 the hash function is no longer
+% hashing strings larger than (by default) 40 characters. Of these at most 32
+% characters are hashed in stock \LUA\ but for a string rich environment as \TEX\
+% this can lead to many collisions. Therefore we have now set that constant limit
+% to 64 characters (so in practice it's now 40 too).
+%
+% In \LUAJIT\ the hash function is not the same as in \LUA\ and can in some cases
+% lead to a significant slowdown. We ran into cases where a \LUAJITTEX\ run was 20
+% times slower than a normal \LUATEX\ run while normally such run is 30\% faster.
+% For this reason we have replaced the hash code with the \LUA\ 5.1 hash code. This
+% change is minimal and gives less collisions. The impact on speed can be neglected.
+%
+% For \LUAJITTEX\ you can control the hash method:
+%
+% \starttyping
+% --jithash=luajit
+% --jithash=lua51
+% \stoptyping
+%
+% The current status of the hash function is available in:
+%
+% \starttyping
+% status.list().luatex_hashtype
+% status.list().luatex_hashchars
+% \stoptyping
+%
+% The first one returns \type {lua}, \type{luajit} or \type {lua51} depending on
+% the engine. The second one should always return 6. If it returns 5 then you have
+% a non|-|optimized binary. Other values are suspicious.
diff --git a/doc/context/sources/general/manuals/about/about-hz.tex b/doc/context/sources/general/manuals/about/about-hz.tex
new file mode 100644
index 000000000..5f3d6c6b4
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-hz.tex
@@ -0,0 +1,96 @@
+% language=uk
+
+\startcomponent about-hz
+
+\environment about-environment
+
+\startchapter[title={Font expansion}]
+
+\startsection[title=Introduction]
+
+A lot in \LUATEX\ is not new. It started as a mix of \PDFTEX\ (which itself is
+built on top of original \TEX\ and \ETEX) and the directional bits of \ALEPH\
+(which is a variant of \OMEGA). Of course large portions have been changed in the
+meantime, most noticeably the input encoding (\UNICODE), fonts with a more
+generic fontloader and \LUA\ based processing, \UNICODE\ math and related font
+rendering, and many subsystems can be overloaded or extended. But at the time I
+write this (end of January 2013) the parbuilder still has the \PDFTEX\ font
+expansion code.
+
+This code is the result of a research project by \THANH. By selectively widening
+shapes a better greyness of the paragraph can be achieved. This trick is inspired
+by the work of Hermann Zapf and therefore, instead of expansion, we often talk of
+{\em hz} optimization.
+
+It started with (runtime) generated \METAFONT\ bitmap fonts and as a consequence
+we ended up with many more font instances. However, when eventually bitmap
+support was dropped and outlines became the norm, the implementation didn't
+change much. Also some of the real work was delegated to the backend and as it
+goes then: never change a working system if there's no reason.
+
+When I played with the \LUA\ based par builder I quickly realized that this
+implementation was far from efficient. It was already known that enabling it
+slowed down par building and I saw that this was largely due to many redundant
+calculations, generating auxiliary fonts, and the interaction between front- and
+backend. And, as I seldom hesitate to reimplement something that can be done
+better (one reason why \CONTEXT\ is never finished) I came to an alternative
+implementation. That was 2010. What helped was that by that time Hartmut Henkel
+already had made the backend part cleaner, in the sense that instead of including
+multiple instances of the same font (but with different glyph widths) the base
+font was transformed in|-|line. This made me realize that we could use just one
+font in the frontend and pass the scale with the glyph node to the backend. And
+so, an extra field was added to glyph nodes in order to make experiments
+possible.
+
+More than two years later (January 2013) I finally took up this pet project and
+figured out how to adapt the backend. Interestingly a few lines of extra code
+we all that was needed. At the same time the frontend part became much simpler,
+that is, in the \LUA\ parbuilder. But eventually it will be retrofitted into the
+core engine, if only because that's much faster.
+
+\stopsection
+
+\startsection[title=The changes]
+
+The most important changes are the following. Instead of multiple font instances,
+only one is used. This way less memory is used, no extra font instances need to
+be created (and those \OPENTYPE\ fonts can be large).
+
+Because fewer calculations are needed the code looks less complex and more elegant.
+Okay, the parbuilder code will never really look easy, if only because much more
+is involved.
+
+The glyph related factors are related to the emwidth. This does not make much
+sense, so in \CONTEXT\ we define them in fractions of the character width, map them
+onto emwidths, and in the parbuilder need to go back to glyph related widths again.
+If we can get rid of these emwidths, we have less complex code.
+
+Probably for reasons of efficiency an expanded font carries a definition that
+tells how much stretch and shrink is permitted and how large the steps are. So,
+for instance a font can be widened 5\% and narrowed 3\% in steps of 1\% which
+gives at most 8 extra instances. There is no real reason why this should be a
+font property and the parbuilder cannot deal with fonts with different steps
+anyway, so it makes more sense to make it a property of the paragraph and treat
+all fonts alike. In the \LUA\ based variant we can even have more granularity but
+we leave that for now. In any case this will lift the limitation of mixed font
+usage that is present in the original mechanism.
+
+The front- and backend code with respect to expansion gets clearly separated. In
+fact, the backend doesn't need to do any calculations other than applying the
+factor that is carried with the glyph. This and previously mentioned simplifications
+make the mechanism more efficient.
+
+It is debatable if expansion needs to be applied to font kerns, as is the case in
+the old mechanism. So, at least it should be an option. Removing this feature
+would again make the code nicer. If we keep it, we should keep in mind that
+expansion doesn't work well with complex fonts (say Arabic) but I will look into
+this later. It might be feasible when using the \LUA\ based variant because then
+we can use some of the information that is carried around with the related
+mechanisms. Of course this then relates to the \LUA\ based font builder.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
+
diff --git a/doc/context/sources/general/manuals/about/about-introduction.tex b/doc/context/sources/general/manuals/about/about-introduction.tex
new file mode 100644
index 000000000..92b386a75
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-introduction.tex
@@ -0,0 +1,38 @@
+% language=uk
+
+\startcomponent about-introduction
+
+\environment about-environment
+
+\startchapter[title={Introduction}]
+
+During the development of \LUATEX\ we wrapped up the state of affairs in articles
+and reports. Upto version 0.50 we tagged them as \quote {\MKIV} (the transition
+from \MKII), while for the next 0.25 versions we bundled them in \quote {hybrid}
+(the rewrite of \CONTEXT). The next series goes under the name \type {about} as
+one might wonder what all this \LUATEX\ and \CONTEXT\ is about. After all we've
+now reached a state where we can think about future applications instead of
+improving older features as that process is ongoing.
+
+As we're a bit beyond experimenting now, the focus will be on practical usage and
+of course we target on applications that the \LUA\ and \TEX\ combination makes
+possible, either new or in a renewed form. Some of the chapters will eventually
+become part of manuals.
+
+As with the two preceding collections of \LUATEX\ development stories, \quote
+{mk} and \quote {hybrid}, this one, called \quote {about}, covers a stretch of
+development, mostly between versions 0.50 and 0.75. The fourth stretch, upto 1.00,
+is covered in \quote {still}.
+
+\startlines
+Hans Hagen
+Hasselt NL
+2013\endash2015
+\blank
+\type {http://www.luatex.org}
+\type {http://www.pragma-ade.com}
+\stoplines
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-jitting-jit-2013-06-04.lua b/doc/context/sources/general/manuals/about/about-jitting-jit-2013-06-04.lua
new file mode 100644
index 000000000..189f5244c
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-jitting-jit-2013-06-04.lua
@@ -0,0 +1,148 @@
+return {
+ {
+ ["off"]="0.399",
+ ["on"]="0.081",
+ },
+ {
+ ["off"]="1.339",
+ ["on"]="0.081",
+ },
+ {
+ ["off"]="4.527",
+ ["on"]="2.817",
+ },
+ {
+ ["off"]="0.644",
+ ["on"]="0.640",
+ },
+ {
+ ["off"]="3.032",
+ ["on"]="3.007",
+ },
+ {
+ ["off"]="4.046",
+ ["on"]="4.977",
+ },
+ ["local fc = font.current\
+\
+function font.current()\
+ return fc()\
+end\
+\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + font.current()\
+ end\
+end"]={
+ ["off"]="1.998",
+ ["on"]="2.417",
+ },
+ ["local function whatever(i)\
+ return i\
+end\
+\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + whatever(i)\
+ end\
+end"]={
+ ["off"]="0.675",
+ ["on"]="0.041",
+ },
+ ["local tostring, tonumber = tostring, tonumber\
+return function()\
+ local a = 0\
+ for i=1,1000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="4.762",
+ ["on"]="0.172",
+ },
+ ["local tostring, tonumber = tostring, tonumber\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="79.316",
+ ["on"]="5.640",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,100 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="0.703",
+ ["on"]="0.047",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,1000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="4.786",
+ ["on"]="0.171",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + font.current()\
+ end\
+end"]={
+ ["off"]="1.417",
+ ["on"]="1.427",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + i\
+ end\
+end"]={
+ ["off"]="0.198",
+ ["on"]="0.041",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + math.sin(1/i)\
+ end\
+end"]={
+ ["off"]="2.206",
+ ["on"]="1.440",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="79.456",
+ ["on"]="5.703",
+ },
+ ["return function()\
+ local a = 0\
+ local p = (1-lpeg.P(\"5\"))^0 * lpeg.P(\"5\")\
+ for i=1,100 do\
+ local a = a + (tonumber(lpeg.match(p,tostring(i))) or 0)\
+ end\
+end"]={
+ ["off"]="0.859",
+ ["on"]="0.843",
+ },
+ ["return function()\
+ local a = 0\
+ local p = (1-lpeg.P(\"5\"))^0 * lpeg.P(\"5\") + lpeg.Cc(0)\
+ for i=1,100 do\
+ local a = a + lpeg.match(p,tostring(i))\
+ end\
+end"]={
+ ["off"]="0.514",
+ ["on"]="0.516",
+ },
+} \ No newline at end of file
diff --git a/doc/context/sources/general/manuals/about/about-jitting-jit.lua b/doc/context/sources/general/manuals/about/about-jitting-jit.lua
new file mode 100644
index 000000000..ec669a253
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-jitting-jit.lua
@@ -0,0 +1,163 @@
+return {
+ {
+ ["off"]="0.399",
+ ["on"]="0.081",
+ },
+ {
+ ["off"]="1.339",
+ ["on"]="0.081",
+ },
+ {
+ ["off"]="4.527",
+ ["on"]="2.817",
+ },
+ {
+ ["off"]="0.644",
+ ["on"]="0.640",
+ },
+ {
+ ["off"]="3.032",
+ ["on"]="3.007",
+ },
+ {
+ ["off"]="4.046",
+ ["on"]="4.977",
+ },
+ ["local fc = font.current\
+\
+function font.current()\
+ return fc()\
+end\
+\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + font.current()\
+ end\
+end"]={
+ ["off"]="1.966",
+ ["on"]="2.492",
+ },
+ ["local fc = font.current\
+\
+function font.xcurrent()\
+ return fc()\
+end\
+\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + font.xcurrent()\
+ end\
+end"]={
+ ["off"]="1.912",
+ ["on"]="1.915",
+ },
+ ["local function whatever(i)\
+ return i\
+end\
+\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + whatever(i)\
+ end\
+end"]={
+ ["off"]="0.648",
+ ["on"]="0.042",
+ },
+ ["local tostring, tonumber = tostring, tonumber\
+return function()\
+ local a = 0\
+ for i=1,1000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="2.620",
+ ["on"]="0.162",
+ },
+ ["local tostring, tonumber = tostring, tonumber\
+return function()\
+ local a = 0\
+ for i=1,10000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="79.316",
+ ["on"]="5.640",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,100 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="0.703",
+ ["on"]="0.047",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,1000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="2.607",
+ ["on"]="0.162",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + font.current()\
+ end\
+end"]={
+ ["off"]="1.292",
+ ["on"]="1.296",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + i\
+ end\
+end"]={
+ ["off"]="0.207",
+ ["on"]="0.042",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ a = a + math.sin(1/i)\
+ end\
+end"]={
+ ["off"]="2.204",
+ ["on"]="1.482",
+ },
+ ["return function()\
+ local a = 0\
+ for i=1,10000 do\
+ local a = a + tonumber(tostring(i))\
+ end\
+end"]={
+ ["off"]="79.456",
+ ["on"]="5.703",
+ },
+ ["return function()\
+ local a = 0\
+ local p = (1-lpeg.P(\"5\"))^0 * lpeg.P(\"5\")\
+ for i=1,100 do\
+ local a = a + (tonumber(lpeg.match(p,tostring(i))) or 0)\
+ end\
+end"]={
+ ["off"]="0.859",
+ ["on"]="0.843",
+ },
+ ["return function()\
+ local a = 0\
+ local p = (1-lpeg.P(\"5\"))^0 * lpeg.P(\"5\") + lpeg.Cc(0)\
+ for i=1,100 do\
+ local a = a + lpeg.match(p,tostring(i))\
+ end\
+end"]={
+ ["off"]="0.313",
+ ["on"]="0.316",
+ },
+} \ No newline at end of file
diff --git a/doc/context/sources/general/manuals/about/about-jitting.tex b/doc/context/sources/general/manuals/about/about-jitting.tex
new file mode 100644
index 000000000..4a8bc763a
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-jitting.tex
@@ -0,0 +1,439 @@
+% language=uk engine=luajittex
+
+\startluacode
+
+ local nofjitruns = 5000
+
+ local runnow = string.find(environment.jobname,"about%-jitting") and jit
+
+ local runtimes = table.load("about-jitting-jit.lua") or {
+ nofjitruns = nofjitruns,
+ timestamp = os.currenttime(),
+ }
+
+ document.NOfJitRuns = runtimes.nofjitruns or nofjitruns
+ document.JitRunTimes = runtimes
+
+ function document.JitRun(specification)
+
+ local code = buffers.getcontent(specification.name)
+
+ if runnow then
+
+ local function testrun(how)
+ local test = load(code)()
+ collectgarbage("collect")
+ jit[how]()
+ local t = os.clock()
+ for i=1,document.NOfJitRuns do
+ test()
+ end
+ t = os.clock() - t
+ jit.off()
+ return string.format("%0.3f",t)
+ end
+
+ local rundata = {
+ off = testrun("off"),
+ on = testrun("on"),
+ }
+
+ runtimes[code] = rundata
+ document.JitTiming = rundata
+
+ else
+
+ local rundata = runtimes[code] or { }
+
+ document.JitTiming = {
+ off = rundata.off or "0",
+ on = rundata.on or "0",
+ }
+
+
+ end
+
+ end
+
+\stopluacode
+
+\starttexdefinition LuaJitTest #1%
+
+ \ctxlua{document.JitRun { name = "#1" } }
+
+ \starttabulate[|lT|lT|]
+ \NC off \NC \cldcontext{document.JitTiming.off} \NC \NR
+ \NC on \NC \cldcontext{document.JitTiming.on } \NC \NR
+ \stoptabulate
+
+\stoptexdefinition
+
+\starttexdefinition NOfLuaJitRuns
+ \cldcontext{document.NOfJitRuns}
+\stoptexdefinition
+
+% end of code
+
+\startcomponent about-jitting
+
+\environment about-environment
+
+\definehead[jittestsection][subsubsection][color=,style=bold]
+
+\startchapter[title=Luigi's nightmare]
+
+\startsection[title=Introduction]
+
+If you have a bit of a background in programming and watch kids playing video
+games, whether or not on a dedicated desktop machine, a console or even a mobile
+device, there is a good chance that you realize how much processing power is
+involved. All those pixels get calculated many times per second, based on a
+dynamic model that not only involves characters, environment, physics and a story
+line but also immediately reacts to user input.
+
+If on the other hand you hit the magic key combination in your text editor that
+renders a document source into for instance a \PDF\ file, you might wonder why
+that takes so many seconds. Of course it does matter that some resources are
+loaded, that maybe images are included, and lots of fuzzy logic makes things
+happen, but the most important factor is without doubt that \TEX\ macros are not
+compiled into machine code but into an intermediate representation. Those macros
+then get expanded, often over and over again, and that is a relatively slow process.
+As (local) macros can be redefined at any time, the engine needs to take that into
+account and there is not much caching going on, unless you explicitly define
+macros that do so. Take this:
+
+\starttyping
+\def\bar{test}
+\def\foo{test \bar\space test}
+\stoptyping
+
+Even if the definition of \type {\foo} stays the same, that of \type {\bar} can
+change:
+
+\starttyping
+\foo \def\bar{foo} \foo
+\stoptyping
+
+There is no mechanism to freeze the meaning of \type {\bar} in \type {\foo},
+something that is possible in the other language used in \CONTEXT:
+
+\starttyping
+local function bar() context("test") end
+function foo() context("test ") bar() context(" test") end
+\stoptyping
+
+Here we can use local functions to limit their scope.
+
+\starttyping
+foo() local function bar() context("foo") end foo()
+\stoptyping
+
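+Outside \TEX\ the same thing can be shown with plain \LUA\ (using \type {print}
+instead of \type {context}): the meaning that \type {foo} captured indeed stays
+frozen:
+
+\starttyping
+local function bar() print("test") end
+local function foo() print("test") bar() print("test") end
+
+foo() -- prints test three times
+local function bar() print("foo") end -- a new local; foo keeps its old upvalue
+foo() -- still prints test three times
+\stoptyping
+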
+In a way you can say that \TEX\ is a bit more dynamic than \LUA, and optimizing
+(as well as hardening) it is much more difficult. In \CONTEXT\ we already
+stretched that to the limits, although occasionally I find ways to speed up a
+bit. Given that we spend a considerable amount of runtime in \LUA\ it makes sense
+to see what we can gain there. We have less possible interference and often a more
+predictable outcome as \type {bar}s won't suddenly become \type {foo}s.
+
+Nevertheless, the dynamic nature of both \TEX\ and \LUA\ has some impact on
+performance, especially when they do most of the work. While in games there are
+dedicated chips to do tasks, for \TEX\ there aren't. So, we're sort of stuck when
+it comes to speeding up the process to the level that is similar to advanced
+games. In the next sections I will discuss a few aspects of possible speedups and
+the reason why it doesn't work out as expected.
+
+\stopsection
+
+\startsection[title=Jitting]
+
+Let's go back once more to Luigi's nightmare of disappointing jit. \footnote
+{Luigi Scarso is the author of \LUAJITTEX\ and we have reported on experiments
+with this variant of \LUATEX\ on several occasions.} We already know that the
+virtual machine of \LUAJIT\ is about twice as fast as the standard machine. We
+also experienced that enabling jit can degrade performance. Although we did
+observe a really drastic drop in performance when testing functions like \type
+{math.random} using the \type {mingw} compiler, we also saw a performance boost
+with simple pure \LUA\ functions. In that respect \LUAJIT\ is an impressive
+effort. So, it makes sense to use \LUAJITTEX\ even if in theory it could be
+even faster.
+
+Next some tests will be shown. The timings are snapshots so different versions of
+\LUAJITTEX\ can have different outcomes. The tests are mostly used for
+discussions between Luigi and me and further experiments, and believe me: we've
+really done all kinds of tests to see if we can get some speed out of jitting.
+After all it's hard to believe that we can't gain something from it, so maybe
+we're just doing something wrong.
+
+Each test is run \NOfLuaJitRuns\ times. These are of course non|-|typical
+examples but they illustrate the principle. Each time we show two measurements:
+one with jit turned on, and one with jit off, but in both cases the faster
+virtual machine is enabled. The times shown are of course dependent on the
+architecture and operating system, but as we are only interested in relative
+times it's enough to know that we run 32 bit mingw binaries under 64 bit Windows
+8 on a modern quad core Ivy bridge \CPU. We did most tests with \LUAJIT\ 2.0.1
+but as far as we can see 2.0.2 has a similar performance.
+
+\startjittestsection[title={simple loops, no function calls}]
+
+\startbuffer[jittest]
+return function()
+ local a = 0
+ for i=1,10000 do
+ a = a + i
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with simple function}]
+
+\startbuffer[jittest]
+local function whatever(i)
+ return i
+end
+
+return function()
+ local a = 0
+ for i=1,10000 do
+ a = a + whatever(i)
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with built-in basic functions}]
+
+\startbuffer[jittest]
+return function()
+ local a = 0
+ for i=1,10000 do
+ a = a + math.sin(1/i)
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with built-in simple functions}]
+
+\startbuffer[jittest]
+return function()
+ local a = 0
+ for i=1,1000 do
+ local a = a + tonumber(tostring(i))
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with built-in simple functions}]
+
+\startbuffer[jittest]
+local tostring, tonumber = tostring, tonumber
+return function()
+ local a = 0
+ for i=1,1000 do
+ local a = a + tonumber(tostring(i))
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with built-in complex functions}]
+
+\startbuffer[jittest]
+return function()
+ local a = 0
+ local p = (1-lpeg.P("5"))^0 * lpeg.P("5") + lpeg.Cc(0)
+ for i=1,100 do
+ local a = a + lpeg.match(p,tostring(i))
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with foreign function}]
+
+\startbuffer[jittest]
+return function()
+ local a = 0
+ for i=1,10000 do
+ a = a + font.current()
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+\startjittestsection[title={simple loops, with wrapped foreign functions}]
+
+\startbuffer[jittest]
+local fc = font.current
+
+function font.xcurrent()
+ return fc()
+end
+
+return function()
+ local a = 0
+ for i=1,10000 do
+ a = a + font.xcurrent()
+ end
+end
+\stopbuffer
+
+\typebuffer[jittest] \LuaJitTest{jittest}
+
+\stopjittestsection
+
+What we do observe here is that turning on jit doesn't always help. By design the
+current just|-|in|-|time compiler aborts optimization when it sees a function
+that is not known. This means that in \LUAJITTEX\ most code will not get jit,
+because we use built|-|in library calls a lot. Also, in version 2.0 we notice
+that a bit of extra wrapping will make performance worse too. This might be why
+for us jitting doesn't work out the way it is advertised. Often performance tests
+are done with simple functions that use built in functions that do get jit. And
+the more of those are supported, the better it gets. Although, when you profile a
+\CONTEXT\ run, you will notice that we don't call that many standard library
+functions, at least not so often that jitting would get noticed.
+
+A safe conclusion is that you can benefit a lot from the fast virtual machine but
+should check carefully if jit is not having a negative impact. As it is turned on
+by default in \LUAJIT\ (but off in \LUAJITTEX) it might well go unnoticed,
+especially because there is always a performance gain due to the faster virtual
+machine and that might show more overall gain than the drawback of jitting
+unjittable code. It might just be a bit less drastic than possible because of
+artifacts mentioned here, but who knows what future versions of \LUAJIT\ will
+bring.
+
+Maybe sometime we can benefit from \type {ffi} but it makes no sense to mess up
+the \CONTEXT\ code with related calls: it looks ugly and also makes the code
+unusable in stock \LUA, so it is a sort of no|-|go. There are some suggestions
+in \LUAJIT\ related posts about adapting the code to suit the jitter, but again,
+that makes no sense. If we need to keep a specific interpreter in mind, we could
+as well start writing everything in C. So, our hopes are on future versions of
+stock \LUA\ and \LUAJIT. Luigi uncovered the following comment in the source code:
+
+\starttyping
+/* C functions can have arbitrary side-effects and are not
+recorded (yet). */
+\stoptyping
+
+Although the \type {(yet)} indicates that at some point this restriction can be
+lifted, we don't expect this to happen soon. And patching the jit machinery
+ourselves to suit \LUATEX\ is not an option.
+
+There is an important difference between a \LUATEX\ run and other programs: a
+\TEX\ run is short|-|lived. A lot of code gets executed only once or a few
+times (like loading fonts), or gets executed in such different ways that (branch)
+prediction is hard. If you run a web server using \LUA\ it runs for weeks in a
+row so optimizing a function pays off, given that it gets optimized. When you
+have a \LUA\ enhanced interactive program, again, the session is long enough to
+benefit from jitting (if applied). And, when you crunch numbers, it might pay off
+too. In practice, a \TEX\ run has no such characteristics.
+
+\stopsection
+
+\startsection[title=Implementation]
+
+In \LUA\ 5.2 there are some changes in the implementation compared to 5.1 and
+before. It is hard to measure the impact of that but it's probably a win some
+here and lose some there situation. A good example is the way \LUA\ deals with
+strings. Before 5.2 all strings were hashed, but now only short strings are
+(at most 32 bytes are looked at). Now, consider this:
+
+\startitemize
+ \startitem
+ In \CONTEXT\ we do all font handling in \LUA\ and that involves lots of
+ tables with lots of (nicely hashed) short keys. So, comparing them is
+ pretty fast.
+ \stopitem
+ \startitem
+ We also read a lot from files, and each line passes filters and such
+ before it gets passed to \TEX. There hashing is not really needed,
+ although when it gets processed by filters it might as well save some
+ time.
+ \stopitem
+ \startitem
+ When we go from \TEX\ to \LUA\ and reverse, lots of strings are involved
+ and many of them are unique and used once. There hashing might bring a
+ penalty.
+ \stopitem
+ \startitem
+        When we loop over a string with \type {gmatch} or some \type {lpeg}
+        subprogram lots of (small) strings can get created and each gets hashed,
+        even if they have a short lifespan (see the sketch after this list).
+ \stopitem
+\stopitemize
+
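+Here is the sketch announced in the last item. It is contrived, but it shows the
+difference: the first loop creates (and thereby hashes) a short string per
+iteration, while the second one works on byte values and creates no intermediate
+strings at all:
+
+\starttyping
+local str = "some line of input"
+
+-- each c is a short string that gets hashed (cf. the last item above)
+local n = 0
+for c in string.gmatch(str,".") do
+    if c == "i" then n = n + 1 end
+end
+
+-- string.byte avoids creating intermediate strings altogether
+local m, target = 0, string.byte("i")
+for i=1,#str do
+    if string.byte(str,i) == target then m = m + 1 end
+end
+
+print(n,m) -- 2  2
+\stoptyping
+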
+The above items indicate that we can benefit from hashing but that sometimes it
+can come with a performance hit. My impression is that on average we're better
+off with hashing and it's one of the reasons why \LUA\ is so fast (and usable).
+
+In \TEX\ all numbers are integers and in \LUA\ all numbers are floats. On modern
+computers dealing with floating point is fast and we're not crunching numbers
+anyway. We definitely would have an issue if numbers were just integers, and an
+upcoming mixed integer|/|float model might not be to our advantage. We'll see.
+
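+As a small illustration of that boundary (assuming a \LUATEX\ run): dimensions
+arrive in \LUA\ as integral numbers of scaled points, stored in doubles, which
+is exact for values of this magnitude:
+
+\starttyping
+local sp = tex.sp("10pt") -- 655360 scaled points, an integer kept in a double
+print(sp, sp/65536)       -- 655360  10
+\stoptyping
+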
+I had expected to benefit from bitwise operations but so far never could find a
+real application in \CONTEXT, at least not one that had a positive impact. But
+maybe it's just a way of thinking that hasn't evolved yet. Also, the fact that
+functions are used instead of a real language extension makes it less possible
+that there is a speedup involved.
+
+\stopsection
+
+\startsection[title=Garbage collection]
+
+In the beginning I played with tuning the \LUA\ garbage collector in order to
+improve performance. For some documents changing the step and multiplier worked
+out well, but for others it didn't, so I decided that one can best leave the
+values as they are. Turning the garbage collector off as expected gives a
+relative small speedup, and for the average run the extra memory used can be
+neglected. Just keep in mind that a \TEX\ run is never persistent so memory
+can't keep filling. I did some tests with the in theory faster (experimental)
+generational mode of the garbage collector but it made runs significantly slower.
+For instance processing the \type {fonts-mkiv.pdf} went from 9 to 9.5 seconds.
+
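+For the record, these are the knobs meant here; the values shown are arbitrary
+and, as said, normally best left alone:
+
+\starttyping
+collectgarbage("setpause",200)    -- wait till memory doubles before a new cycle
+collectgarbage("setstepmul",200)  -- how aggressive each incremental step is
+collectgarbage("stop")            -- switch off automatic collection
+collectgarbage("restart")         -- and switch it on again
+-- collectgarbage("generational") -- the experimental 5.2 mode mentioned above
+\stoptyping
+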
+\stopsection
+
+\startsection[title=Conclusion]
+
+So what is, given unpredictable performance hits of advertised optimizations, the
+best approach? It all starts with the \LUA\ (and \TEX) code: sloppy coding can have
+a price. Some of that can be disguised by clever interpreters but some can't. If
+the code is already fast, there is not much to gain. When going from \MKII\ to
+\MKIV\ more and more \LUA\ got introduced and lots of approaches were
+benchmarked, so, I'm already rather confident that there is not that much to
+gain. It will never have the impressive performance of interactive games and
+that's something we have to live with. As long as \LUA\ stays lean and mean,
+things can only get better over time.
+
+\stopsection
+
+\startluacode
+ table.save("about-jitting-jit.lua",document.JitRunTimes)
+\stopluacode
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-luafunctions.tex b/doc/context/sources/general/manuals/about/about-luafunctions.tex
new file mode 100644
index 000000000..810de10fd
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-luafunctions.tex
@@ -0,0 +1,292 @@
+% language=uk
+
+\startcomponent about-luafunctions
+
+\environment about-environment
+
+\startchapter[title=Functions]
+
+\startsection[title=Introduction]
+
+As part of the crited project Luigi and I also tried to identify weak spots in
+the engine and although we found some issues not all were dealt with because
+complicating the machinery makes no sense. However, just like the new \type
+{properties} mechanism provides a really simple way to associate extra \LUA\ data
+to a node without bothering about freeing it when a node is flushed, the new
+\type {luafunctions} mechanism provides an additional and fast way to cross the
+\TEX||\LUA\ boundary.
+
+\stopsection
+
+\startsection[title=Callbacks]
+
+In \LUATEX\ we can create more functionality by using \LUA\ which means that we
+end up (at least in \CONTEXT) with a constant switching between \TEX\ macro
+expansion and \LUA\ code interpretation. The magic word in this process is \type
+{callback} and there are two variants:
+
+\startitemize
+
+\startitem At well defined moments in processing its input and node lists, \TEX\
+will check if a specific callback is defined and if so, it will run that code.
+\stopitem
+
+\startitem As part of the input you can have a \type {\directlua} command and
+that one gets expanded and processed. It can print back content into the current
+input buffer. \footnote {Currently this process is somewhat more complex than
+needed, which is a side effect of supporting multiple \LUA\ states in the first
+versions of \LUATEX. We will clean up this mechanism at some point.} \stopitem
+
+\stopitemize
+
+The first type is called a \quote {direct} callback because \TEX\ calls it
+directly, and the second one is an \quote {indirect} one (even if the command is
+\type {\directlua}). It has a deferred cousin \type {\latelua} that results in a
+node being inserted that will become a \LUA\ call during shipout, when the page
+is turned into a \PDF\ stream.
+
+A callback of the first category is pretty fast because the code is already
+translated in \LUA\ bytecode. Checking if a callback has been assigned at all is
+fast too. The second variant is slower because each time the input has to be
+interpreted and checked for validity. Then there is of course some overhead in
+making the call itself.
+
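+To give an idea: in stock \LUATEX\ a callback of the first category is
+registered once and from then on called directly by \TEX. The following sketch
+assumes a bare \LUATEX\ setup; in \CONTEXT\ you should not register callbacks
+yourself but use the wrappers provided.
+
+\starttyping
+callback.register("pre_linebreak_filter", function(head,groupcode)
+    -- by now this function is bytecode; TeX calls it for each paragraph
+    return true -- true means: keep the node list as it is
+end)
+\stoptyping
+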
+There is a subtle aspect there. If you have a document that needs say ten calls
+like:
+
+\starttyping
+\directlua{tex.print("[x]")}
+\stoptyping
+
+and you have these calls inlined, you end up with ten times conversion into
+tokens (\TEX's internal view) and ten times conversion back to a string that gets
+fed into \LUA. On the other hand,
+
+\starttyping
+\def\MyCall{\directlua{tex.print("[x]")}}
+\stoptyping
+
+where we call \type {\MyCall} ten times is more efficient because we have already
+tokenized the \type {\directlua}. If we have
+
+\starttyping
+foo foo foo \directlua{tex.print("[1]")} ...
+bar bar bar \directlua{tex.print("[2]")} ...
+\stoptyping
+
+it makes sense to wrap this into a definition:
+
+\starttyping
+\def\MyCall#1{\directlua{tex.print("[#1]")}}
+\stoptyping
+
+and use:
+
+\starttyping
+foo foo foo \MyCall{1} bar bar bar \MyCall{2} ...
+\stoptyping
+
+Of course this is not unique for \type {\directlua} and to be honest, apart from
+convenience (read: less input) the gain can often be neglected. Because a macro
+package wraps functionality in (indeed) macros we already save ourselves the tokenization
+step. We can save some time by wrapping more in a function at the \LUA\ end:
+
+\starttyping
+\startluacode
+function MyFloat(f)
+ tex.print(string.format("%0.5f",f))
+end
+\stopluacode
+
+\def\MyFloat#1%
+ {\directlua{MyFloat(#1)}}
+\stoptyping
+
+This is somewhat more efficient than:
+
+\starttyping
+\def\MyFloat#1%
+ {\directlua{tex.print(string.format("\letterpercent0.5f",#1))}}
+\stoptyping
+
+Of course this is only true when we call this macro a lot of times.
+
+\stopsection
+
+\startsection[title=Shortcuts]
+
+When we talk of \quote {often} or \quote {a lot} we mean many thousands of calls.
+There are some places in \CONTEXT\ where this is indeed the case, for instance
+when we process large registers in critical editions: a few hundred pages of
+references generated in \LUA\ is no exception there. Think of the following:
+
+\starttyping
+\startluacode
+function GetTitle(n)
+ tex.print(Entries[n].title)
+end
+\stopluacode
+
+\def\GetTitle#1%
+ {\directlua{GetTitle(#1)}}
+\stoptyping
+
+If we call \type {\GetTitle} ourselves it's the same as the \type {\MyFloat}
+example, but how about this:
+
+\starttyping
+\def\GetTitle#1%
+ {{\bf \directlua{GetTitle(#1)}}}
+
+\startluacode
+function GetTitle(n)
+ tex.print(Entries[n].title)
+end
+
+function GetEntry(n)
+ if Entries[n] then
+ tex.print("\\directlua{GetTitle(",n,")}")
+ -- some more action
+ end
+end
+\stopluacode
+\stoptyping
+
+Here we have two calls where one is delayed till a later time. This delay results
+in a tokenization and translation to \LUA\ so it will cost time. A way out is this:
+
+\starttyping
+\def\GetTitle#1%
+ {{\bf \luafunction#1}}
+
+\startluacode
+local functions = tex.get_functions_table()
+
+function GetTitle(n)
+ tex.print(Entries[n].title)
+end
+
+function GetEntry(n)
+ if Entries[n] then
+ local m = #functions+1
+ functions[m] = function() GetTitle(n) end
+ tex.print("\\GetTitle{",m,"}")
+ -- some more action
+ end
+end
+\stopluacode
+\stoptyping
+
+We define a function at the \LUA\ end and just print a macro call. That call itself
+calls the defined function using \type {\luafunction}. For a large number
+of calls this is more efficient but it will be clear that you need to make sure that
+used functions are cleaned up. A simple way is to start again at slot one after (say)
+100.000 functions; another method is to reset used functions and keep counting.
+
+\starttyping
+\startluacode
+local functions = tex.get_functions_table()
+
+function GetTitle(n)
+ tex.print(Entries[n].title)
+end
+
+function GetEntry(n)
+ if Entries[n] then
+ local m = #functions+1
+ functions[m] = function(slot) -- the slot number is always
+ GetTitle(n) -- passed as argument so that
+ functions[slot] = nil -- we can reset easily
+ end
+ tex.print("\\GetTitle{",m,"}")
+ -- some more action
+ end
+end
+\stopluacode
+\stoptyping
+
+As you can expect, in \CONTEXT\ users are not expected to deal directly with
+functions at all. Already for years you can do this:
+
+\starttyping
+\def\GetTitle#1%
+ {{\bf#1}}
+
+\startluacode
+function GetEntry(n)
+ if Entries[n] then
+ context(function() context.GetTitle(Entries[n].title) end)
+ -- some more action
+ end
+end
+\stopluacode
+\stoptyping
+
+Upto \LUATEX\ 0.78 we had a \CONTEXT\ specific implementation of functions and
+from 0.79 onwards we use this new mechanism but users won't see that in practice.
+In the \type {cld-mkiv.pdf} manual you can find more about accessing \CONTEXT\
+from the \LUA\ end.
+
+Keep in mind that \type {\luafunction} is not that clever: it doesn't pick up
+arguments. That will be part of future, more extensive token handling but of
+course that will then also be a real slowdown because a mix of \TEX\
+tokenization and serialization is suboptimal (we already did extensive tests
+with that).
+
+\stopsection
+
+\startsection[title=Helpers]
+
+The above mechanism demands some orchestration in the macro package. For instance
+freeing slots should be consistent and therefore users should not mess directly
+with the functions table. If you really want to use this feature you can best do it like this:
+
+\starttyping
+\startctxfunction MyFunctionA
+ context(" A1 ")
+\stopctxfunction
+
+\startctxfunctiondefinition MyFunctionB
+ context(" B2 ")
+\stopctxfunctiondefinition
+
+\starttext
+ \dorecurse{10000}{\ctxfunction{MyFunctionA}} \page
+ \dorecurse{10000}{\MyFunctionB} \page
+ \dorecurse{10000}{\ctxlua{context(" C3 ")}} \page
+ \dorecurse{10000}{\ctxlua{tex.sprint(" D4 ")}} \page
+\stoptext
+\stoptyping
+
+In case you're curious about performance, here are the timings. Given that we have
+10.000 calls the gain is rather negligible, especially because the whole run
+takes 2.328 seconds for 52 processed pages resulting in 22.4 pages per second.
+The real gain is in more complex calls with more tokens involved and in \CONTEXT\
+we have some places where we run into the hundreds of thousands. A similar
+situation occurs when your input comes from databases and is fetched stepwise.
+
+\starttabulate[|c|c|c|c|]
+\NC \bf A \NC \bf B \NC \bf C \NC \bf D \NC \NR
+\NC 0.053 \NC 0.044 \NC 0.081 \NC 0.081 \NC \NR
+\stoptabulate
+
+So, we can save 50\% runtime but on a simple document like this a few percent is
+not that much. Of course many such small percentages can add up, and it's one of
+the reasons why \CONTEXT\ \MKIV\ is pretty fast in spite of all the switching
+between \TEX\ and \LUA. One objective is that an average complex document should
+be processed with a rate of at least 20 pages per second and in most cases we
+succeed. This fast function accessing can of course trigger new features in
+\CONTEXT, ones we didn't consider useful because of overhead.
+
+Keep in mind that in most cases, especially when programming in \LUA\ directly
+the \type {context} command already does all kind of housekeeping for you. For
+instance it also keeps track of so called trial typesetting runs and can inject
+nodes in the current stream as well. So, be warned: there is no real need to
+complicate your code with this kind of hackery if some high level subsystem
+provides the functionality already.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-mathstackers.tex b/doc/context/sources/general/manuals/about/about-mathstackers.tex
new file mode 100644
index 000000000..07fadf102
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-mathstackers.tex
@@ -0,0 +1,765 @@
+% language=uk
+
+\startcomponent about-mathstackers
+
+\environment about-environment
+
+\startchapter[title=Math stackers]
+
+\startsection[title=Introduction]
+
+In the next sections I will discuss the way we deal with stacked content in
+\CONTEXT\ \MKIV\ and in particular extensible characters. The mechanism described
+here is actually more generic and can also deal with regular text. The stacker
+code is an evolution of the mechanisms that combine math arrows with text. From
+the user's perspective there is not that much difference with the old methods
+because in practice \quote {defined} commands are used and their names stayed the same.
+However, we use different definition and setup commands and provide much more
+control. The new implementation is leaner but not meaner and fits the way \MKIV\
+is set up.
+
+How does \LUA\ fit in? We use a helper in order to determine some
+characteristics of extensibles, but we could have done without. We also use some
+new \LUATEX\ math primitives and of course we depend on \OPENTYPE\ font
+technology.
+
+\stopsection
+
+\startsection[title=Extensibles]
+
+The command \type {\leftarrowfill} was introduced in plain \TEX\ and gives, as
+the name indicates, a \leftarrowfill\ that stretches itself so that it takes the
+available space. Take the following example:
+
+\starttyping
+\hbox to 4cm{\leftarrowfill}
+\stoptyping
+
+This will make an arrow of length 4cm:
+
+\blank \mathstylehbox{\Umathaccent\fam\zerocount"2190{\hskip4cm}} \blank
+
+This arrow is made out of small snippets:
+
+\blank {\showglyphs\scale[width=\textwidth]{\mathstylehbox{\Umathaccent\fam\zerocount"2190{\hskip4cm}}}} \blank
+
+Here is another one:
+
+\starttyping
+\hbox to 4cm{\rightoverleftarrowfill}
+\stoptyping
+
+or:
+
+\blank {\mathstylehbox{\Umathaccent\fam\zerocount"21C4{\hskip4cm}}} \blank
+
+This time we have three different snippets:
+
+\blank {\showglyphs\scale[width=\textwidth]{\mathstylehbox{\Umathaccent\fam\zerocount"21C4{\hskip4cm}}}} \blank
+
+The \TEX\ engine has a concept of extensible characters. In fact there are two
+mechanisms: there is a list of larger glyphs and when that list is exhausted
+larger characters can be constructed out of snippets. Examples are left and right
+fences in math like braces and brackets, and, also in math, some top and bottom
+accents.
+
+For reasons unknown to me, some of these extensibles are handled by the engine
+directly, using properties of a font, while others are composed using macros.
+Given that \TEX\ is quite popular for typesetting scientific articles it is
+beyond my understanding why no one decided to provide some more fonts and|/|or
+extend the \TEX\ engine. After all, the whole idea of Donald Knuth with \TEX\ was
+that it could be adapted to future needs by its users. And so, more than 30 years
+after \TEX\ and macro packages showed up we're stuck with not only incomplete
+fonts, but also an engine that was never adapted to demands.
+
+\stopsection
+
+\startsection[title=The traditional way]
+
+In \CONTEXT\ we have support for extensibles built into the core but it uses the
+traditional approach: take some snippets and paste them together, making sure to
+achieve some overlap and get rid of side bearings. In terms of \TEX\ code this can
+best be illustrated with the plain \TEX\ definition of such a command:
+
+\starttyping
+\def\leftarrowfill
+ {$%
+ \mathsurround0pt%
+ \mathord\leftarrow\mkern-7mu%
+ \cleaders\hbox{$\mkern-2mu\smash-\mkern-2mu$}\hfill
+ \mkern-7mu\smash-%
+ $}
+\stoptyping
+
+Here we create a tight formula starting with a \type {leftarrow}, ending with a
+minus sign and glued together with the number of minus signs that are needed to
+fill the available space. This macro eventually expands to something like this (a
+bit spaced out):
+
+\starttyping
+\def\leftarrowfill { $
+ % \leftarrow = \mathchardef\leftarrow="3220 in plain but in
+ % unicode it's character 0x2190 so we use that one here
+ \mathsurround=0pt
+ \mathord{\mathchar"2190}
+ \mkern-7mu
+ \cleaders
+ \hbox { $
+ \mkern-2mu
+ \mathchoice
+ {\setbox0\hbox{$\displaystyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\textstyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\scriptstyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\scriptscriptstyle-$}\ht0=0pt\dp0=0pt\box0}
+ \mkern-2mu
+ $ }
+ \hfill
+ \mkern-7mu
+ \mathchoice
+ {\setbox0\hbox{$\displaystyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\textstyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\scriptstyle -$}\ht0=0pt\dp0=0pt\box0}
+ {\setbox0\hbox{$\scriptscriptstyle-$}\ht0=0pt\dp0=0pt\box0}
+$ }
+\stoptyping
+
+If you look at the code you see a few hacks. First of all we see that we need to
+add kerns in order to make the symbols overlap. For the middle shapes this is
+understandable as there we don't want rounding errors to lead to gaps. Also,
+because the minus in Computer Modern (and therefore Latin Modern) has rounded
+tips, we need to make sure that we end up beyond the tips. Next we see two blobs
+of \type {mathchoice}. This primitive chooses one of the four variants and
+switches to the right math style. It packages the minus and smashes it. In our
+case smashing does not make much sense as the arrowhead has height and depth anyway,
+but it's a side effect of using general purpose macros that there can be some
+unneeded overhead.
+
+\blank
+\hbox \bgroup \quad
+ \scale[sx=5,sy=5]{\hbox{\showglyphs$\mathsurround\zeropoint\char"2190$}}\quad
+ \scale[sx=5,sy=5]{\hbox{\showglyphs$\mathsurround\zeropoint\char"002D$}}\quad
+ \scale[sx=5,sy=5]{\hbox{\showglyphs$\mathsurround\zeropoint\char"27F8$}}\quad
+ \scale[sx=5,sy=5]{\hbox{\showglyphs$\mathsurround\zeropoint\char"003D$}}\quad
+\egroup
+\blank
+
+Above you see the characters that are traditionally combined into leftward
+pointing arrows. Watch the whitespace on the left and right of the actual glyphs.
+
+\stopsection
+
+\startsection[title=The new way]
+
+These zero height and depth don't show up in our rendered examples. Why is this?
+The reason is that I cheated a bit. I used this to get the arrow: \footnote {In
+this example I misuse the accent placement mechanism. Upto \LUATEX\ 0.75 that was
+the way to go.}
+
+\starttyping
+\mathstylehbox{\Umathaccent\fam\zerocount"21C4{\hskip4cm}}
+\stoptyping
+
+The \CONTEXT\ support macro \type {\mathstylehbox} is an efficient variant of
+\type {\mathchoice}. More significant is that we don't assemble the arrow, but
+just put it as an accent on top of a skip. The \type {\Umathaccent} primitive
+will assemble the long arrow for us, using information in the font. If we look
+into the definition of the (Latin Modern) font in \MKIV\ we see this:
+
+\starttyping
+[8592]={
+ ["boundingbox"]={ 57, -10, 942, 510 },
+ ["class"]="base",
+ ["index"]=1852,
+ ["math"]={
+ ["horiz_parts"]={
+ {
+ ["advance"]=507,
+ ["end"]=169,
+ ["extender"]=0,
+ ["glyph"]=984274,
+ ["start"]=0,
+ },
+ {
+ ["advance"]=337,
+ ["end"]=337,
+ ["extender"]=1,
+ ["glyph"]=984275,
+ ["start"]=337,
+ },
+ {
+ ["advance"]=507,
+ ["end"]=0,
+ ["extender"]=0,
+ ["glyph"]=984276,
+ ["start"]=169,
+ },
+ },
+ ["horiz_variants"]={ 10229 },
+ },
+ ["name"]="arrowleft",
+ ["width"]=1000,
+}
+\stoptyping
+
+This arrow symbol comes in two sizes. The extra size is mentioned in \type
+{horiz_variants}. When no more variants are seen, it switches to the extensible
+definition, that uses \type {horiz_parts}. The dimensions are in basepoints, the
+references to glyphs are decimal. The \type {end} and \type {start} fields
+specify the overlap. When \type {extender} equals 1 it signals a repeatable
+snippet.
+
+In the \TEX\ engine the slot allocated for the left arrow symbol has a \type
+{next} pointer to a larger shape. Here there is only one such shape but when
+there are more they form a linked list. The last one in such a list gets the
+specification of the extenders.
+
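+The \LUA\ helper mentioned in the introduction essentially inspects tables like
+the one above. A minimal sketch, not the actual \CONTEXT\ code but following the
+field names shown, could look like this:
+
+\starttyping
+local function extensible_info(chardata)
+    local m = chardata.math
+    if not m then
+        return "fixed size only"
+    end
+    local result = { }
+    if m.horiz_variants then
+        result[#result+1] = "larger variants: "
+            .. table.concat(m.horiz_variants,", ")
+    end
+    if m.horiz_parts then
+        local repeatable = 0
+        for i=1,#m.horiz_parts do
+            if m.horiz_parts[i].extender == 1 then
+                repeatable = repeatable + 1
+            end
+        end
+        result[#result+1] = #m.horiz_parts .. " snippets ("
+            .. repeatable .. " repeatable)"
+    end
+    return table.concat(result,", ")
+end
+\stoptyping
+
+For the left arrow shown above this reports one larger variant (10229) and three
+snippets of which one is repeatable.
+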
+We hard|-|coded the width to 4cm so how does it work when the arrow has to adapt
+itself? There are two cases there. When we are putting text on top of or below an
+arrow, we know what the width is because we can measure the text. But when we use
+the arrow as a filler, we have to leave it to the engine to arrange it. In recent
+\LUATEX\ the definition can be as simple as:
+
+\starttyping
+\def\leftarrowfill{\leaders "2190 \hfill}
+\stoptyping
+
+or:
+
+\starttyping
+\def\leftarrowfill{\mathstylehbox{\leaders"2190\hfill}}
+\stoptyping
+
+In fact, we can use this new \LUATEX\ extension to \type {\leaders} to
+replace the accent hacks as well.
+
+\stopsection
+
+\startsection[title=Wrapping it in macros]
+
+If this was all, we would be done in a few lines of definitions but as usual
+there is more involved: especially text. The prerequisites can be summarized as
+follows:
+
+\startitemize[packed]
+\startitem
+ The width of the extensible need to adapt itself automatically.
+\stopitem
+\startitem
+ We need to be able to control horizontal and vertical offsets.
+\stopitem
+\startitem
+ We best have a math as well as a text variant (which is handy for chemistry).
+\stopitem
+\startitem
+ For historic reasons we need to deal with optional arguments in a special
+ (reverse) way.
+\stopitem
+\startitem
+ We need alternatives for extensibles on top, in the middle and at the bottom.
+\stopitem
+\stopitemize
+
+Using a low level command we can do this:
+
+\startbuffer[math]
+$x \directmathextensible{"2192}{top}{bottom} x$
+\stopbuffer
+
+\typebuffer[math] \blank \getbuffer[math] \blank
+
+This is not that exciting to look at, but the next might be:
+
+\enabletrackers[math.stackers.texts]
+
+\blank \getbuffer[math] \blank
+
+Here we have turned on a tracker:
+
+\starttyping
+\enabletrackers[math.stackers.texts]
+\stoptyping
+
+The top part is transparent blue, the middle part transparent red and the bottom
+part transparent green. When the areas overlap you see the mixed color.
+
+Before we explore some options, we show some variants. Often extensibles are used
+in math mode, if only because they originate in math and come from math fonts.
+
+\startbuffer[text]
+$x \textstacker{"2192}{top}{bottom} x$
+\stopbuffer
+
+\typebuffer[text] \blank \getbuffer[text] \blank
+
+These commands also work outside math mode:
+
+\startbuffer[none]
+x \textstacker{"2192}{top}{bottom} x
+\stopbuffer
+
+\typebuffer[none] \blank \getbuffer[none] \blank
+
+and to some extent can adapt themselves:
+
+\startbuffer[high]
+x\high{x \textstacker{"2192}{top}{bottom} x} x
+\stopbuffer
+
+\typebuffer[high] \blank[2*big] \getbuffer[high] \blank
+
+\stopsection
+
+\startsection[title=Influencing the spacing]
+
+We will use the text example to illustrate some options.
+
+\startbuffer[demo]
+\ruledhbox \bgroup \quad
+ \setupmathstackers[location=top]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[location=high]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[location=middle]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[location=low]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[location=bottom]%
+ \textstacker{"21C4}{top}{bottom}\quad
+\egroup
+\stopbuffer
+
+\typebuffer[demo]
+
+You can set up extensibles to be shifted up and down.
+
+\blank \getbuffer[demo] \blank
+
+The above rendering uses the default spacing. When we set the offsets and the
+minimum width to zero we get this:
+
+\startbuffer[setup]
+\setupmathstackers
+ [voffset=\zeropoint,
+ hoffset=\zeropoint,
+ minheight=\exheight,
+ mindepth=\zeropoint,
+ minwidth=\zeropoint]
+\stopbuffer
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+The setup looks like this:
+
+\typebuffer[setup]
+
+and gives a pretty tight rendering. The default values are:
+
+\starttyping
+\setupmathstackers
+ [voffset=.25\exheight,
+ hoffset=.5\emwidth,
+ minheight=\exheight,
+ mindepth=\zeropoint,
+ minwidth=\emwidth]
+\stoptyping
+
+\startbuffer[setup]
+\setupmathstackers
+ [voffset=2\exheight,
+ hoffset=\emwidth,
+ minheight=\exheight,
+ mindepth=\zeropoint,
+ minwidth=\zeropoint]
+\stopbuffer
+
+When we set \type {voffset} to twice the ex|-|height and \type {hoffset} to
+the em|-|width we get:
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+We can enforce a (consistent) height and depth of the extensible by setting the
+minimum values:
+
+\startbuffer[setup]
+\setupmathstackers
+ [voffset=\zeropoint,
+ hoffset=\zeropoint,
+ minheight=5\exheight,
+ mindepth=3\exheight,
+ minwidth=6\emwidth]
+\stopbuffer
+
+\typebuffer[setup]
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+\stopsection
+
+\startsection[title=A neat feature]
+
+A more obscure feature relates to the visual appearance. When we put something
+on top of for instance an arrow, it sometimes looks better when we only consider
+the middle part. Watch the following:
+
+\startbuffer[setup]
+\setupmathstackers
+ [voffset=\zeropoint,
+ hoffset=\zeropoint,
+ minheight=\zeropoint,
+ mindepth=\zeropoint,
+ minwidth=\zeropoint]
+\stopbuffer
+
+\startbuffer[demo]
+\ruledhbox \bgroup \quad
+ \setupmathstackers[offset=normal]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[offset=min]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[offset=max]%
+ \textstacker{"21C4}{top}{bottom}\quad
+\egroup
+\stopbuffer
+
+\typebuffer[demo]
+
+The \type {min} and \type {max} values will add extra offsets that relate to the
+width of the edge snippets.
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+In this case both have the same result but the difference becomes clear when we
+set the \type {hoffset} to the em|-|width. In the case of \type {min} we don't
+add extra space when the \type {hoffset} is applied.
+
+\startbuffer[setup]
+\setupmathstackers
+ [voffset=\zeropoint,
+ hoffset=\emwidth,
+ minheight=\zeropoint,
+ mindepth=\zeropoint,
+ minwidth=\zeropoint]
+\stopbuffer
+
+\startbuffer[demo]
+\ruledhbox \bgroup \quad
+ \setupmathstackers[offset=normal]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[offset=min]%
+ \textstacker{"21C4}{top}{bottom}\quad
+ \setupmathstackers[offset=max]%
+ \textstacker{"21C4}{top}{bottom}\quad
+\egroup
+\stopbuffer
+
+\typebuffer[demo]
+
+Of course in this example we have a symmetrical correction.
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+A one|-|sided arrow behaves differently:
+
+\startbuffer[demo]
+\ruledhbox \bgroup \quad
+ \setupmathstackers[offset=normal]%
+ \textstacker{"2192}{top}{bottom}\quad
+ \setupmathstackers[offset=min]%
+ \textstacker{"2192}{top}{bottom}\quad
+ \setupmathstackers[offset=max]%
+ \textstacker{"2192}{top}{bottom}\quad
+\egroup
+\stopbuffer
+
+\blank \start \getbuffer[setup,demo] \stop \blank
+
+\stopsection
+
+\startsection[title=The user interface]
+
+It all starts out with categories. We have a couple of predefined categories in
+the core. The \type {mathematics} category typesets the top and bottom texts in
+math mode, while the \type {text} category doesn't. The \type {reverse} category
+swaps its arguments. There are \type {upper} and \type {under} categories too.
+
+As with most \CONTEXT\ mechanisms inheritance is part of the picture:
+
+\starttyping
+\definemathextensibles [mine] [big] [offset=min]
+\stoptyping
+
+You can change settings with:
+
+\starttyping
+\setupmathstackers [mine] [big] [voffset=\exheight]
+\stoptyping
+
+For downward compatibility we also provide these:
+
+\starttyping
+\definemathextensibles [normal] [hoffset=0.5\emwidth]
+\definemathextensibles [none] [hoffset=\zeropoint]
+\definemathextensibles [small] [hoffset=1\emwidth]
+\definemathextensibles [medium] [hoffset=1.5\emwidth]
+\definemathextensibles [big] [hoffset=2\emwidth]
+\stoptyping
+
+They inherit from \type {mathematics} so choosing this also forces the top and
+bottom texts to be typeset in math mode.
+
+These commands don't define extensibles, they only provide a way to categorize
+them. There are a couple of definers and one reason for that is that we want to
+define downward compatible commands.
+
+\starttyping
+\definemathextensible [reverse] [xleftarrow] ["2190]
+\definemathextensible [reverse] [xrightarrow] ["2192]
+\stoptyping
+
+The \type {x} in the name is sort of standard for an extensible symbol with
+optionally some text on top or below. The reverse category forces compatible behaviour.
+
+\startbuffer
+\xrightarrow{stuff below} {stuff on top} \quad
+\xrightarrow{stuff on top} \quad
+\xrightarrow{} {stuff on top} \quad
+\xrightarrow{stuff below} {} \quad
+\xrightarrow{} {} \quad
+\xrightarrow \quad
+\stopbuffer
+
+\typebuffer \getbuffer
+
+New in \MKIV\ is the \type {t} variant that typesets the text as (indeed) text.
+In addition we have a normal|-|order \type {m} variant:
+
+\starttyping
+\definemathextensible [text] [tleftarrow] ["2190]
+\definemathextensible [text] [trightarrow] ["2192]
+
+\definemathextensible [mathematics] [mleftarrow] ["2190]
+\definemathextensible [mathematics] [mrightarrow] ["2192]
+\stoptyping
+
+This time the order is always top first and bottom next:
+
+\startbuffer
+\trightarrow{stuff on top} {stuff below} \quad
+\trightarrow{stuff on top} {} \quad
+\trightarrow{stuff on top} \quad
+\trightarrow{} {stuff below} \quad
+\trightarrow \quad
+\stopbuffer
+
+\typebuffer
+
+So we get:
+
+\getbuffer
+
+As you can see, there is an optional first argument that specifies the category
+that applies. This permits you to define extra commands that have their own
+(spacing) properties.
+
+Earlier on we saw that defined commands can be forced into a category:
+
+\startbuffer
+\trightarrow[big] {stuff on top} {stuff below} \quad
+\trightarrow[medium]{stuff on top} {stuff below} \quad
+\trightarrow[small] {stuff on top} {stuff below}
+\stopbuffer
+
+\typebuffer
+
+Here we get:
+
+\getbuffer
+
+A variation on this kind of extensible is the over- and underarrow. This time the
+text is the nucleus.
+
+\starttyping
+\definemathoverextensible [top] [overleftarrow] ["2190]
+\definemathoverextensible [top] [overrightarrow] ["2192]
+
+\definemathunderextensible [bottom] [underleftarrow] ["2190]
+\definemathunderextensible [bottom] [underrightarrow] ["2192]
+\stoptyping
+
+In action this looks like:
+
+\startbuffer
+\ruledhbox \bgroup $ \quad
+ \overleftarrow {a} \quad \overleftarrow {ABC} $ \quad
+ x_{\overleftarrow {a}} \quad x_{\overleftarrow {ABC}} $ \quad
+ \underleftarrow{a} \quad \underleftarrow{ABC} $ \quad
+ x_{\underleftarrow{a}} \quad x_{\underleftarrow{ABC}} $ \quad
+$ \egroup
+\stopbuffer
+
+\typebuffer
+
+Here we have tracing enabled, and we also show the bounding box:
+
+\blank \getbuffer \blank
+
+This leaves us with one command: the one that defines the basic filler:
+
+\starttyping
+\definemathextensiblefiller [leftarrowfill] ["2190]
+\definemathextensiblefiller [rightarrowfill] ["2192]
+\stoptyping
+
+Commands defined like this will stretch themselves to fit the circumstances,
+and normally they will fill up the available space.
+
+\startbuffer
+\hbox to 4cm {from here \leftarrowfill\ to there}
+\hbox to 8cm {from there \rightarrowfill\ to here}
+\stopbuffer
+
+\typebuffer
+
+These commands (like the others) work in text mode as well as in math mode.
+
+\blank \getbuffer \blank
+
+\stopsection
+
+\startsection[title=Special cases]
+
+One of the reasons why the arrows mechanism has always been somewhat
+configurable is that we need arrows in the chemistry code.
+
+\starttyping
+\definemathextensibles
+ [chemistry]
+ [offset=max,
+ left=\enspace,
+ right=\enspace,
+ hoffset=.5\emwidth]
+
+\definemathextensible [chemistry] [cleftarrow] ["2190]
+\definemathextensible [chemistry] [crightarrow] ["2192]
+\definemathextensible [chemistry] [crightoverleftarrow] ["21C4]
+\stoptyping
+
+\startbuffer
+2H + O \crightarrow{explosive}\ H\low{2}O
+\stopbuffer
+
+\typebuffer
+
+Of course normally such code is wrapped into the chemistry environments and
+support macros.
+
+\blank \getbuffer \blank
+
+If you want something else than an extensible you can use definitions like the
+following:
+
+\startbuffer
+\definemathtriplet [tripleta]
+\definemathtriplet [text] [tripletb]
+\definemathtriplet [text] [tripletc] [\otimes]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startbuffer
+\tripleta{\ominus}{top}{bottom} and
+\tripletb{\oplus} {top}{bottom} and
+\tripletc {top}{bottom}
+\stopbuffer
+
+\typebuffer
+
+\blank \hbox{\getbuffer} \blank
+
+As an optional first argument you can pass a category.
+
+\startbuffer
+\tripleta[mathematics]{\ominus}{top}{bottom} and
+\tripletb[mathematics]{\oplus}{top}{bottom} and
+\tripletc[mathematics]{top}{bottom}
+\stopbuffer
+
+\typebuffer
+
+Which gives:
+
+\blank \hbox{\getbuffer} \blank
+
+Instead of \type {mathematics} you could have given its synonym \type {math}.
+Keep in mind that categories are shared among stackers. There is also a direct
+command:
+
+\starttyping
+before \mathtriplet{\otimes}{top}{bottom} after
+\stoptyping
+
+\stopsection
+
+\startsection[title=An overview]
+
+We end by showing a list of extensibles that come with the font used here,
+\TEX\ Gyre Pagella. First we load a module:
+
+\startbuffer
+\usemodule[s][math-extensibles]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+This module provides a couple of commands that typeset a table with the
+extensibles as known in \CONTEXT. Beware: not all fonts have all those
+characters.
+
+\startbuffer
+\showmathextensibles[alternative=a]
+\stopbuffer
+
+The first command is:
+
+\typebuffer
+
+This command shows the base glyph, and the stretched variant with text on top
+and below. When no symbol is found in the font a rule is rendered.
+
+\getbuffer
+
+\startbuffer
+\showmathextensibles[alternative=b]
+\stopbuffer
+
+\typebuffer
+
+This command typesets a list with \UNICODE\ entries and defined commands. There
+are empty entries due to lack of glyphs in the used font. Not all characters have
+an associated command. Some have multiple commands with different math classes.
+
+\getbuffer
+
+\stopsection
+
+\startsection[title=Remark]
+
+The number of extensions to the \LUATEX\ core math engine is not that large and
+mostly involves more control over spacing and support for \UNICODE\ math as
+\OPENTYPE\ math extensibles. However, a few years after writing this chapter the
+machinery was cleaned up a bit and in the process some more control was added to
+constructors for radicals, fractions and delimiters. The spacing and composition
+can be controlled in a bit more detail using keywords (and dimensions). Because
+in \CONTEXT\ we already have mechanisms in place not much of that new
+functionality is used (yet). Also, in the meantime \CONTEXT\ evolved further.
+This chapter is just a snapshot and it might even render a bit differently in more
+recent versions of \CONTEXT\ and|/|or \LUATEX. After all, it was written as part
+of the development story.
+
+\stopsection
+
+\stopchapter
+
+\disabletrackers[math.stackers.texts]
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-mathstyles.tex b/doc/context/sources/general/manuals/about/about-mathstyles.tex
new file mode 100644
index 000000000..205e98561
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-mathstyles.tex
@@ -0,0 +1,457 @@
+% language=uk
+
+\startcomponent about-mathstyles
+
+\environment about-environment
+
+\startchapter[title=Math Styles]
+
+\startsection[title=Introduction]
+
+Because \CONTEXT\ is often considered somewhat less math savvy than for instance
+\LATEX\ we have more freedom to experiment with new insights and move forward. Of
+course \CONTEXT\ always could deal with math, and even provides rather advanced
+support when it comes to combining fonts, which at some point was needed for a
+magazine that used two completely different sets of fonts in one issue. Also,
+many of the mechanisms had ways to influence the rendering, but often by means of
+constants and flags.
+
+Already in an early stage of \LUATEX\ we went \UNICODE\ and after that the low
+level code has been cleaned up stepwise. In fact, we probably have less code now
+than before because we need less hacks. Well, this might not be that true, if we
+consider that we also introduced code at the \LUA\ end which wasn't there before,
+but which makes makes support easier.
+
+Because we don't need to support all kinds of third party math extensions that
+themselves might depend on overloading low level implementations, we can
+rigorously replace mechanisms. In the process we also make things easier to
+configure, easier to define and we promote some previously low level tuning
+options at the user level.
+
+Of course, by introducing new features and more options, there is a price to pay
+in terms of speed, but in practice users will seldom use the more complex
+constructs thousands of times in one document. Elsewhere arrows and alike were
+discussed, here I will spend some words on math styles and will use fences and
+fractions as an example as these mechanisms were used to experiment.
+
+\stopsection
+
+\startsection[title=Math styles]
+
+In \TEX\ a formula can use three different sizes of a font: text, script and
+scriptscript. In addition a formula can be typeset using rules for display math
+or rules for inline math. This means that we have the following so called math
+styles:
+
+\starttabulate[||||]
+% \FL
+\NC \bf keyword \NC \bf meaning \NC \bf command \NC \NR
+% \FL
+\NC \type{display} \NC used for display math \NC \type {\displaystyle} \NC \NR
+\NC \type{text} \NC used for inline math \NC \type {\textstyle} \NC \NR
+\NC \type{script} \NC smaller than text style \NC \type {\scriptstyle} \NC \NR
+\NC \type{scriptscript} \NC smaller than script style \NC \type {\scriptscriptstyle} \NC \NR
+% \LL
+\stoptabulate
+
+Each of these commands will force a style but in practice you seldom need to do
+that because \TEX\ does it automatically. In addition there are cramped styles
+with corresponding commands.
+
+\starttabulate
+ \NC \ruledhbox{$\displaystyle x^2 + \sqrt{x^2+2x} + \sqrt{\displaystyle x^2+2x}$} \NC \type{\displaystyle } \NC \NR
+ \NC \ruledhbox{$\crampeddisplaystyle x^2 + \sqrt{x^2+2x} + \sqrt{\crampeddisplaystyle x^2+2x}$} \NC \type{\crampeddisplaystyle } \NC \NR
+ \NC \ruledhbox{$\textstyle x^2 + \sqrt{x^2+2x} + \sqrt{\textstyle x^2+2x}$} \NC \type{\textstyle } \NC \NR
+ \NC \ruledhbox{$\crampedtextstyle x^2 + \sqrt{x^2+2x} + \sqrt{\crampedtextstyle x^2+2x}$} \NC \type{\crampedtextstyle } \NC \NR
+ \NC \ruledhbox{$\scriptstyle x^2 + \sqrt{x^2+2x} + \sqrt{\scriptstyle x^2+2x}$} \NC \type{\scriptstyle } \NC \NR
+ \NC \ruledhbox{$\crampedscriptstyle x^2 + \sqrt{x^2+2x} + \sqrt{\crampedscriptstyle x^2+2x}$} \NC \type{\crampedscriptstyle } \NC \NR
+ \NC \ruledhbox{$\scriptscriptstyle x^2 + \sqrt{x^2+2x} + \sqrt{\scriptscriptstyle x^2+2x}$} \NC \type{\scriptscriptstyle } \NC \NR
+ \NC \ruledhbox{$\crampedscriptscriptstyle x^2 + \sqrt{x^2+2x} + \sqrt{\crampedscriptscriptstyle x^2+2x}$} \NC \type{\crampedscriptscriptstyle} \NC \NR
+\stoptabulate
+
+Here we applied the styles as follows:
+
+\startbuffer
+$\textstyle x^2 + \sqrt{x^2+2x} + \sqrt{\textstyle x^2+2x}$
+\stopbuffer
+
+\typebuffer
+
+The differences are subtle: the superscripts in the square root are positioned a
+bit lower than normal, because the radical forces them to be cramped.
+
+\startlinecorrection
+\scale[width=\hsize]{\maincolor \getbuffer}
+\stoplinecorrection
+
+Although the average user will not bother about styles, a math power user might
+get excited about the possibility to control the size of fonts being used, of
+course wit the danger of creating a visually inconsistent document. And, as in
+\CONTEXT\ we try to avoid such low level commands \footnote {Although \unknown\
+it's pretty hard to convince users to stay away from \type {\vskip} and friends.}
+it will be no surprise that we have ways to set them beforehand.
+
+\startbuffer
+\definemathstyle[mystyle][scriptscript]
+
+$ 2x + \startmathstyle [mystyle] 4y^2 \stopmathstyle = 10 $
+\stopbuffer
+
+\typebuffer
+
+So, if you want it this ugly, you can get it:
+
+\blank \start \getbuffer \stop \blank
+
+A style can be a combination of keywords. Of course we have \type {display},
+\type {text}, \type {script} and \type {scriptscript}. Then there are \type
+{uncramped} and \type {cramped} along with their synonyms \type {normal} and
+\type {packed}. In some cases you can also use \type {small} and \type {big}
+which will promote the size up or down, relative to what we have currently.
+
+A style definition can be a combination of such keywords:
+
+\starttyping
+\definemathstyle[mystyle][scriptscript,cramped]
+\stoptyping
+
+Gradually we will introduce the \type {mathstyle} keyword in math related
+setup commands.
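+
+For instance, a definition like the following (a made|-|up name, using the
+fence mechanism discussed below) would pass such a style along:
+
+\starttyping
+\definemathfence [scriptbar] [bar] [mathstyle=script]
+\stoptyping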
+
+In most cases a user will limit the scope of some setting by using braces, like
+this:
+
+\startbuffer
+$x{\setupmathstyle[script]x}x$
+\stopbuffer
+
+\typebuffer
+
+This gives {\maincolor \ignorespaces \getbuffer \removeunwantedspaces}: a smaller
+symbol between two with text size. Equally valid is this:
+
+\startbuffer
+$x\startmathstyle[script]x\stopmathstyle x$
+\stopbuffer
+
+\typebuffer
+
+Again we get {\maincolor \ignorespaces \getbuffer \removeunwantedspaces}, but at
+the cost of more verbose coding.
+
+The use of \type {{}} (whether or not hidden in commands) has a few side effects.
+In text mode, when we use this at the start of a paragraph, the paragraph will
+start inside the group and when we end the group, specific settings that were
+done at that time get lost. So, in practice you will force a paragraph outside
+the group using \type {\dontleavehmode}, \type {\strut}, or one of the
+indentation commands.
+
+In math mode a new math group is created which limits local style settings to
+this group. But as such groups also can trigger special kinds of spacing you
+sometimes don't want that. One pitfall is then to do this:
+
+\startbuffer
+$x\begingroup\setupmathstyle[script]x\endgroup x$
+\stopbuffer
+
+\typebuffer
+
+Alas, now we get {\maincolor \ignorespaces \getbuffer \removeunwantedspaces}. A
+\type {\begingroup} limits the scope of many things but it will not create a math
+group! This kind of subtle issue is the reason why we have pre|-|built solutions
+that take care of style switching, grouping, spacing and positioning.
+
+\stopsection
+
+\startsection[title=Fences]
+
+Fences are symbols at the left and right of an expression: braces, brackets,
+curly braces, and bars are the most well known. Often they are supposed to adapt
+their size to the content that they wrap. Here you see some in action:
+
+\starttabulate[||c||]
+\NC \type {$|x|$} \NC $|x|$ \NC okay \NC \NR
+\NC \type {$||x||$} \NC $||x||$ \NC okay \NC \NR
+\NC \type {$a\left | \frac{1}{b}\right | c$} \NC $a\left | \frac{1}{b}\right | c$ \NC okay \NC \NR
+\NC \type {$a\left ||\frac{1}{b}\right ||c$} \NC $a\left || \frac{1}{b}\right ||c$ \NC wrong \NC \NR
+\NC \type {$a\left ‖ \frac{1}{b}\right ‖ c$} \NC $a\left ‖ \frac{1}{b}\right ‖ c$ \NC okay \NC \NR
+\stoptabulate
+
+Often authors like to code their math with minimal structure and if you use
+\UNICODE\ characters that is actually quite doable. Just look at the double bar
+in the example above: if we input \type {||} we don't get what we want, but with
+\type {‖} the result is okay. This is because the \type {\left} and \type
+{\right} commands expect one character. But, even then, coding a bit more
+verbosely sometimes makes sense.
+
+In stock \CONTEXT\ we have a couple of predefined fences:
+
+\starttyping
+\definemathfence [parenthesis] [left=0x0028,right=0x0029]
+\definemathfence [bracket] [left=0x005B,right=0x005D]
+\definemathfence [braces] [left=0x007B,right=0x007D]
+\definemathfence [bar] [left=0x007C,right=0x007C]
+\definemathfence [doublebar] [left=0x2016,right=0x2016]
+\definemathfence [angle] [left=0x003C,right=0x003E]
+\stoptyping
+
+\startbuffer
+test $a \fenced[bar] {\frac{1}{b}} c$ test
+test $a \fenced[doublebar]{\frac{1}{b}} c$ test
+test $a \fenced[bracket] {\frac{1}{b}} c$ test
+\stopbuffer
+
+You use these by name:
+
+\typebuffer
+
+and get
+
+\startlines \getbuffer \stoplines
+
+\startbuffer
+\definemathfence [nooffence] [left=0x005B]
+\stopbuffer
+
+You can stick to only one fence:
+
+\typebuffer \getbuffer
+
+\startbuffer
+on $a \fenced[nooffence]{\frac{1}{b}} c$ off
+\stopbuffer
+
+Here \CONTEXT\ will take care of the dummy fence that \TEX\ expects instead.
+
+\startlines \getbuffer \stoplines
+
+You can define new fences and clone existing ones. You can also assign some
+properties:
+
+\startbuffer
+\definemathfence
+ [fancybracket]
+ [bracket]
+ [command=yes,
+ color=blue]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startbuffer
+test $a\fancybracket{\frac{1}{b}}c$ test
+test \color[red]{$a\fancybracket{\frac{1}{b}}c$} test
+\stopbuffer
+
+\typebuffer
+
+The color is only applied to the fence. This makes sense as the formula can
+follow the main color but influencing the fences is technically somewhat more
+complex.
+
+\getbuffer
+
+Here are some more examples:
+
+\startbuffer
+\definemathfence
+ [normalbracket]
+ [bracket]
+ [command=yes,
+ color=blue]
+
+\definemathfence
+ [scriptbracket]
+ [normalbracket]
+ [mathstyle=script]
+
+\definemathfence
+ [smallbracket]
+ [normalbracket]
+ [mathstyle=small]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\starttabulate
+\NC \type{$a \frac{1}{b} c$} \NC $a \frac{1}{b} c$ \NC \NR
+\TB
+\NC \type{$a \normalbracket{\frac{1}{b}} c$} \NC $a \normalbracket{\frac{1}{b}} c$ \NC \NR
+\TB
+\NC \type{$a \scriptbracket{\frac{1}{b}} c$} \NC $a \scriptbracket{\frac{1}{b}} c$ \NC \NR
+\TB
+\NC \type{$a \smallbracket {\frac{1}{b}} c$} \NC $a \smallbracket {\frac{1}{b}} c$ \NC \NR
+\stoptabulate
+
+As with most commands, the fences inherit from the parents so we can say:
+
+\starttyping
+\setupmathfences [color=red]
+\stoptyping
+
+and get all our fences colored red. The \type {command} option results in a
+command being defined, which saves you some keying.
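+
+To illustrate that (a sketch based on the definitions above): with \type
+{command=yes} both of the following calls should give the same result:
+
+\starttyping
+$a \fenced[fancybracket]{\frac{1}{b}} c$
+$a \fancybracket       {\frac{1}{b}} c$
+\stoptyping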
+
+\stopsection
+
+\startsection[title=Fractions]
+
+In \TEX\ the mechanism to put something on top of something else, separated by a
+horizontal rule, is driven by the \type {\over} primitive. That one has a
+(compared to other commands) somewhat different specification, in the sense that
+one of its arguments sits in front:
+
+\starttyping
+$ {{2x}\over{x^1}} $
+\stoptyping
+
+Although to some extent this is considered to be more readable, macro packages
+often provide a \type {\frac} command that goes like this:
+
+\starttyping
+$ \frac{2x}{x^1} $
+\stoptyping
+
+There we have fewer braces and the arguments come after the command. As with the
+fences in the previous section, you can define your own fractions:
+
+\startbuffer
+\definemathfraction
+ [innerfrac]
+ [frac]
+ [alternative=inner,
+ mathstyle=script,
+ color=red]
+
+\definemathfraction
+ [outerfrac]
+ [frac]
+ [alternative=outer,
+ mathstyle=script,
+ color=blue]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+The mathstyle and color have already been discussed, but the \type {alternative} is
+specific for these fractions. It determines whether the style is applied to the
+whole fraction or to its components.
+
+\startbuffer
+\startformula
+\outerfrac{2a}{3b} = \innerfrac{2a}{3b} = \frac{2a}{3b}
+\stopformula
+\stopbuffer
+
+\typebuffer
+
+As with fences, the color is only applied to the horizontal bar as there is no
+other easy way to color that otherwise.
+
+\getbuffer
+
+As \TEX\ has a couple of low level stackers, we provide an interface to that as
+well, but we hide the dirty details. For instance you can define left and right
+fences and influence the rule:
+
+\startbuffer
+\definemathfraction[fraca][rule=no,left=0x005B,right=0x007C]
+\definemathfraction[fracb][rule=yes,left=0x007B,right=0x007D]
+\definemathfraction[fracc][rule=auto,left=0x007C]
+\definemathfraction[fracd][rule=yes,rulethickness=2pt,left=0x007C]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+When \type {rule} is set to \type {auto}, we use \TEX's values (derived from font
+metrics) for the thickness of rules, while \type {yes} triggers usage of the
+specified \type {rulethickness}.
+
+\startbuffer
+\startformula
+\fraca{a}{b} + \fracb{a}{b} + \fracc{a}{b} + \fracd{a}{b}
+\stopformula
+\stopbuffer
+
+\typebuffer
+
+Gives:
+
+\getbuffer
+
+\startbuffer
+\definemathfraction
+ [frace]
+ [rule=yes,
+ color=blue,
+ rulethickness=1pt,
+ left=0x005B,
+ right=0x007C]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+This fraction looks as follows (scaled up):
+
+\startlinecorrection
+\midaligned{\scale[height=5ex]{$\displaystyle\frace{a}{b}$}}
+\stoplinecorrection
+
+So, the color is applied to the (optional) fences as well as to the (optional)
+rule. And when you color the whole formula as part of the context, you get:
+
+\startlinecorrection
+\midaligned{\scale[height=5ex]{\color[maincolor]{$\displaystyle\frace{a}{b}$}}}
+\stoplinecorrection
+
+There is a (maybe not so) subtle difference between fences that come with
+fractions and regular fences. Take these definitions:
+
+\startbuffer
+\definemathfence [parenta] [left=0x28,right=0x29,command=yes]
+\definemathfraction [parentb] [left=0x28,right=0x29,rule=auto]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+Of course the \type {b} variant takes less code:
+
+\startbuffer
+\startformula
+\parenta{\frac{a}{b}} + \parentb{a}{b}
+\stopformula
+\stopbuffer
+
+\typebuffer
+
+But watch how the parentheses are also larger. At some point \CONTEXT\ will
+provide a bit more control over this.
+
+\getbuffer
+
+You can also influence the width of the rule, but that is not related to the
+style.
+
+\startbuffer
+\definemathfraction
+ [wfrac]
+ [margin=.25em]
+
+\definemathfraction
+ [wwfrac]
+ [margin=.50em]
+
+\startformula
+ \frac { a } { \frac { b } { c } } +
+ \wfrac { a } { \frac { b } { c } } =
+ \wwfrac { 2a } { \frac { 2b } { 2c } }
+\stopformula
+\stopbuffer
+
+\typebuffer
+
+Both the numerator and denominator are widened by the margin:
+
+\getbuffer
+
+\stopsection
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-metafun.tex b/doc/context/sources/general/manuals/about/about-metafun.tex
new file mode 100644
index 000000000..8daff05a7
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-metafun.tex
@@ -0,0 +1,834 @@
+% language=uk
+
+\startcomponent about-metafun
+
+\environment about-environment
+
+\startchapter[title={\LUA\ in \METAPOST}]
+
+% Hans Hagen, PRAGMA ADE, April 2014
+
+\startsection[title=Introduction]
+
+Already for some years I have been wondering how it would be if we could escape
+to \LUA\ inside \METAPOST, or in practice, in \MPLIB\ in \LUATEX. The idea is
+simple: embed \LUA\ code in a \METAPOST\ file that gets run as soon as it's seen.
+In case you wonder why \LUA\ code makes sense, imagine generating graphics using
+external data. The capabilities of \LUA\ to deal with that is more flexible and
+advanced than in \METAPOST. Of course we could generate a \METAPOST\ definition
+of a graphic from data but it often makes more sense to do the reverse. I finally
+found time and reason to look into this and in the following sections I will
+describe how it's done.
+
+\stopsection
+
+\startsection[title=The basics]
+
+The approach is comparable to \LUATEX's \type {\directlua}. That primitive can be
+used to execute \LUA\ code and in combination with \type {tex.print} we can pipe
+strings back into the \TEX\ input stream. A complication is that we have to be
+able to operate under different so called catcode regimes: the meaning of
+characters can differ per regime. We also have to deal with line endings in
+special ways as they relate to paragraphs and such. In \METAPOST\ we don't have
+that complication so getting back input into the \METAPOST\ input, we can do so
+with simple strings. For that a mechanism similar to \type {scantokens} can be
+used. That way we can return anything (including nothing) as long as \METAPOST\
+can interpret it and as long as it fulfils the expectations.
+
+\starttyping
+numeric n ; n := scantokens("123.456") ;
+\stoptyping
+
+A script is run as follows:
+
+\starttyping
+numeric n ; n := runscript("return '123.456'") ;
+\stoptyping
+
+This primitive doesn't have the word \type {lua} in its name so in principle any
+wrapper around the library can use it as a hook. In the case of \LUATEX\ the
+script language is of course \LUA. At the \METAPOST\ end we only expect a string.
+How that string is constructed is completely up to the \LUA\ script. In fact, the
+user is completely free to implement the runner any way she or he wants, like:
+
+\starttyping
+local function scriptrunner(code)
+ local f = loadstring(code)
+ if f then
+ return tostring(f())
+ else
+ return ""
+ end
+end
+\stoptyping
+
+This is hooked into an instance as follows:
+
+\starttyping
+local m = mplib.new {
+ ...
+ run_script = scriptrunner,
+ ...
+}
+\stoptyping
+
+Now, beware, this is not the \CONTEXT\ way. We provide print functions and other
+helpers, which we will explain in the next section.
+
+\stopsection
+
+\startsection[title=Helpers]
+
+After I got this feature up and running I played a bit with possible interfaces
+at the \CONTEXT\ (read: \METAFUN) end and ended up with a bit more advanced runner
+where no return value is used. The runner is wrapped in the \type {lua} macro.
+
+\startbuffer
+numeric n ; n := lua("mp.print(12.34567)") ;
+draw textext(n) xsized 4cm withcolor maincolor ;
+\stopbuffer
+
+\typebuffer
+
+This renders as:
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+In case you wonder how efficient calling \LUA\ is, don't worry: it's fast enough,
+especially if you consider suboptimal \LUA\ code and the fact that we switch
+between machineries.
+
+\startbuffer
+draw image (
+ lua("statistics.starttiming()") ;
+ for i=1 upto 10000 : draw
+ lua("mp.pair(math.random(-200,200),math.random(-50,50))") ;
+ endfor ;
+ setbounds currentpicture to fullsquare xyscaled (400,100) ;
+ lua("statistics.stoptiming()") ;
+ draw textext(lua("mp.print(statistics.elapsedtime())"))
+ ysized 50 ;
+) withcolor maincolor withpen pencircle scaled 1 ;
+\stopbuffer
+
+\typebuffer
+
+Here the line:
+
+\starttyping
+draw lua("mp.pair(math.random(-200,200),math.random(-50,50))") ;
+\stoptyping
+
+effectively becomes (for instance):
+
+\starttyping
+draw scantokens "(25,40)" ;
+\stoptyping
+
+which in turn becomes:
+
+\starttyping
+draw scantokens (25,40) ;
+\stoptyping
+
+The same happens with this:
+
+\starttyping
+draw textext(lua("mp.print(statistics.elapsedtime())")) ...
+\stoptyping
+
+This becomes for instance:
+
+\starttyping
+draw textext(scantokens "1.23") ...
+\stoptyping
+
+and therefore:
+
+\starttyping
+draw textext(1.23) ...
+\stoptyping
+
+We can use \type {mp.print} here because the \type {textext} macro can deal with
+numbers. The following also works:
+
+\starttyping
+draw textext(lua("mp.quoted(statistics.elapsedtime())")) ...
+\stoptyping
+
+Now we get (in \METAPOST\ speak):
+
+\starttyping
+draw textext(scantokens (ditto & "1.23" & ditto)) ...
+\stoptyping
+
+Here \type {ditto} represents the double quotes that mark a string. Of course,
+because we pass the strings directly to \type {scantokens}, there are no outer
+quotes at all, but this is how it can be simulated. In the end we have:
+
+\starttyping
+draw textext("1.23") ...
+\stoptyping
+
+What print variant you use, \type {mp.print} or \type {mp.quoted}, depends on
+what the expected code is: an assignment to a numeric can best be a number or an
+expression resulting in a number.
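+
+A minimal sketch of that difference:
+
+\starttyping
+numeric n ; n := lua("mp.print(123.456)") ;    % feeds back: 123.456
+string  s ; s := lua("mp.quoted('123.456')") ; % feeds back: "123.456"
+\stoptyping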
+
+This graphic becomes:
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+The runtime on my current machine is some 0.25 seconds without and 0.12 seconds
+with caching. But to be honest, speed is not really a concern here as the amount
+of complex \METAPOST\ graphics can be neglected compared to extensive node list
+manipulation. Generating the graphic with \LUAJITTEX\ takes 15\% less time.
+\footnote {Processing a small 8 page document like this takes about one second,
+which includes loading a bunch of fonts.}
+
+\startbuffer
+numeric n ; n := lua("mp.print(1) mp.print('+') mp.print(2)") ;
+draw textext(n) xsized 1cm withcolor maincolor ;
+\stopbuffer
+
+The three print commands accumulate their arguments:
+
+\typebuffer
+
+As expected we get:
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+\startbuffer
+numeric n ; n := lua("mp.print(1,'+',2)") ;
+draw textext(n) xsized 1cm withcolor maincolor ;
+\stopbuffer
+
+Equally valid is:
+
+\typebuffer
+
+This gives the same result:
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+Of course all kinds of actions can happen between the prints. It is also legal to
+have nothing returned, as could be seen in the 10.000 dot example: there the timer
+related code returns nothing, so effectively we have \type {scantokens("")}.
+The \type {mp.quoted} helper also accepts a format specifier, as in:
+
+\startbuffer
+draw
+ textext(lua("mp.quoted('@0.3f'," & decimal n & ")"))
+ withcolor maincolor ;
+\stopbuffer
+
+\typebuffer
+
+This typesets \processMPbuffer. Note the \type {@}. When no percent character is
+found in the format specifier, we assume that an \type {@} is used instead.
+
+\startbuffer
+\startluacode
+table.save("demo-data.lua",
+ {
+ { 1, 2 }, { 2, 4 }, { 3, 3 }, { 4, 2 },
+ { 5, 2 }, { 6, 3 }, { 7, 4 }, { 8, 1 },
+ }
+)
+\stopluacode
+\stopbuffer
+
+But, the real benefit of embedded \LUA\ is when we deal with data that is stored
+at the \LUA\ end. First we define a small dataset:
+
+\typebuffer
+
+\getbuffer
+
+There are several ways to deal with this table. I will show clumsy as well as
+better looking ways.
+
+\startbuffer
+lua("MP = { } MP.data = table.load('demo-data.lua')") ;
+numeric n ;
+lua("mp.print('n := ',\#MP.data)") ;
+for i=1 upto n :
+ drawdot
+ lua("mp.pair(MP.data[" & decimal i & "])") scaled cm
+ withpen pencircle scaled 2mm
+ withcolor maincolor ;
+endfor ;
+\stopbuffer
+
+\typebuffer
+
+Here we load a \LUA\ table and assign the size to a \METAPOST\ numeric. Next we
+loop over the table entries and draw the coordinates.
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+We will stepwise improve this code. In the previous examples we omitted wrapper
+code but here we show it:
+
+\startbuffer
+\startluacode
+ MP.data = table.load('demo-data.lua')
+ function MP.n()
+ mp.print(#MP.data)
+ end
+ function MP.dot(i)
+ mp.pair(MP.data[i])
+ end
+\stopluacode
+
+\startMPcode
+ numeric n ; n := lua("MP.n()") ;
+ for i=1 upto n :
+ drawdot
+ lua("MP.dot(" & decimal i & ")") scaled cm
+ withpen pencircle scaled 2mm
+ withcolor maincolor ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+So, we create a few helpers in the \type {MP} table. This table is predefined so
+normally you don't need to define it. You may however decide to wipe it clean.
+
+\startlinecorrection[blank]
+\getbuffer
+\stoplinecorrection
+
+You can decide to hide the data:
+
+\startbuffer
+\startluacode
+ local data = { }
+ function MP.load(name)
+ data = table.load(name)
+ end
+ function MP.n()
+ mp.print(#data)
+ end
+ function MP.dot(i)
+ mp.pair(data[i])
+ end
+\stopluacode
+\stopbuffer
+
+\typebuffer \getbuffer
+
+It is possible to use less \LUA, for instance in:
+
+\startbuffer
+\startluacode
+ local data = { }
+ function MP.loaded(name)
+ data = table.load(name)
+ mp.print(#data)
+ end
+ function MP.dot(i)
+ mp.pair(data[i])
+ end
+\stopluacode
+
+\startMPcode
+ for i=1 upto lua("MP.loaded('demo-data.lua')") :
+ drawdot
+ lua("MP.dot(",i,")") scaled cm
+ withpen pencircle scaled 4mm
+ withcolor maincolor ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+Here we also omit the \type {decimal} because the \type {lua} macro is clever
+enough to recognize it as a number.
+
+\startlinecorrection[blank]
+\getbuffer
+\stoplinecorrection
+
+By using some \METAPOST\ magic we can even go a step further in readability:
+
+\startbuffer
+\startMPcode{doublefun}
+ lua.MP.load("demo-data.lua") ;
+
+ for i=1 upto lua.MP.n() :
+ drawdot
+ lua.MP.dot(i) scaled cm
+ withpen pencircle scaled 4mm
+ withcolor maincolor ;
+ endfor ;
+
+ for i=1 upto MP.n() :
+ drawdot
+ MP.dot(i) scaled cm
+ withpen pencircle scaled 2mm
+ withcolor white ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+Here we demonstrate that it also works well in \type {double} mode, which makes
+much sense when processing data from other sources. Note how we omit the
+\type {lua.} prefix: the \type {MP} macro will deal with that.
+
+\startlinecorrection[blank]
+\getbuffer
+\stoplinecorrection
+
+So in the end we can simplify the code that we started with to:
+
+\starttyping
+\startMPcode{doublefun}
+ for i=1 upto MP.loaded("demo-data.lua") :
+ drawdot
+ MP.dot(i) scaled cm
+ withpen pencircle scaled 2mm
+ withcolor maincolor ;
+ endfor ;
+\stopMPcode
+\stoptyping
+
+\stopsection
+
+\startsection[title=Access to variables]
+
+The question with such mechanisms is always: how far should we go. Although
+\METAPOST\ is a macro language, it has properties of procedural languages. It also
+has more introspective features at the user end. For instance, one can loop over
+the resulting picture and manipulate it. This means that we don't need full
+access to \METAPOST\ internals. However, it makes sense to provide access to
+basic variables: \type {numeric}, \type {string}, and \type {boolean}.
+
+\startbuffer
+draw textext(lua("mp.quoted('@0.15f',mp.get.numeric('pi')-math.pi)"))
+ ysized 1cm
+ withcolor maincolor ;
+\stopbuffer
+
+\typebuffer
+
+In double mode you will get zero printed but in scaled mode we definitely get a
+different result:
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+\startbuffer
+boolean b ; b := true ;
+draw textext(lua("mp.quoted(mp.get.boolean('b') and 'yes' or 'no')"))
+ ysized 1cm
+ withcolor maincolor ;
+\stopbuffer
+
+In the next example we use \type {mp.quoted} to make sure that indeed we pass a
+string. The \type {textext} macro can deal with numbers, but an unquoted \type
+{yes} or \type {no} is asking for problems.
+
+\typebuffer
+
+Especially when more text is involved it makes sense to predefine a helper in
+the \type {MP} namespace, if only because \METAPOST\ (currently) doesn't like
+newlines in the middle of a string, so a \type {lua} call has to be on one line.
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
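+
+Such a predefined helper could look like this (a sketch; the name \type
+{MP.yesno} is made up for this example):
+
+\starttyping
+\startluacode
+    function MP.yesno(name)
+        mp.quoted(mp.get.boolean(name) and "yes" or "no")
+    end
+\stopluacode
+
+\startMPcode
+    boolean b ; b := true ;
+    draw textext(lua("MP.yesno('b')")) ysized 1cm ;
+\stopMPcode
+\stoptyping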
+
+Here is an example where \LUA\ does something that would be close to impossible,
+especially if more complex text is involved.
+
+% \enabletrackers[metapost.lua]
+
+\startbuffer
+string s ; s := "ΤΕΧ" ; % "τεχ"
+draw textext(lua("mp.quoted(characters.lower(mp.get.string('s')))"))
+ ysized 1cm
+ withcolor maincolor ;
+\stopbuffer
+
+\typebuffer
+
+As you can see here, the whole repertoire of helper functions can be used in
+a \METAFUN\ definition.
+
+\startlinecorrection[blank]
+\processMPbuffer
+\stoplinecorrection
+
+\stopsection
+
+\startsection[title=The library]
+
+In \CONTEXT\ we have a dedicated runner, but for the record we mention the
+low level constructor:
+
+\starttyping
+local m = mplib.new {
+ ...
+ script_runner = function(s) return loadstring(s)() end,
+ script_error = function(s) print(s) end,
+ ...,
+}
+\stoptyping
+
+An instance (in this case \type {m}) has a few extra methods. Alternatively you can use
+the helpers in the library.
+
+\starttabulate[|l|l|]
+\HL
+\NC \type {m:get_numeric(name)} \NC returns a numeric (double) \NC \NR
+\NC \type {m:get_boolean(name)} \NC returns a boolean (\type {true} or \type {false}) \NC \NR
+\NC \type {m:get_string (name)} \NC returns a string \NC \NR
+\HL
+\NC \type {mplib.get_numeric(m,name)} \NC returns a numeric (double) \NC \NR
+\NC \type {mplib.get_boolean(m,name)} \NC returns a boolean (\type {true} or \type {false}) \NC \NR
+\NC \type {mplib.get_string (m,name)} \NC returns a string \NC \NR
+\HL
+\stoptabulate
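+
+For instance, outside \CONTEXT\ one could query an instance directly (a
+sketch, assuming an instance \type {m} created as above):
+
+\starttyping
+m:execute("numeric n ; n := 123.456 ;")
+print(m:get_numeric("n"))         -- 123.456
+print(mplib.get_numeric(m,"n"))   -- the same, via the library helper
+\stoptyping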
+
+In \CONTEXT\ the instances are hidden and wrapped in high level macros, so there
+you cannot use these commands.
+
+\stopsection
+
+\startsection[title=\CONTEXT\ helpers]
+
+The \type {mp} namespace provides the following helpers:
+
+\starttabulate[|l|l|]
+\HL
+\NC \type {print(...)} \NC returns one or more values \NC \NR
+\NC \type {pair(x,y)}
+ \type {pair(t)} \NC returns a proper pair \NC \NR
+\NC \type {triplet(x,y,z)}
+ \type {triplet(t)} \NC returns an \RGB\ color \NC \NR
+\NC \type {quadruple(w,x,y,z)}
+ \type {quadruple(t)} \NC returns an \CMYK\ color \NC \NR
+\NC \type {format(fmt,...)} \NC returns a formatted string \NC \NR
+\NC \type {quoted(fmt,...)}
+ \type {quoted(s)} \NC returns a (formatted) quoted string \NC \NR
+\NC \type {path(t[,connect][,close])} \NC returns a connected (closed) path \NC \NR
+\HL
+\stoptabulate
+
+The \type {mp.get} namespace provides the following helpers:
+
+\starttabulate[|l|l|]
+\HL
+\NC \type {numeric(name)} \NC gets a numeric from \METAPOST \NC \NR
+\NC \type {boolean(name)} \NC gets a boolean from \METAPOST \NC \NR
+\NC \type {string(name)} \NC gets a string from \METAPOST \NC \NR
+\HL
+\stoptabulate
+
+\stopsection
+
+\startsection[title=Paths]
+
+% {\em This section will move to the metafun manual.} \blank
+
+In the meantime we got several questions on the \CONTEXT\ mailing list about turning
+coordinates into paths. Now imagine that we have this dataset:
+
+\startbuffer[dataset]
+10 20 20 20 -- sample 1
+30 40 40 60
+50 10
+
+10 10 20 30 % sample 2
+30 50 40 50
+50 20
+
+10 20 20 10 # sample 3
+30 40 40 20
+50 10
+\stopbuffer
+
+\typebuffer[dataset]
+
+In this case I have put the data in a buffer, so that it can be shown
+here, as well as used in a demo. Look how we can add comments. The
+following code converts this into a table with three subtables.
+
+\startbuffer
+\startluacode
+ MP.myset = mp.dataset(buffers.getcontent("dataset"))
+\stopluacode
+\stopbuffer
+
+\typebuffer \getbuffer
+
+We use the \type {MP} (user) namespace to store the table. Next we turn
+these subtables into paths:
+
+\startbuffer
+\startMPcode
+ for i=1 upto lua("mp.print(mp.n(MP.myset))") :
+ draw
+ lua("mp.path(MP.myset[" & decimal i & "])")
+ xysized (HSize,10ExHeight)
+ withpen pencircle scaled .25ExHeight
+ withcolor basiccolors[i]/2 ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+This gives:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+Instead we can fill the path, in which case we will also need to close it. The
+\type {true} argument deals with that:
+
+\startbuffer
+\startMPcode
+ for i=1 upto lua("mp.print(mp.n(MP.myset))") :
+ path p ; p :=
+ lua("mp.path(MP.myset[" & decimal i & "],true)")
+ xysized (HSize,10ExHeight) ;
+ fill p
+ withcolor basiccolors[i]/2
+ withtransparency (1,.5) ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+We get:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+\startbuffer
+\startMPcode
+ for i=1 upto lua("mp.print(mp.n(MP.myset))") :
+ path p ; p :=
+ lua("mp.path(MP.myset[" & decimal i & "])")
+ xysized (HSize,10ExHeight) ;
+ p :=
+ (xpart llcorner boundingbox p,0) --
+ p --
+ (xpart lrcorner boundingbox p,0) --
+ cycle ;
+ fill p
+ withcolor basiccolors[i]/2
+ withtransparency (1,.25) ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+The following makes more sense:
+
+\typebuffer
+
+So this gives:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+This (area) fill is so common that we have a helper for it:
+
+\startbuffer
+\startMPcode
+ for i=1 upto lua("mp.size(MP.myset)") :
+ fill area
+ lua("mp.path(MP.myset[" & decimal i & "])")
+ xysized (HSize,5ExHeight)
+ withcolor basiccolors[i]/2
+ withtransparency (2,.25) ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+So this gives:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+This snippet of \METAPOST\ code still looks kind of horrible, so how can we make
+it look better? Here is an attempt. First we define a bit more \LUA:
+
+\startbuffer
+\startluacode
+local data = mp.dataset(buffers.getcontent("dataset"))
+
+MP.dataset = {
+ Line = function(n) mp.path(data[n]) end,
+ Size = function() mp.size(data) end,
+}
+\stopluacode
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startbuffer
+\startMPcode
+ for i=1 upto lua.MP.dataset.Size() :
+ path p ; p :=
+ lua.MP.dataset.Line(i)
+ xysized (HSize,20ExHeight) ;
+ draw
+ p
+ withpen pencircle scaled .25ExHeight
+ withcolor basiccolors[i]/2 ;
+ drawpoints
+ p
+ withpen pencircle scaled ExHeight
+ withcolor .5white ;
+ endfor ;
+\stopMPcode
+\stopbuffer
+
+We can now make the \METAPOST\ look more natural. Of course, this is possible
+because in \METAFUN\ the \type {lua} macro does some extra work.
+
+\typebuffer
+
+As expected, we get the desired result:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+Once we start making things look nicer and more convenient, we quickly end up
+with helpers like those in the next example. First we save some demo data in
+files:
+
+\startbuffer
+\startluacode
+ io.savedata("foo.tmp","10 20 20 20 30 40 40 60 50 10")
+ io.savedata("bar.tmp","10 10 20 30 30 50 40 50 50 20")
+\stopluacode
+\stopbuffer
+
+\typebuffer \getbuffer
+
+We load the data in datasets:
+
+\startbuffer
+\startMPcode
+ lua.mp.datasets.load("foo","foo.tmp") ;
+ lua.mp.datasets.load("bar","bar.tmp") ;
+ fill area
+ lua.mp.datasets.foo.Line()
+ xysized (HSize/2-EmWidth,10ExHeight)
+ withpen pencircle scaled .25ExHeight
+ withcolor green/2 ;
+ fill area
+ lua.mp.datasets.bar.Line()
+ xysized (HSize/2-EmWidth,10ExHeight)
+ shifted (HSize/2+EmWidth,0)
+ withpen pencircle scaled .25ExHeight
+ withcolor red/2 ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+Because the datasets are stored by name, we can use them without worrying about
+them being forgotten:
+
+\startlinecorrection[blank] \getbuffer \stoplinecorrection
+
+If no tag is given, the filename (without suffix) is used as a tag, so the
+following is valid:
+
+\starttyping
+\startMPcode
+ lua.mp.datasets.load("foo.tmp") ;
+ lua.mp.datasets.load("bar.tmp") ;
+\stopMPcode
+\stoptyping
+
+The following methods are defined for a dataset:
+
+\starttabulate[|l|pl|]
+\HL
+\NC \type {method} \NC usage \NC \NR
+\HL
+\NC \type {Size} \NC the number of subsets in a dataset \NC \NR
+\NC \type {Line} \NC the joined pairs in a dataset making a non|-|closed path \NC \NR
+\NC \type {Data} \NC the table containing the data (in subsets, so there is always at least one subset) \NC \NR
+\HL
+\stoptabulate
+
+{\em Due to limitations in \METAPOST\ suffix handling the methods start with an
+uppercase character.}
+
+\stopsection
+
+\startsection[title=Remark]
+
+The features described here are currently still experimental but the interface
+will not change. There might be a few more accessors and for sure more \LUA\
+helpers will be provided. As usual I need some time to play with it before I make
+up my mind. It is also possible to optimize the \METAPOST||\LUA\ script call a
+bit, but I might do that later.
+
+When we played with this interface we ran into problems with loop variables
+and macro arguments. These are internally kind of anonymous. Take this:
+
+\starttyping
+for i=1 upto 100 : draw(i,i) endfor ;
+\stoptyping
+
+The \type {i} is not really a variable with name \type {i} but becomes an object
+(capsule) when the condition is scanned, and a reference to that object when the
+body is scanned. The body of the for loop gets expanded for each step, but at that
+time there is no longer a variable \type {i}. The same is true for variables in:
+
+\starttyping
+def foo(expr x, y, delta) = draw (x+delta,y+delta) enddef ;
+\stoptyping
+
+We are still trying to get this right with the \LUA\ interface. Interestingly,
+when we were exploring this, we ran into quite some cases where we could
+make \METAPOST\ abort due to some memory or stack overflow. Some are just bugs in
+the new code (due to the new number model) while others come with the design of
+the system: border cases that never seem to happen in interactive use while the
+library use assumes no interaction in case of errors.
+
+In \CONTEXT\ there are more features and helpers than shown here but these are
+discussed in the \METAFUN\ manual.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
+
+% \startMPcode{doublefun}
+% numeric n ; n := 123.456 ;
+% lua("print('>>>>>>>>>>>> number',mp.get.number('n'))") ;
+% lua("print('>>>>>>>>>>>> number',mp.get.boolean('n'))") ;
+% lua("print('>>>>>>>>>>>> number',mp.get.string('n'))") ;
+% boolean b ; b := true ;
+% lua("print('>>>>>>>>>>>> boolean',mp.get.number('b'))") ;
+% lua("print('>>>>>>>>>>>> boolean',mp.get.boolean('b'))") ;
+% lua("print('>>>>>>>>>>>> boolean',mp.get.string('b'))") ;
+% string s ; s := "TEST" ;
+% lua("print('>>>>>>>>>>>> string',mp.get.number('s'))") ;
+% lua("print('>>>>>>>>>>>> string',mp.get.boolean('s'))") ;
+% lua("print('>>>>>>>>>>>> string',mp.get.string('s'))") ;
+% \stopMPcode
+
diff --git a/doc/context/sources/general/manuals/about/about-mobility.tex b/doc/context/sources/general/manuals/about/about-mobility.tex
new file mode 100644
index 000000000..c844ea2f7
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-mobility.tex
@@ -0,0 +1,205 @@
+% language=uk
+
+\startcomponent about-mobility
+
+\environment about-environment
+
+\startchapter[title=Flash forward]
+
+\startsection[title=Introduction]
+
+At the 2013 \CONTEXT\ meeting in Breslov, Harald König took some of his
+gadgets with him and this time the target was to get \CONTEXT\ running on small
+devices, most notably a mobile phone. You may wonder what purpose this serves,
+but with such devices becoming more powerful each year, and desktops and laptops
+getting less popular, we might see the small devices taking their place.
+Especially when we can dock them in a cradle and connect them to a proper monitor
+and keyboard we might end up with universal devices. Combine that with projection
+on our retinas and less tactile input and it will be clear that we should at
+least look into this from the perspective of \TEX\ usage.
+
+\stopsection
+
+\startsection[title=The tests]
+
+We used five tests for measuring basic performance. Of course we made sure that
+binaries and resources were cached.
+
+Test 1 measures some basics, like typesetting a paragraph, flushing pages and
+loading a file. Because we do lots of pages we can also see if garbage collection
+is a problem.
+
+\typefile{pi-speed-1.tex}
+
+A normal \CONTEXT\ run is triggered with:
+
+\starttyping
+context speed-1
+\stoptyping
+
+but with
+
+\starttyping
+context --timing speed-1
+\stoptyping
+
+memory consumption is measured, and one can generate a visual representation of
+this afterwards with:
+
+\starttyping
+context --extra=timing speed-1
+\stoptyping
+
+We don't show them here, simply because we saw nothing exciting in the ones
+for these tests.
+
+The second test is rather stupid but it gives an indication of how efficient the
+base pagebuilder is:
+
+\typefile{pi-speed-2.tex}
+
+The numbers are normally 10 to 20 times more impressive than those for regular runs.
+
+Test three is a variation on test one but this time we avoid the file being read
+in many times, so we inline \type{ward.tex}. We also add no page breaks so we get
+fewer pages but with more content.
+
+\typefile{pi-speed-3.tex}
+
+The fourth test draws a few \METAPOST\ graphics, which themselves use a bit of
+typeset text.
+
+\typefile{pi-speed-4.tex}
+
+The last test, number five, is more demanding. Here we use some colors (which
+stresses the backend) and a dynamic switch to smallcaps, which puts a bit of a
+burden on the \OPENTYPE\ handler.
+
+\typefile{pi-speed-5.tex}
+
+\stopsection
+
+\startsection[title=Regular laptops]
+
+We started measuring on Harald's laptop, a Lenovo X201i, and got the following
+timings (that matched our expectations). The second column shows the runtime, the
+last column the pages per second.
+
+\starttabulate[|l|r|r|]
+\BC speed-1 \NC 5.8 \NC 17.1 \NC \NR
+\BC speed-2 \NC 3.6 \NC 275.6 \NC \NR
+\BC speed-3 \NC 5.1 \NC 19.8 \NC \NR
+\BC speed-4 \NC 0.6 \NC 1.8 \NC \NR
+\BC speed-5 \NC 11.9 \NC 10.6 \NC \NR
+\stoptabulate
+
+Just for comparison, as I'm wrapping this up in 2016, on my current Dell 7600 I
+get these timings (the last two columns are with \LUAJITTEX):
+
+\starttabulate[|l|r|r|r|r|]
+\BC speed-1 \NC 4.6 \NC 21.9 \NC 3.0 \NC 33.5 \NC \NR
+\BC speed-2 \NC 3.6 \NC 278.2 \NC 2.8 \NC 357.7 \NC \NR
+\BC speed-3 \NC 4.2 \NC 23.6 \NC 2.7 \NC 37.0 \NC \NR
+\BC speed-4 \NC 0.8 \NC 1.3 \NC 0.6 \NC 1.7 \NC \NR
+\BC speed-5 \NC 6.2 \NC 20.3 \NC 4.0 \NC 31.9 \NC \NR
+\stoptabulate
+
+These tests were run with a \LUATEX\ 0.98 and the most recent \CONTEXT\
+\OPENTYPE\ font processor. As we do more in \LUA\ than a few years back, one
+can't expect a much faster run, even when the Dell has a faster processor than
+the Lenovo. However, what gets noticed is that the fifth speed test runs about
+twice as fast which is mostly due to improvements in the handling of \OPENTYPE\
+features.
+
+\stopsection
+
+\startsection[title=The Nexus IV]
+
+This mobile phone has a quad-core arm processor running at 1.5 GHz. With 2 GB
+memory this should be sufficient for running \TEX. The operating system is
+Android, which means that some effort is needed to put \TEX\ with its resources
+on the internal flash disk. Access was remote from a laptop.
+
+\starttabulate[|l|r|r|]
+\BC speed-1 \NC 41.9 \NC 2.4 \NC \NR
+\BC speed-2 \NC 27.5 \NC 36.4 \NC \NR
+\BC speed-3 \NC 38.7 \NC 2.6 \NC \NR
+\BC speed-4 \NC 3.4 \NC 3.0 \NC \NR
+\BC speed-5 \NC 87.9 \NC 1.4 \NC \NR
+\stoptabulate
+
+So it looks like the phone runs these tests about five times slower than the
+laptop. The fifth test is most stressful on the hardware but as noted, a more
+recent \CONTEXT\ will give better times there due to improvements in feature
+processing.
+
+\stopsection
+
+\startsection[title=The Raspberry Pi]
+
+The Pi (we're talking of the first model here) has an extension bus and can be
+used to control whatever device, but it has more the properties (and build) of a
+media player, and indeed there are dedicated installations for that. But as this
+popular small device can host any \LINUX\ distribution this is what was done. The
+distribution of choice was OpenSuse. The setup was really experimental with an
+unboxed Pi, an unframed \LCD\ panel, a keyboard and mouse, a power supply and
+some wires to connect this all. With an ethernet cable running directly to the
+router a distribution could be fetched and installed.
+
+This device has a single core arm processor running at 700 MHz with half a
+gigabyte of memory. Persistent memory is a flash card, not that fast but
+acceptable. The maximum read speed was some 20 MB per second. It was no real
+surprise that the set of tests ran much slower than on the phone.
+
+It took a bit of experimenting but a 200 MHz overclock of the \CPU\ combined with
+overclocked memory made performance jump up. In fact, we got a speed that we
+could somehow relate to the phone that has a more modern \CPU\ and runs at 1.5
+times that speed.
+
+Being a regular \LINUX\ setup, installation was more straightforward than on the
+phone but of course it took a while before all was in place. The default clock
+timings are:
+
+\starttabulate[|l|r|r|]
+\BC speed-1 \NC 95.841 \NC 1.043 \NC \NR
+\BC speed-2 \NC 76.817 \NC 13.018 \NC \NR
+\BC speed-3 \NC 84.890 \NC 1.178 \NC \NR
+\BC speed-4 \NC 13.241 \NC 0.076 \NC \NR
+\BC speed-5 \NC 192.288 \NC 0.660 \NC \NR
+\stoptabulate
+
+Again, the main conclusion here is that for documents that need lots of \OPENTYPE\
+feature juggling, this is not the best platform.
+
+\stopsection
+
+\startsection[title=Summary]
+
+We see small devices gaining more performance with each iteration than larger
+machines do. Their screens and input methods also evolve at a higher speed. The
+question is whether ARM will keep dominating this segment, but at least it is
+clear that these devices are usable for \TEX\ processing. Keep in mind that we
+used \LUATEX, which means that we also have \LUA\ with its garbage collector.
+Add \CONTEXT\ to that, which is not that small and preloads quite some
+resources, and it will be clear that these devices actually perform quite well,
+given slower memory, slower disks, small caches etc. With down|-|scaled Intel
+chips showing up it can only get better. Keep in mind that we only need one
+core, so the speed of one core matters more than having multiple cores
+available, although the other cores can be wasted on keeping up with your social
+demands on such a device in parallel with the \TEX\ run.
+
+A runtime five to ten times slower than on a decent laptop is not something that
+we look forward to in a production environment, but when you're on the road it
+is quite okay, especially if it can replace a somewhat heavy portable
+workstation, as it does for us. Okay, how much \TEX\ processing do you need when
+mobile, but still. As vendors of server hardware are looking into high density
+servers with lots of small fast processors, we might at some point actually use
+\TEX\ on such hardware. By then performance might be on par with virtual
+machines running on averagely loaded machines.
+
+We are pretty sure that at future \CONTEXT\ meetings more such experiments will
+be done, so we'll keep you posted.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-nodes.tex b/doc/context/sources/general/manuals/about/about-nodes.tex
new file mode 100644
index 000000000..f365f1fc4
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-nodes.tex
@@ -0,0 +1,603 @@
+% language=uk
+
+\usemodule[nodechart]
+
+\startcomponent about-nodes
+
+\environment about-environment
+
+\startchapter[title={Juggling nodes}]
+
+\startsection[title=Introduction]
+
+When you use \TEX, join the community, follow mailing lists, read manuals,
+and|/|or attend meetings, there will come a moment when you run into the word
+\quote {node}. But, as a regular user, even if you write macros, you can happily
+ignore them because in practice you will never really see them. They are hidden
+deep down in \TEX.
+
+Some expert \TEX ies love to talk about \TEX's mouth, stomach, gut and other
+presumed bodily elements. Maybe it is seen as proof of a deeper understanding of
+this program, as Don Knuth uses these analogies in his books about \TEX\ when he
+discusses how \TEX\ reads the input, translates it and digests it into something
+that can be printed or viewed. No matter how your input gets digested, at some
+point we get nodes. However, as users have no real access to the internals,
+nodes never show themselves to the user. They have no bodily analogy either.
+
+A character that is read from the input can become a character node. Multiple
+characters can become a linked list of nodes. Such a list can contain other
+kinds of nodes as well, for instance spaces become glue. There can also be
+penalties that steer the machinery. And kerns too: fixed displacements. Such a
+list can be wrapped in a box. In the process hyphenation is applied, characters
+become glyphs and intermediate math nodes become a combination of regular
+glyphs, kerns and glue, wrapped into boxes. So, an hbox that contains the three
+glyphs \type {tex} can be represented as follows:
+
+\startlinecorrection
+ \setupFLOWchart
+ [dx=2em,
+ dy=1em,
+ width=4em,
+ height=2em]
+ \setupFLOWshapes
+ [framecolor=maincolor]
+ \startFLOWchart[nodes]
+ \startFLOWcell
+ \name {box}
+ \location {1,1}
+ \shape {action}
+ \text {hbox}
+ \connection [rl] {t}
+ \stopFLOWcell
+ \startFLOWcell
+ \name {t}
+ \location {2,1}
+ \shape {action}
+ \text {t}
+ \connection [+t-t] {e}
+ \stopFLOWcell
+ \startFLOWcell
+ \name {e}
+ \location {3,1}
+ \shape {action}
+ \text {e}
+ \connection [+t-t] {x}
+ \connection [-b+b] {t}
+ \stopFLOWcell
+ \startFLOWcell
+ \name {x}
+ \location {4,1}
+ \shape {action}
+ \text {x}
+ \connection [-b+b] {e}
+ \stopFLOWcell
+ \stopFLOWchart
+ \FLOWchart[nodes]
+\stoplinecorrection
+
+Eventually a long sequence of nodes can become a paragraph of lines and each
+line is a box. The lines together make a page, which is also a box. There are
+many kinds of nodes but some are rather special and don't translate directly
+into some visible result. When dealing with \TEX\ as a user we can forget about
+nodes: we never really see them.
+
+In this example we see an hlist (hbox) node. Such a node has properties like
+width, height, depth, shift etc. The characters become glyph nodes that have
+(among other properties) a reference to a font, character, language.
+
+Because \TEX\ is also about math, and because math is somewhat special, we have
+noads, an intermediate kind of node that makes up a math list, which eventually
+gets transformed into a list of nodes. And, as proof of extensibility, Knuth came
+up with a special node that is more or less ignored by the machinery but travels
+with the list and can be dealt with in special backend code. Their name indicates
+what they're about: they are called whatsits (which sounds better than whatevers).
+In \LUATEX\ some whatsits are used in the frontend, for instance directional
+information is stored in whatsits.
+
+The \LUATEX\ engine not only opens up the \UNICODE\ and \OPENTYPE\ universes, but
+also the traditional \TEX\ engine. It gives us access to nodes. And this permits
+us to go beyond what was possible before and therefore on mailing lists like the
+\CONTEXT\ list, the word node will pop up more frequently. If you look into the
+\LUA\ files that ship with \CONTEXT\ you cannot avoid seeing them. And, when you
+use the \CLD\ interface you might even want to manipulate them. A nice side
+effect is that you can sound like an expert without having to refer to bodily
+aspects of \TEX: you just see them as some kind of \LUA\ userdata variable. And
+you access them like tables: they are abstract units with properties.
+
+\stopsection
+
+\startsection[title=Basics]
+
+Nodes are kind of special in the sense that you need to keep an eye on creation
+and destruction. In \TEX\ itself this is mostly hidden:
+
+\startbuffer
+\setbox0\hbox{some text}
+\stopbuffer
+
+\typebuffer
+
+If we look {\em into} this box we get a list of glyphs (see \in {figure}
+[fig:dummy:1]).
+
+\startplacefigure[reference=fig:dummy:1]
+ \getbuffer
+ \boxtoFLOWchart[dummy]{0}
+ \small
+ \FLOWchart[dummy][width=14em,height=3em,dx=1em,dy=.75em] % ,hcompact=yes]
+\stopplacefigure
+
+In \TEX\ you can flush such a box using \type {\box0} or copy it using \type
+{\copy0}. You can also flush the contents i.e.\ omit the wrapper using \type
+{\unhbox0} and \type {\unhcopy0}. The possibilities for disassembling the
+content of a box (or any list for that matter) are limited. In practice you
+can consider disassembling to be absent.
+
+This is different at the \LUA\ end: there we can really start at the beginning of
+a list, loop over it and see what's in there as well as change, add and remove
+nodes. The magic starts with:
+
+\starttyping
+local box = tex.box[0]
+\stoptyping
+
+Now we have a variable that has a so called \type {hlist} node. This node has not
+only properties like \type {width}, \type {height}, \type {depth} and \type
+{shift}, but also a pointer to the content: \type {list}.
+
+\starttyping
+local list = box.list
+\stoptyping
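+
+As a quick (hypothetical) illustration, the dimensions mentioned above can be
+inspected right away; they are given in scaled points, so we divide by 65536 to
+get points:
+
+\starttyping
+-- just a sketch: report the dimensions of box 0 to the log
+local box = tex.box[0]
+print(box.width/65536, box.height/65536, box.depth/65536)
+\stoptyping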
+
+Now, when we start messing with this list, we need to take into account that the
+nodes are in fact userdata objects, that is: they are efficient \TEX\ data
+structures that have a \LUA\ interface. At the \TEX\ end the repertoire of
+commands that we can use to flush boxes is rather limited and as we cannot mess
+with the content we have no memory management issues. However, at the \LUA\ end
+this is different. Nodes can have pointers to other nodes and they can even have
+special properties that relate to other resources in the program.
+
+Take this example:
+
+\starttyping
+\setbox0\hbox{some text}
+\directlua{node.write(tex.box[0])}
+\stoptyping
+
+At the \TEX\ end we wrap something in a box. Then we can at the \LUA\ end access
+that box and print it back into the input. However, as \TEX\ is no longer in
+control it cannot know that we already flushed the list. Keep in mind that this
+is a simple example, but imagine more complex content, that contains hyperlinks
+or so. Now take this:
+
+\starttyping
+\setbox0\hbox{some text 1}
+\setbox0\hbox{some text 2}
+\stoptyping
+
+Here \TEX\ knows that the box has content and it will free the memory beforehand
+and forget the first text. Or this:
+
+\starttyping
+\setbox0\hbox{some text}
+\box0 \box0
+\stoptyping
+
+The box will be used and after that it's empty so the second flush is basically a
+harmless null operation: nothing gets inserted. But this:
+
+\starttyping
+\setbox0\hbox{some text}
+\directlua{node.write(tex.box[0])}
+\directlua{node.write(tex.box[0])}
+\stoptyping
+
+will definitely fail. The first call flushes the box and the second one sees
+no box content and will bark. The best solution is to use a copy:
+
+\starttyping
+\setbox0\hbox{some text}
+\directlua{node.write(node.copy_list(tex.box[0]))}
+\stoptyping
+
+That way \TEX\ doesn't see a change in the box and will free it when needed: when
+it gets flushed, reassigned, at the end of a group, wherever.
+
+In \CONTEXT\ a somewhat shorter way of printing back to \TEX\ is the following
+and we will use that:
+
+\starttyping
+\setbox0\hbox{some text}
+\ctxlua{context(node.copy_list(tex.box[0]))}
+\stoptyping
+
+or shortcut into \CONTEXT:
+
+\starttyping
+\setbox0\hbox{some text}
+\cldcontext{node.copy_list(tex.box[0])}
+\stoptyping
+
+As we've now arrived at the \LUA\ end, we have more possibilities with nodes. In
+the next sections we will explore some of these.
+
+\stopsection
+
+\startsection[title=Management]
+
+The most important thing to keep in mind is that each node is unique in the sense
+that it can be used only once. If you don't need it and don't flush it, you
+should free it. If you need it more than once, you need to make a copy. But let's
+first start with creating a node.
+
+\starttyping
+local g = node.new("glyph")
+\stoptyping
+
+This node has some properties that need to be set. The most important are the font
+and the character. You can find more in the \LUATEX\ manual.
+
+\starttyping
+g.font = font.current()
+g.char = utf.byte("a")
+\stoptyping
+
+After this we can write it to the \TEX\ input:
+
+\starttyping
+context(g)
+\stoptyping
+
+This node is automatically freed afterwards. As we're talking \LUA\ you can use
+all kinds of commands that are defined in \CONTEXT. Take fonts:
+
+\startbuffer
+\startluacode
+local g1 = node.new("glyph")
+local g2 = node.new("glyph")
+
+g1.font = fonts.definers.internal {
+ name = "dejavuserif",
+ size = "60pt",
+}
+
+g2.font = fonts.definers.internal {
+ name = "dejavusansmono",
+ size = "60pt",
+}
+
+g1.char = utf.byte("a")
+g2.char = utf.byte("a")
+
+context(g1)
+context(g2)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+We get: \getbuffer, but there is one pitfall: the nodes have to be flushed in
+horizontal mode, so either put \type {\dontleavehmode} in front or add \type
+{context.dontleavehmode()}. If you get error messages like \typ {this can't
+happen} you probably forgot to enter horizontal mode.
+
+In \CONTEXT\ you have some helpers, for instance:
+
+\starttyping
+\startluacode
+local id = fonts.definers.internal { name = "dejavuserif" }
+
+context(nodes.pool.glyph(id,utf.byte("a")))
+context(nodes.pool.glyph(id,utf.byte("b")))
+context(nodes.pool.glyph(id,utf.byte("c")))
+\stopluacode
+\stoptyping
+
+or, when we need these functions a lot and want to save some typing:
+
+\startbuffer
+\startluacode
+local getfont = fonts.definers.internal
+local newglyph = nodes.pool.glyph
+local utfbyte = utf.byte
+
+local id = getfont { name = "dejavuserif" }
+
+context(newglyph(id,utfbyte("a")))
+context(newglyph(id,utfbyte("b")))
+context(newglyph(id,utfbyte("c")))
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+This renders as: \getbuffer. We can make copies of nodes too:
+
+\startbuffer
+\startluacode
+local id = fonts.definers.internal { name = "dejavuserif" }
+local a = nodes.pool.glyph(id,utf.byte("a"))
+
+for i=1,10 do
+ context(node.copy(a))
+end
+
+node.free(a)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+This gives: \getbuffer. Watch how afterwards we free the node. If we have not one
+node but a list (for instance because we use box content) you need to use the
+alternatives \type {node.copy_list} and \type {node.free_list} instead.
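+
+A minimal sketch of these list variants (assuming box 0 is still set as above):
+
+\starttyping
+local original = tex.box[0].list
+local copy     = node.copy_list(original)
+
+context(copy) -- the copy gets flushed, the original stays with the box
+
+local another = node.copy_list(original)
+node.free_list(another) -- a copy that is not flushed must be freed
+\stoptyping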
+
+In \CONTEXT\ there is a convenient helper to create a list of text nodes:
+
+\startbuffer
+\startluacode
+context(nodes.typesetters.tonodes("this works okay"))
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+And indeed, \getbuffer, even when we use spaces. Of course it makes
+more sense (and it is also more efficient) to do this:
+
+\startbuffer
+\startluacode
+context("this works okay")
+\stopluacode
+\stopbuffer
+
+In this case the list is constructed at the \TEX\ end. We have now learned enough
+to start using some convenient operations, so these are introduced next. Instead
+of the longer \type {tonodes} call we will use the shorter one:
+
+\starttyping
+local head, tail = string.tonodes("this also works")
+\stoptyping
+
+As you see, this constructor returns the head as well as the tail of the
+constructed list.
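+
+The tail comes in handy when we want to link lists ourselves, a sketch:
+
+\starttyping
+local head, tail = string.tonodes("this also works")
+local more       = string.tonodes(" and more")
+
+tail.next = more -- link the second list after the first one
+more.prev = tail
+
+context(head)
+\stoptyping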
+
+\stopsection
+
+\startsection[title=Operations]
+
+If you are familiar with \LUA\ you will recognize this kind of code:
+
+\starttyping
+local str = "time: " .. os.time()
+\stoptyping
+
+Here a string \type {str} is created that is built out of two concatenated
+snippets. And, \LUA\ is clever enough to see that it has to convert the number to
+a string.
+
+In \CONTEXT\ we can do the same with nodes:
+
+\startbuffer
+\startluacode
+local foo = string.tonodes("foo")
+local bar = string.tonodes("bar")
+local amp = string.tonodes(" & ")
+
+context(foo .. amp .. bar)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+This will append the two node lists: \getbuffer.
+
+\startbuffer
+\startluacode
+local l = string.tonodes("l")
+local m = string.tonodes(" ")
+local r = string.tonodes("r")
+
+context(5 * l .. m .. r * 5)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+You can have the multiplier on either side of the node: \getbuffer.
+Addition and subtraction is also supported but it comes in flavors:
+
+\startbuffer
+\startluacode
+local l1 = string.tonodes("aaaaaa")
+local r1 = string.tonodes("bbbbbb")
+local l2 = string.tonodes("cccccc")
+local r2 = string.tonodes("dddddd")
+local m = string.tonodes(" + ")
+
+context((l1 - r1) .. m .. (l2 + r2))
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+In this case, as we have two nodes (lists) involved in the addition and
+subtraction, we get one of them injected into the other: after the first, or
+before the last node. This might sound weird but it happens.
+
+\dontleavehmode \start \maincolor \getbuffer \stop
+
+We can use these operators to take a slice of the given node list.
+
+\startbuffer
+\startluacode
+local l = string.tonodes("123456")
+local r = string.tonodes("123456")
+local m = string.tonodes("+ & +")
+
+context((l - 3) .. (1 + m - 1).. (3 + r))
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+So we get snippets that get appended: \getbuffer. The unary minus operator
+reverses the list:
+
+\startbuffer
+\startluacode
+local l = string.tonodes("123456")
+local r = string.tonodes("123456")
+local m = string.tonodes(" & ")
+
+context(l .. m .. - r)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+This is probably not that useful, but it works as expected: \getbuffer.
+
+We saw that \type {*} makes copies but sometimes that is not enough. Consider the
+following:
+
+\startbuffer
+\startluacode
+local n = string.tonodes("123456")
+
+context((n - 2) .. (2 + n))
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+Because the slicer frees the unused nodes, the value of \type {n} in the second
+case is undefined. It still points to a node but that one already has been freed.
+So you get an error message. But of course (as already demonstrated) this is
+valid:
+
+\startbuffer
+\startluacode
+local n = string.tonodes("123456")
+
+context(2 + n - 2)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+We get the two middle characters: \getbuffer. So, how can we use a
+node (list) several times in an expression? Here is an example:
+
+\startbuffer
+\startluacode
+local l = string.tonodes("123")
+local m = string.tonodes(" & ")
+local r = string.tonodes("456")
+
+context((l^1 .. r^1)^2 .. m^1 .. r .. m .. l)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+Using \type {^} we create copies, so we can still use the original later on. You
+can best make sure that one reference to a node is not copied, because otherwise
+we get a memory leak. When you write the above without copying, \LUATEX\ will
+most likely end up in a loop. The result of the above is:
+
+\blank \start \dontleavehmode \maincolor \getbuffer \stop \blank
+
+Let's repeat it one more time: keep in mind that we need to do the memory
+management ourselves. In practice we will seldom need more than the
+concatenation, but if you make complex expressions be prepared to lose some
+memory when you copy and don't free them. As \TEX\ runs are normally limited in
+time this is hardly an issue.
+
+So what about division? We needed some kind of escape and, as with \type
+{lpeg}, we use the \type {/} to apply additional operations.
+
+\startbuffer
+\startluacode
+local l = string.tonodes("123")
+local m = string.tonodes(" & ")
+local r = string.tonodes("456")
+
+local function action(n)
+ for g in node.traverse_id(node.id("glyph"),n) do
+ g.char = string.byte("!")
+ end
+ return n
+end
+
+context(l .. m / action .. r)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+And indeed the middle glyph gets replaced: \getbuffer.
+
+\startbuffer
+\startluacode
+local l = string.tonodes("123")
+local r = string.tonodes("456")
+
+context(l .. nil .. r)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+When you construct lists programmatically it can happen that one of the
+components is nil and to some extent this is supported: so the above
+gives: \getbuffer.
+
+Here is a summary of the operators that are currently supported. Keep in mind that
+these are not built into \LUATEX\ but are extensions in \MKIV. After all, there
+are many ways to map operators onto actions and this is just one.
+
+\starttabulate[|l|l|]
+\NC \type{n1 .. n2} \NC append nodes (lists) \type {n1} and \type {n2}, no copies \NC \NR
+\NC \type{n * 5} \NC append 4 copies of node (list) \type {n} to \type {n} \NC \NR
+\NC \type{5 + n} \NC discard the first 5 nodes from list \type {n} \NC \NR
+\NC \type{n - 5} \NC discard the last 5 nodes from list \type {n} \NC \NR
+\NC \type{n1 + n2} \NC inject (list) \type {n2} after first of list \type {n1} \NC \NR
+\NC \type{n1 - n2} \NC inject (list) \type {n2} before last of list \type {n1} \NC \NR
+\NC \type{n^2}     \NC make two copies of node (list) \type {n} and keep the original \NC \NR
+\NC \type{- n} \NC reverse node (list) \type {n} \NC \NR
+\NC \type{n / f} \NC apply function \type {f} to node (list) \type {n} \NC \NR
+\stoptabulate
+
+As mentioned, you can only use a node or list once, so when you need it more
+than once, you need to make copies. For example:
+
+\startbuffer
+\startluacode
+local l = string.tonodes( -- maybe: nodes.maketext
+ " 1 2 3 "
+)
+local r = nodes.tracers.rule( -- not really a user helper (spec might change)
+ string.todimen("1%"), -- or maybe: nodes.makerule("1%",...)
+ string.todimen("2ex"),
+ string.todimen(".5ex"),
+ "maincolor"
+)
+
+context(30 * (r^1 .. l) .. r)
+\stopluacode
+\stopbuffer
+
+\typebuffer
+
+This gives a mix of glyphs, glue and rules: \getbuffer. Of course you can wonder
+how often this kind of juggling happens in use cases but at least in some core
+code the concatenation (\type {..}) gives a bit more readable code and the
+overhead is quite acceptable.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-nuts.tex b/doc/context/sources/general/manuals/about/about-nuts.tex
new file mode 100644
index 000000000..9ca1ba345
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-nuts.tex
@@ -0,0 +1,619 @@
+% language=uk
+
+\startcomponent about-nuts
+
+\environment about-environment
+
+\startchapter[title={Going nuts}]
+
+\startsection[title=Introduction]
+
+This is not the first story about speed and it will probably not be the last one
+either. This time we discuss a substantial speedup: up to 50\% with \LUAJITTEX.
+So, if you don't want to read further at least know that this speedup came at the
+cost of lots of testing and adapting code. Of course you could be one of those
+users who doesn't care about that and it may also be that your documents don't
+qualify at all.
+
+Often when I see a kid playing a modern computer game, I wonder how it gets done:
+all that high speed rendering, complex environments, shading, lighting,
+inter||player communication, many frames per second, adapted story lines,
+\unknown. Apart from clever programming, quite some of the work gets done by
+multiple cores working together, but above all the graphics and physics
+processors take much of the workload. The market has driven the development of
+this hardware and with success. In this perspective it's not that much of a
+surprise that complex \TEX\ jobs still take some time to get finished: all the
+hard work has to be done by interpreted languages using rather traditional
+hardware. Of course all kinds of clever tricks make processors perform better
+than years ago, but still: we don't get much help from specialized hardware.
+\footnote {Apart from proper rendering on screen and printing on paper.} We're
+sort of stuck: when I replaced my 6 year old laptop (when I buy one, I always
+buy the fastest one possible) with a new one (so again a fast one) the gain in
+speed of processing a document was less than twice. The many times faster
+graphic capabilities are not of much help there, nor is twice the number of
+cores.
+
+So, if we ever want to go much faster, we need to improve the software. The
+reason for trying to speed up \MKIV\ has been mentioned before, but let's
+summarize it here:
+
+\startitemize
+
+\startitem
+ There was a time when users complained about the speed of \CONTEXT,
+ especially compared to other macro packages. I'm not so sure if this is still
+ a valid complaint, but I do my best to avoid bottlenecks and much time goes
+ into testing efficiency.
+\stopitem
+
+\startitem
+ Computers don't get that much faster, at least we don't see an impressive
+ boost each year any more. We might even see a slowdown when battery live
+    boost each year any more. We might even see a slowdown when battery life
+ suit current \TEX\ engines well. Of course we assume that \TEX\ will be
+ around for some time.
+\stopitem
+
+\startitem
+    Especially in automated workflows, where multiple products each demanding a
+    couple of runs are produced, speed pays back in terms of resources and
+ response time. Of course the time invested in the speedup is never regained
+ by ourselves, but we hope that users appreciate it.
+\stopitem
+
+\startitem
+ The more we do in \LUA, read: the more demanding users get and the more
+ functionality is enabled, the more we need to squeeze out of the processor.
+ And we want to do more in \LUA\ in order to get better typeset results.
+\stopitem
+
+\startitem
+ Although \LUA\ is pretty fast, future versions might be slower. So, the more
+ efficient we are, the less we probably suffer from changes.
+\stopitem
+
+\startitem
+ Using more complex scripts and fonts is so demanding that the number of pages
+ per second drops dramatically. Personally I consider a rate of 15 pps with
+ \LUATEX\ or 20 pps with \LUAJITTEX\ reasonable minima on my laptop. \footnote
+ {A Dell 6700 laptop with Core i7 3840QM, 16 GB memory and SSD, running 64 bit
+ Windows 8.}
+\stopitem
+
+\startitem
+ Among the reasons why \LUAJIT\ jitting does not help us much is that (at
+ least in \CONTEXT) we don't use that many core functions that qualify for
+ jitting. Also, as runs are limited in time and much code kicks in only a few
+ times the analysis and compilation doesn't pay back in runtime. So we cannot
+ simply sit down and wait till matters improve.
+\stopitem
+
+\stopitemize
+
+Luigi Scarso and I have been exploring several options, with \LUATEX\ as well as
+\LUAJITTEX. We observed that the virtual machine in \LUAJITTEX\ is much faster so
+that engine already gives a boost. The advertised jit feature can best be
+disabled as it slows down a run noticeably. We played with \type {ffi} as well,
+but there is additional overhead involved (\type {cdata}) as well as limited
+support for userdata, so we can forget about that too. \footnote {As we've now
+introduced getters we can construct a metatable at the \LUA\ end as that is what
+\type {ffi} likes most. But even then, we don't expect much from it: the four
+times slow down that experiments showed will not magically become a large gain.}
+Nevertheless, the twice as fast virtual machine of \LUAJIT\ is a real blessing,
+especially if you take into account that \CONTEXT\ spends quite some time in
+\LUA. We're also looking forward to the announced improved garbage collector of
+\LUAJIT.
+
+In the end we started looking at \LUATEX\ itself. What can be gained there,
+within the constraints of not having to completely redesign existing
+(\CONTEXT) \LUA\ code? \footnote {In the end a substantial change was needed but
+only in accessing node properties. The nice thing about C is that there, macros
+often provide a level of abstraction, which means that a similar adaptation of
+\TEX\ source code would be more convenient.}
+
+\stopsection
+
+\startsection[title={Two access models}]
+
+Because the \CONTEXT\ code is reasonably well optimized already, the only option
+is to look into \LUATEX\ itself. We had played with the \TEX||\LUA\ interface
+already and came to the conclusion that some runtime could be gained there. In
+the long run it adds up but it's not too impressive; these extensions are
+awaiting integration. Tracing and benchmarking as well as some quick and dirty
+patches demonstrated that there were two bottlenecks in accessing fields in
+nodes: checking (comparing the metatables) and constructing results (userdata
+with metatable).
+
+In case you're unfamiliar with the concept, this is how nodes work. There is an
+abstract object called node that is in \LUA\ qualified as user data. This object
+contains a pointer to \TEX's node memory. \footnote {The traditional \TEX\ node
+memory manager is used, but at some point we might change to regular C
+(de)allocation. This might be slower but has some advantages too.} As it is real
+user data (not so called light) it also carries a metatable. In the metatable
+methods are defined and one of them is the indexer. So when you say this:
+
+\starttyping
+local nn = n.next
+\stoptyping
+
+given that \type {n} is a node (userdata) the \type {next} key is resolved
+using the \type {__index} metatable value, in our case a function. So, in fact,
+there is no \type {next} field: it's kind of virtual. The index function that
+gets the relevant data from node memory is a fast operation: after determining
+the kind of node, the requested field is located. The return value can be a
+number, for instance when we ask for \type {width}, which is also fast to return.
+But it can also be a node, as is the case with \type {next}, and then we need to
+allocate a new userdata object (memory management overhead) and a metatable has
+to be associated. And that comes at a cost.
+
+In a previous update we had already optimized the main \type {__index} function
+but felt that some more was possible. For instance we can avoid the lookup of the
+metatable for the returned node(s). And, if we don't use indexed access but
+instead a function for frequently accessed fields we can sometimes gain a bit too.
+
+A logical next step was to avoid some checking, which is okay given that one pays
+a bit of attention to coding. So, we provided a special table with some accessors of
+frequently used fields. We actually implemented this as a so called \quote {fast}
+access model, and adapted part of the \CONTEXT\ code to this, as we wanted to see
+if it made sense. We were able to gain 5 to 10\% which is nice but still not
+impressive. In fact, we concluded that for the average run using fast was indeed
+faster but not enough to justify rewriting code to the (often) less nice looking
+faster access. A nice side effect of the recoding was that I could add more advanced
+profiling.
+
+But, in the process we ran into another possibility: use accessors exclusively
+and avoid userdata by passing around references to \TEX\ node memory directly.
+As internally nodes can be represented by numbers, we ended up with numbers, but
+future versions might use light userdata instead to carry pointers around. Light
+userdata is a cheap basic object with no garbage collection involved. We tagged
+this method \quote {direct} and one can best treat the values that get passed
+around as abstract entities (in \MKIV\ we call this special view on nodes
+\quote {nuts}).
+
+So let's summarize this in code. Say that we want to know the next node of
+\type {n}:
+
+\starttyping
+local nn = n.next
+\stoptyping
+
+Here \type {__index} will be resolved and the associated function be called. We
+can avoid that lookup by applying the \type {__index} method directly (after all,
+that one assumes a userdata node):
+
+\starttyping
+local getfield = getmetatable(n).__index
+
+local nn = getfield(n,"next") -- userdata
+\stoptyping
+
+But this is not a recommended interface for regular users. A normal helper that
+does checking is about as fast as the indexed method:
+
+\starttyping
+local getfield = node.getfield
+
+local nn = getfield(n,"next") -- userdata
+\stoptyping
+
+So, we can use indexes as well as getters mixed and both perform more or less
+equally. A dedicated getter is somewhat more efficient:
+
+\starttyping
+local getnext = node.getnext
+
+local nn = getnext(n) -- userdata
+\stoptyping
+
+If we forget about checking, we can go fast, in fact the nicely interfaced \type
+{__index} is the fast one.
+
+\starttyping
+local getfield = node.fast.getfield
+
+local nn = getfield(n,"next") -- userdata
+\stoptyping
+
+Even more efficient is the following as that one knows already what to fetch:
+
+\starttyping
+local getnext = node.fast.getnext
+
+local nn = getnext(n) -- userdata
+\stoptyping
+
+The next step, away from userdata, was:
+
+\starttyping
+local getfield = node.direct.getfield
+
+local nn = getfield(n,"next") -- abstraction
+\stoptyping
+
+and:
+
+\starttyping
+local getnext = node.direct.getnext
+
+local nn = getnext(n) -- abstraction
+\stoptyping
+
+Because we considered three variants a bit too much and because \type {fast} was
+only 5 to 10\% faster in extreme cases, we decided to drop that experimental code
+and stick to providing accessors in the node namespace as well as direct variants
+for critical cases.
+
+Before you start thinking: \quote {should I rewrite all my code?} think twice!
+First of all, \type {n.next} is quite fast and switching between the normal and
+direct model also has some cost. So, unless you also adapt all your personal
+helper code or provide two variants of each, it only makes sense to use direct
+mode in critical situations. Userdata mode is much more convenient when
+developing code and only when you have millions of access you can gain by direct
+mode. And even then, if the time spent in \LUA\ is small compared to the time
+spent in \TEX\ it might not even be noticeable. The main reason we made direct
+variants is that it does pay off in \OPENTYPE\ font processing where complex
+scripts can result in many millions of calls indeed. And that code will be set up
+in such a way that it will use userdata by default and only in well controlled
+cases (like \MKIV) will we use direct mode. \footnote {When we are confident
+that \type {direct} node code is stable we can consider going direct in generic
+code as well, although we need to make sure that third party code keeps working.}
+
+Another thing to keep in mind is that when you provide hooks for users you should
+assume that they use the regular mode so you need to cast the plugins onto direct
+mode then. Because the idea is that one should be able to swap normal functions
+by direct ones (which of course is only possible when no indexes are used) all
+relevant functions in the \type {node} namespace are available in \type {direct}
+as well. This means that the following code is rather neutral:
+
+\starttyping
+local x = node -- or: x = node.direct
+
+for n in x.traverse(head) do
+ if x.getid(n) == node.id("glyph") and x.getchar(n) == 0x123 then
+ x.setfield(n,"char",0x456)
+ end
+end
+\stoptyping
+
+Of course one needs to make sure that \type {head} fits the model. For this you
+can use the cast functions:
+
+\starttyping
+node.direct.todirect(node or direct)
+node.direct.tonode(direct or node)
+\stoptyping
+
+These helpers are flexible enough to deal with either model. Aliasing the
+functions to locals is of course more efficient when a large number of calls
+happens (when you use \LUAJITTEX\ it will do some of that for you automatically).
+Of course, normally we use a more natural variant, using an id traverser:
+
+\starttyping
+for n in node.traverse_id(node.id("glyph"),head) do
+ if n.char == 0x123 then
+ n.char = 0x456
+ end
+end
+\stoptyping
+
+This is not that much slower, especially when it's only run once. Just count the
+number of characters on a page (or in your document) and you will see that it's
+hard to come up with that many calls. Of course, processing many pages of Arabic
+using a mature font with many features enabled and contextual lookups, you do run
+into quantities. Tens of features times tens of contextual lookup passes can add
+up considerably. In Latin scripts you never reach such numbers, unless you use
+fonts like Zapfino.
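+
+Going back to the earlier remark about hooks: here is a sketch (not actual
+\CONTEXT\ code) of casting at the boundary of a handler that internally uses the
+direct model; the function names are the ones mentioned before:
+
+\starttyping
+local todirect = node.direct.todirect
+local tonode   = node.direct.tonode
+local getnext  = node.direct.getnext
+
+local function myhandler(head) -- head is a regular userdata node
+    local h = todirect(head)   -- cast to direct once
+    local n = h
+    while n do
+        -- inspect or change fields with the direct getters and setters
+        n = getnext(n)
+    end
+    return tonode(h)           -- cast back before handing over
+end
+\stoptyping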
+
+\stopsection
+
+\startsection[title={The transition}]
+
+After weeks of testing, rewriting, skyping, compiling and making decisions, we
+reached a more or less stable situation. At that point we were faced with a
+speedup that gave us a good feeling, but transition to the faster variant has a
+few consequences.
+
+\startitemize
+
+\startitem We need to use an adapted code base: indexes are to be replaced by
+function calls. This is a tedious job that can endanger stability so it has to be
+done with care. \footnote {The reverse is easier, as converting getters and
+setters to indexed is a rather simple conversion, while for instance changing
+\type {.next} into a \type {getnext} needs more checking because that key is not
+unique to nodes.} \stopitem
+
+\startitem When using an old engine with the new \MKIV\ code, this approach will
+result in a somewhat slower run. Most users will probably accept a temporary
+slowdown of 10\%, so we might take this intermediate step. \stopitem
+
+\startitem When the regular getters and setters become available we get back to
+normal. Keep in mind that these accessors do some checking on arguments so that
+slows down to the level of using indexes. On the other hand, the dedicated ones
+(like \type {getnext}) are more efficient so there we gain. \stopitem
+
+\startitem As soon as direct becomes available we suddenly see a boost in speed.
+In documents of average complexity this is 10-20\% and when we use more complex
+scripts and fonts it can go up to 40\%. Here we assume that the macro package
+spends at least 50\% of its time in \LUA. \stopitem
+
+\stopitemize
+
+If we take the extremes: traditional indexed on the one hand versus optimized
+direct in \LUAJITTEX, a 50\% gain compared to the old methods is feasible.
+Because we also retrofitted some fast code into the regular accessor, indexed
+mode should also be somewhat faster compared to the older engine.
+
+In addition to the already provided helpers in the \type {node} namespace, we
+added the following:
+
+\starttabulate[|Tl|p|]
+\HL
+\NC getnext \NC this one is used a lot when analyzing and processing node lists \NC \NR
+\NC getprev \NC this one is used less often but fits in well (companion to \type {getnext}) \NC \NR
+\NC getfield \NC this is the general accessor, in userdata mode as fast as indexed \NC \NR
+\HL
+\NC getid \NC one of the most frequent called getters when parsing node lists \NC \NR
+\NC getsubtype \NC especially in fonts handling this getter gets used \NC \NR
+\HL
+\NC getfont \NC especially in complex font handling this is a favourite \NC \NR
+\NC getchar \NC as is this one \NC \NR
+\HL
+\NC getlist \NC we often want to recurse into hlists and vlists and this helps \NC \NR
+\NC getleader  \NC and we also often need to check if glue has a leader specification (like a list) \NC \NR
+\HL
+\NC setfield \NC we have just one setter as setting is less critical \NC \NR
+\HL
+\stoptabulate
+
+As \type {getfield} and \type {setfield} are just variants on indexed access, you
+can also use them to access attributes. Just pass a number as key. In the \type
+{direct} namespace, helpers like \type {insert_before} also deal with direct
+nodes.
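+
+A small sketch of such attribute access (the attribute number 127 is just an
+arbitrary example and \type {n} some node):
+
+\starttyping
+local getfield = node.getfield
+local setfield = node.setfield
+
+local v = getfield(n,127) -- the current value of attribute 127
+setfield(n,127,99)        -- set attribute 127 to 99
+\stoptyping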
+
+We currently only provide \type {setfield} because setting happens less than
+getting. Of course you can construct nodelists at the \LUA\ end but it doesn't
+add up that fast and indexed access is then probably as efficient. One reason why
+setters are less of an issue is that they don't return nodes so no userdata overhead
+is involved. We could (and might) provide \type {setnext} and \type {setprev},
+although, when you construct lists at the \LUA\ end you will probably use the
+\type {insert_after} helper anyway.
+
+\stopsection
+
+\startsection[title={Observations}]
+
+So how do these variants perform? As we no longer have \type {fast} in the engine
+that I use for this text, we can only check \type {getfield} where we can simulate
+fast mode by calling the \type {__index} metamethod. In practice the \type
+{getnext} helper will be somewhat faster because no key has to be checked,
+although the \type {getfield} functions have been optimized according to the
+frequencies of accessed keys already.
+
+\starttabulate
+\NC node[*] \NC 0.516 \NC \NR
+\NC node.fast.getfield \NC 0.616 \NC \NR
+\NC node.getfield \NC 0.494 \NC \NR
+\NC node.direct.getfield \NC 0.172 \NC \NR
+\stoptabulate
+
+Here we simulate a dumb 20 times node count of 200 paragraphs of \type {tufte.tex}
+with a little bit of overhead for wrapping in functions. \footnote {When
+typesetting Arabic or using complex fonts we quickly get a tenfold.} We encounter
+over three million nodes this way. We average a couple of runs.
+
+\starttyping
+local function check(current)
+ local n = 0
+ while current do
+ n = n + 1
+ current = getfield(current,"next") -- current = current.next
+ end
+ return n
+end
+\stoptyping
+
+What we see here is that indexed access is quite okay given the amount of nodes,
+but that direct is much faster. Of course we will never see that gain in practice
+because much more happens than counting and because we also spend time in \TEX.
+The 300\% speedup will eventually go down to one tenth of that.
+
+Because \CONTEXT\ avoids node list processing when possible the baseline
+performance is not influenced much.
+
+\starttyping
+\starttext \dorecurse{1000}{test\page} \stoptext
+\stoptyping
+
+With \LUATEX\ we get some 575 pages per second and with \LUAJITTEX\ more than 610
+pages per second.
+
+\starttyping
+\setupbodyfont[pagella]
+
+\edef\zapf{\cldcontext
+ {context(io.loaddata(resolvers.findfile("zapf.tex")))}}
+
+\starttext \dorecurse{1000}{\zapf\par} \stoptext
+\stoptyping
+
+For this test \LUATEX\ needs 3.9 seconds and runs at 54 pages per second, while
+\LUAJITTEX\ needs only 2.3 seconds and gives us 93 pages per second.
+
+Just for the record, if we run this:
+
+\starttyping
+\starttext
+\stoptext
+\stoptyping
+
+a \LUATEX\ run takes 0.229 seconds and a \LUAJITTEX\ run 0.178 seconds. This includes
+initializing fonts. If we run just this:
+
+\starttyping
+\stoptext
+\stoptyping
+
+\LUATEX\ needs 0.199 seconds and \LUAJITTEX\ only 0.082 seconds. So, in the
+meantime, we hardly spend any time on startup. Launching the binary and managing
+the job with \type {mtxrun} calling \type {mtx-context} adds 0.160 seconds
+overhead. Of course this is only true when you have already run \CONTEXT\ once as
+the operating system normally caches files (in our case format files and fonts).
+This means that by now an edit|-|preview cycle is quite convenient. \footnote {I
+use \SCITE\ with dedicated lexers as editor and currently \type {sumatrapdf} as
+previewer.}
+
+As a more practical test we used the current version of \type {fonts-mkiv} (166
+pages, using all kind of font tricks and tracing), \type {about} (60 pages, quite
+some traced math) and a torture test of Arabic text (61 pages dense text). The
+following measurements are from 2013-07-05 after adapting some 50 files to the
+new model. Keep in mind that the old binary can fake a fast getfield and setfield
+but that the other getters are wrapped functions. The more we have, the slower it
+gets. We used the mingw versions.
+
+\starttabulate[|l|r|r|r|]
+\HL
+\NC version \NC fonts \NC about \NC arabic \NC \NR
+\HL
+\NC old mingw, indexed plus some functions \NC 8.9 \NC 3.2 \NC 20.3 \NC \NR
+\NC old mingw, fake functions \NC 9.9 \NC 3.5 \NC 27.4 \NC \NR
+\HL
+\NC new mingw, node functions \NC 9.0 \NC 3.1 \NC 20.8 \NC \NR
+\NC new mingw, indexed plus some functions \NC 8.6 \NC 3.1 \NC 19.6 \NC \NR
+\NC new mingw, direct functions \NC 7.5 \NC 2.6 \NC 14.4 \NC \NR
+\HL
+\stoptabulate
+
+The second row shows what happens when we use the adapted \CONTEXT\ code with an
+older binary. We're slower. The last row is what we will have eventually. All
+documents show a nice gain in speed and future extensions to \CONTEXT\ will no
+longer have the same impact as before. This is because what we see here also
+includes \TEX\ activity. The 300\% increase of speed of node access makes node
+processing less influential. On the average we gain 25\% here and as on these
+documents \LUAJITTEX\ gives us some 40\% gain on indexed access, it gives more
+than 50\% on the direct function based variant.
+
+In the fonts manual some 25 million getter accesses happen while the setters
+don't exceed one million. I lost the tracing files but at some point the Arabic
+test showed more than 100 million accesses. So it's safe to conclude that
+setters are sort of negligible. In the fonts manual the number of accesses to
+the previous node was less than 5000 while the id and next fields were the clear
+winners and list and leader fields also scored high. Of course it all depends on
+the kind of document and features used, but we think that the current set of
+helpers is quite adequate. And because we decided to provide that for normal
+nodes as well, there is no need to go direct for more simple cases.
+
+Maybe in the future further tracing might show that adding getters for width,
+height, depth and other properties of glyph, glue, kern, penalty, rule, hlist and
+vlist nodes can be of help, but quite probably only in direct mode combined with
+extensive list manipulations. We will definitely explore other getters but only
+after the current set has proven to be useful.
+
+\stopsection
+
+\startsection[title={Nuts}]
+
+So why going nuts and what are nuts? In Dutch \quote {node} sounds a bit like
+\quote {noot} and translates back to \quote {nut}. And as in \CONTEXT\ I needed
+a word for these direct nodes they became \quote {nuts}. It also suits this
+project: at some point we were going nuts because we couldn't squeeze more out
+of \LUAJITTEX, so we started looking at other options. And we're sure some folks
+consider us being nuts anyway, because we spend time on speeding up. And adapting
+the \LUATEX\ and \CONTEXT\ \MKIV\ code mid||summer is also kind of nuts.
+
+At the \CONTEXT\ 2013 conference we will present this new magic and about that
+time we've done enough tests to see if it works out well. The \LUATEX\ engine
+will provide the new helpers but they will stay experimental for a while as one
+never knows where we messed up.
+
+I end with another measurement set. Every now and then I play with a \LUA\
+variant of the \TEX\ par builder. At some point it will show up in \MKIV\ but
+first I want to abstract it a bit more and provide some hooks. In order to test
+the performance I use the following tests:
+
+% \testfeatureonce{1000}{\tufte \par}
+
+\starttyping
+\testfeatureonce{1000}{\setbox0\hbox{\tufte}}
+
+\testfeatureonce{1000}{\setbox0\vbox{\tufte}}
+
+\startparbuilder[basic]
+ \testfeatureonce{1000}{\setbox0\vbox{\tufte}}
+\stopparbuilder
+\stoptyping
+
+We use a \type {\hbox} to determine the baseline performance. Then we break lines
+using the built|-|in parbuilder. Next we do the same but now with the \LUA\
+variant. \footnote {If we also enable protrusion and hz the \LUA\ variant suffers
+less because it implements this more efficiently.}
+
+\starttabulate[|l|l|l|l|l|]
+\HL
+\NC \NC \bf \rlap{luatex} \NC \NC \bf \rlap{luajittex} \NC \NC \NR
+\HL
+\NC \NC \bf total \NC \bf linebreak \NC \bf total \NC \bf linebreak \NC \NR
+\HL
+\NC 223 pp nodes \NC 5.67 \NC 2.25 flushing \NC 3.64 \NC 1.58 flushing \NC \NR
+\HL
+\NC hbox nodes \NC 3.42 \NC \NC 2.06 \NC \NC \NR
+\NC vbox nodes \NC 3.63 \NC 0.21 baseline \NC 2.27 \NC 0.21 baseline \NC \NR
+\NC vbox lua nodes \NC 7.38 \NC 3.96 \NC 3.95 \NC 1.89 \NC \NR
+\HL
+\NC 223 pp nuts \NC 4.07 \NC 1.62 flushing \NC 2.36 \NC 1.11 flushing \NC \NR
+\HL
+\NC hbox nuts \NC 2.45 \NC \NC 1.25 \NC \NC \NR
+\NC vbox nuts \NC 2.53 \NC 0.08 baseline \NC 1.30 \NC 0.05 baseline \NC \NR
+\NC vbox lua nodes \NC 6.16 \NC 3.71 \NC 3.03 \NC 1.78 \NC \NR
+\NC vbox lua nuts \NC 5.45 \NC 3.00 \NC 2.47 \NC 1.22 \NC \NR
+\HL
+\stoptabulate
+
+We see that on this test nuts have an advantage over nodes. In this case we
+mostly measure simple font processing and there is no markup involved. Even a 223
+page document with only simple paragraphs needs to be broken across pages,
+wrapped in page ornaments and shipped out. The overhead tagged as \quote
+{flushing} indicates how much extra time would have been involved in that. These
+numbers demonstrate that with nuts the \LUA\ parbuilder is performing 10\% better
+so we gain some. In a regular document only part of the processing involves
+paragraph building so switching to a \LUA\ variant has no big impact anyway,
+unless we have simple documents (like novels). When we bring hz into the picture
+performance will drop (and users occasionally report this) but here we already
+found out that this is mostly an implementation issue: the \LUA\ variant suffers
+less so we will backport some of the improvements. \footnote {There are still
+some aspects that can be improved. For instance these tests still check lists
+for \type {prev} fields, something that is not needed in future versions.}
+
+\stopsection
+
+\startsection[title={\LUA\ 5.3}]
+
+When we were working on this the first working version of \LUA\ 5.3 was
+announced. Apart from some minor changes that won't affect us, the most important
+change is the introduction of integers deep down. On the one hand we can benefit
+from this, given that we adapt the \TEX|-|\LUA\ interfaces a bit: the distinction
+between \type {to_number} and \type {to_integer} for instance. And, numbers are
+always somewhat special in \TEX\ as it relates to reproduction on different
+architectures, also over time. There are some changes in conversion to string
+(needs attention) and maybe at some time also in the automated casting from
+strings to numbers (the last is no big deal for us).
+
+On the one hand the integers might have a positive influence on performance
+especially as scaled points are integers and because fonts use them too (maybe
+there is some advantage in memory usage). But we also need a proper efficient
+round function (or operator) then. I'm wondering if mixed integer and float usage
+will be efficient, but on the other hand we don't do that many calculations so
+the benefits might outperform the drawbacks.
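+
+Lacking such an operator, a straightforward helper could look as follows (just a
+sketch; how to round negative values and ties is a choice of its own):
+
+\starttyping
+local floor = math.floor
+local ceil  = math.ceil
+
+local function round(x)
+    return x < 0 and ceil(x - 0.5) or floor(x + 0.5)
+end
+\stoptyping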
+
+We noticed that 5.2 was somewhat faster but that the experimental generational
+garbage collector makes runs slower. Let's hope that the garbage collector
+performance doesn't degrade. But the relative gain of node versus direct will
+probably stay.
+
+Because we already have an experimental setup we will probably experiment a bit
+with this in the future. Of course the question then is how \LUAJITTEX\ will work
+out: because it is already not 5.2 compatible, it has to be seen if it will
+support the next level. At least in \CONTEXT\ \MKIV\ we can prepare ourselves as
+we did with \LUA\ 5.2 so that we're ready when we follow up.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-properties.tex b/doc/context/sources/general/manuals/about/about-properties.tex
new file mode 100644
index 000000000..07bb2924c
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-properties.tex
@@ -0,0 +1,209 @@
+% language=uk
+
+\startcomponent about-properties
+
+\environment about-environment
+
+\startchapter[title=Properties]
+
+\startsection[title=Introduction]
+
+Attributes are a nice extension to \TEX\ as they permit us to let information
+travel with nodes. Internally they are represented as a linked list that
+travels with a node. Because often a sequence of nodes has the same attributes,
+this mechanism is quite efficient. Access is relatively fast too. Attributes
+have a number and a value (also a number) which is fine. Of course one could
+wish for them to be anything, but imagine the amount of management needed
+in the engine if that were the case. Not only does saving and restoring (due to
+grouping) at the \TEX\ end have no \LUA\ equivalent, an overload of the \LUA\
+registry (the most natural interface for this) is not what we want. Of course
+it is also not acceptable that (future) extensions slow down a run. In fact,
+leaner and meaner should be the main objective.
+
+At some point I thought that packing crucial information in a node using a bitset
+would help to speed up some critical mechanisms (mostly fonts) but although
+managing some 32 or 64 on||off states is possible in a more closed macro package,
+in practice it would lead to conflicts in use. Also, an experimental
+implementation of this idea was not faster than using attributes due to the fact
+that manipulating bits also involves function calls that deal with setting,
+resetting, masking and more. It also makes nodes larger and increases the memory
+footprint.
+
+So, when I discarded that idea, I moved to another one, which is associating a
+\LUA\ table with each node (that makes sense). Again, an implementation where
+in some way a reference to a table is carried with a node, is non||trivial because
+it has to go via the \LUA\ registry and will not be too efficient in terms of
+speed. Also, when dealing with such information one wants to stay at the \LUA\
+end and not cross the C||boundary too often.
+
+Therefore a different approach was taken which involves a \LUA\ table. The main
+issue with carrying information with a node is not to associate that information,
+but to make sure that it gets cleaned up when a node is freed and copied when a
+node is copied. All nodes that have attributes also get properties.
+
+\stopsection
+
+\startsection[title=The implementation]
+
+The implementation is rather minimalistic. This is because hard coded solutions
+don't fit in the \LUATEX\ design philosophy. Also, there are many ways to use
+such a mechanism so too much hard coded behaviour only complicates usage.
+
+When a node is copied, we also copy the associated property entry. Normally its
+type is \type {nil} or \type {table}. Depending on how you enabled this
+mechanism, the table copy is shallow (just a reference to the same table), or we
+assign an empty table with the original as metatable index. The second approach
+has some more overhead.
+
+When a new node is assigned, nothing extra is done with the properties. The
+overhead is zero. This means that when you want to assign properties at the \LUA\
+end, you also have to check if a node property already has a table and if not,
+create one. The same is true for querying properties: you have to test if there
+are properties at all.
+
+When you use the \quote {direct} node model, you can directly access the property
+table. But, with direct as well as wrapped nodes, you can also use setters and
+getters. The property table has no metatable so you can add your own one for
+alternative access if needed. In \CONTEXT\ you can best stay away from such hacks
+and use the provided mechanisms because otherwise you get a performance hit.
+
+\stopsection
+
+\startsection[title=The \LUA\ interface]
+
+The interface (in regular nodes as well as direct ones) is quite simple and
+provides five functions:
+
+\starttyping
+set_properties_mode(boolean,boolean)
+flush_properties_table()
+get_properties_table()
+getproperty(node_id)
+setproperty(node_id,value)
+\stoptyping
+
+By default this mechanism is disabled so that when it's not used, there is no
+overhead involved. With \type {set_properties_mode} the first argument determines
+if you enable or disable this mechanism. The properties themselves are untouched.
+When the second argument is \type {true} copied properties create a new table
+with a metatable pointing to the original. You can flush all properties with
+\type {flush_properties_table}.
+
+You can access and set properties with \type {getproperty} and \type
+{setproperty}. Instead you can also use the table approach, where you can reach
+the table with \type {get_properties_table}. Keep in mind that the normal and
+direct calls to this function return a different table.
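+
+So, the check||and||create pattern mentioned before boils down to something like
+this (just a sketch, with \type {n} some node and \type {mydata} a made||up
+key):
+
+\starttyping
+local t = node.getproperty(n)
+if not t then
+    t = { }
+    node.setproperty(n,t)
+end
+t.mydata = "anything you like"
+\stoptyping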
+
+\stopsection
+
+\startsection[title=A few examples]
+
+The following examples use \CONTEXT\ but apart from the calls to the \type
+{context} namespace, they are rather generic. We have enabled the property
+mechanism with:
+
+\starttyping
+set_properties_mode(true)
+\stoptyping
+
+We fill a box:
+
+\startbuffer
+\newbox\MyPropertyBox
+
+\setbox\MyPropertyBox=\hbox{test}
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startbuffer[common]
+local list = tex.getbox("MyPropertyBox").list
+
+local function start()
+ context.starttabulate { "||||" }
+ context.HL()
+end
+
+local function stop()
+ context.HL()
+ context.stoptabulate()
+end
+
+local function row(n,p)
+ context.NC() context(tostring(n==p))
+ context.NC() context(tostring(n))
+ context.NC() context(tostring(p))
+ context.NC() context.NR()
+end
+\stopbuffer
+
+\typebuffer[common]
+
+We will demonstrate the four access models. First regular properties
+using functions:
+
+\startbuffer[example]
+for n in node.traverse(list) do
+ node.setproperty(n,{ vif = n })
+end
+start()
+for n in node.traverse(list) do
+ row(n,node.getproperty(n).vif)
+end
+stop()
+\stopbuffer
+
+\typebuffer[example] {\ttxx\ctxluabuffer[common,example]}
+
+We can use a table instead (in fact, we can use both approaches
+mixed):
+
+\startbuffer[example]
+local n_properties = node.get_properties_table()
+
+for n in node.traverse(list) do
+ n_properties[n] = { vit = n }
+end
+start()
+for n in node.traverse(list) do
+ row(n,n_properties[n].vit)
+end
+stop()
+\stopbuffer
+
+\typebuffer[example] {\ttxx\ctxluabuffer[common,example]}
+
+The direct method looks the same, apart from a cast to direct:
+
+\startbuffer[example]
+for n in node.direct.traverse(node.direct.todirect(list)) do
+ node.direct.setproperty(n,{ vdf = n })
+end
+start()
+for n in node.direct.traverse(node.direct.todirect(list)) do
+ row(n,node.direct.getproperty(n).vdf)
+end
+stop()
+\stopbuffer
+
+\typebuffer[example] {\tt\ctxluabuffer[common,example]}
+
+Again, we can use the table approach:
+
+\startbuffer[example]
+local d_properties = node.direct.get_properties_table()
+
+for n in node.direct.traverse(node.direct.todirect(list)) do
+ d_properties[n] = { vdt = n }
+end
+start()
+for n in node.direct.traverse(node.direct.todirect(list)) do
+ row(n,d_properties[n].vdt)
+end
+stop()
+\stopbuffer
+
+\typebuffer[example] {\tt\ctxluabuffer[common,example]}
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-speed.tex b/doc/context/sources/general/manuals/about/about-speed.tex
new file mode 100644
index 000000000..4b4a376e8
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-speed.tex
@@ -0,0 +1,732 @@
+% language=uk
+
+\startcomponent about-speed
+
+\environment about-environment
+
+\startchapter[title=Speed]
+
+\startsection[title=Introduction]
+
+In the \quote {mk} and \type {hybrid} progress reports I have spent some words
+on speed. Why is speed this important?
+
+In the early days of \CONTEXT\ I often had to process documents with thousands of
+pages and hundreds of thousands of hyperlinks. You can imagine that this took a
+while, especially when all kind of ornaments had to be added to the page:
+backgrounds, buttons with their own backgrounds and offsets, hyperlink colors
+dependent on their state, etc. Given that multiple runs were needed, this could
+mean that you'd leave the machine running all night in order to get the final
+document.
+
+It was the time when computers got twice the speed with each iteration of
+hardware, so I suppose that it would run substantially faster on my current
+laptop, an old Dell M90 workhorse. Of course a recently added SSD drive adds a
+boost as well. But still, processing such documents on a machine with an 8MHz 286
+processor and 640 kilobytes of memory was close to impossible. But, when I
+compare the speed of the core duo M90 with for instance an M4600 with an i5 \CPU\
+running at the same clock speed as the M90, I see a factor 2 improvement at most.
+Of course going for an extremely clocked desktop will be much faster, but we're
+no longer seeing a tenfold speedup every few years. On the contrary: we see a
+shift to multiple cores, often running at a lower clock speed, with the
+assumption that threaded applications are used. This scales perfectly for web
+services and graphic manipulations but not so much for \TEX. If we want to go
+faster, we need to see where we can be more efficient within more or less frozen
+clock speeds.
+
+Of course there are some developments that help us. First of all, for programs
+like \TEX\ clever caching of files by the operating system helps a lot. Memory
+still becomes faster and \CPU\ cached become larger too. For large documents with
+lots of resources an SSD works out great. As \LUA\ uses floating point, speedup
+in that area also help with \LUATEX. We use virtual machines for \TEX\ related
+services and for some reason that works out quite well, as the underlying
+operating system does lots of housekeeping in parallel. But, with all maxing out,
+we finally end up at the software itself, and in \TEX\ this boils down to a core
+of compiled code along with lots of macro expansions and interpret \LUA\ code.
+
+In the end, the question remains what causes excessive runtimes. Is it the nature
+of the \TEX\ expansion engine? Is it bad macro writing? Is there too much
+overhead? If you notice how fast processing the \TEX\ book goes on modern
+hardware it is clear that the core engine is not the problem. It's no big deal to
+get 100 pages per second on documents that use a relatively simple page builder and
+have macros that lack a flexible user interface.
+
+Take the following example:
+
+\starttyping
+\starttext
+\dorecurse{1000}{test\page}
+\stoptext
+\stoptyping
+
+We do nothing special here. We use the default Latin Modern fonts and process
+single words. No burden is put on the pagebuilder either. This way we get on a
+2.33 GHz T7600 \CPU\ a performance of 185 pages per second. \footnote {In this
+case the mingw version was used. A version using the native \WINDOWS\ compiler
+runs somewhat faster, although this depends on the compiler options. \footnote
+{We've noticed that sometimes the mingw binaries are faster than native binaries,
+but sometimes they're slower.} With \LUAJITTEX\ the 185 pages per second becomes
+195 on a 1000 page document.} The estimated \LUA\ overhead in this 1000
+page document is some 1.5 to 2 seconds. The following table shows the performance
+on such a test document with different page numbers in pps (reported pages per
+second).
+
+\starttabulate[|r|r|]
+\HL
+\NC \bf \# pages \NC \bf pps \NC \NR
+\HL
+\NC 1 \NC 2 \NC \NR
+\NC 10 \NC 15 \NC \NR
+\NC 100 \NC 90 \NC \NR
+\NC 1000 \NC 185 \NC \NR
+\NC 10000 \NC 215 \NC \NR
+\HL
+\stoptabulate
+
+The startup time, measured on a zero page document, is 0.5 seconds. This includes
+loading the format, loading the embedded \LUA\ scripts and initializing them,
+initializing and loading the file database, locating and loading some runtime
+files and loading the absolute minimum number of fonts: a regular and math Latin
+Modern. A few years before this writing that was more than a second, and the gain
+is due to a slightly faster \LUA\ interpreter as well as improvements in
+\CONTEXT.
+
+So why does this matter at all, if on a larger document the startup time can be
+neglected? It does because when I have to implement a style for a project or am
+developing some functionality, a fast edit||run||preview cycle is a must, if only
+because even a wait of a few seconds feels uncomfortable. On the other hand, when I
+process a manual of say 150 pages, which uses some tricks to explain matters, I
+don't care if the processing rate is between 5 and 15 pages per second, simply
+because you get (done) what you asked for. It mostly has to do with feeling
+comfortable.
+
+There is one thing to keep in mind: such measurements can vary over time, as they
+depend on several factors. Even in the trivial case we need to:
+
+\startitemize[packed]
+\startitem
+ load macros and \LUA\ code
+\stopitem
+\startitem
+ load additional files
+\stopitem
+\startitem
+ initialize the system, think of fonts and languages
+\stopitem
+\startitem
+ package the pages, which includes reverting to global document states
+\stopitem
+\startitem
+ create the final output stream (\PDF)
+\stopitem
+\stopitemize
+
+The simple one word per page test is not that slow, and normally for 1000 pages we
+measure around 200 pps. However, due to some small speedups (that somehow add up)
+in three months' time I could gain a lot:
+
+\starttabulate[|r|r|r|r|]
+\HL
+\NC \bf \# pages \NC \bf January \NC \bf April \NC \bf May\rlap{\quad(2013)} \NC \NR
+\HL
+\NC 1 \NC 2 \NC 2 \NC 2 \NC \NR
+\NC 10 \NC 15 \NC 17 \NC 17 \NC \NR
+\NC 100 \NC 90 \NC 109 \NC 110 \NC \NR
+\NC 1000 \NC 185 \NC 234 \NC 259 \NC \NR
+\NC 10000 \NC 215 \NC 258 \NC 289 \NC \NR
+\HL
+\stoptabulate
+
+Among the improvements in April were faster output to the console (first
+prototyped in \LUA, later done in the \LUATEX\ engine itself), and a couple of
+low level \LUA\ optimizations. In May a dirty (maybe too tricky) global document
+state restore trick was introduced. Although these changes give a nice speed
+bump, they will mostly go unnoticed in more realistic documents. There we are
+happy if we end up in the 20 pps range. So, in practice a more than 10 percent
+speedup between January and April is just a dream. \footnote {If you wonder why I
+still bother with such things: sometimes speedups are just a side effect of
+trying to accomplish something else, like less verbose output in full tracing
+mode.}
+
+There are many cases where it does matter to squeeze out every second possible.
+We run workflows where some six documents are generated from one source. If we
+forget about the initial overhead of fetching the source from a remote server
+\footnote {In the user interface we report the time it takes to fetch the source
+so that the typesetter can't be blamed for delays.} gaining half a second per
+document (if we start fresh, each needs at least two runs) means that the user
+will see the first result one second faster and have them all six seconds sooner
+than before. In that case it makes sense to identify bottlenecks in the more high
+level mechanisms.
+
+And this is why during the development of \CONTEXT\ and the transition from
+\MKII\ to \MKIV\ quite some time has been spent on avoiding bottlenecks. And, at
+this point we can safely conclude that, in spite of more advanced functionality,
+the current version of \MKIV\ runs faster than the \MKII\ versions in most cases,
+especially if you take the additional functionality into account (like \UNICODE\
+input and fonts).
+
+\stopsection
+
+\startsection[title=The \TEX\ engine]
+
+Writing inefficient macros is not that hard. If they are used only a few times,
+for instance when setting up properties, it plays no role. But if they're expanded
+many times it may make a difference. Because use and development of \CONTEXT\
+went hand in hand we always made sure that the overhead was kept at a minimum.
+
+\startsubject[title=The parbuilder]
+
+There are a couple of places where document processing in a traditional \TEX\
+engine gets a performance hit. Let's start with the parbuilder. Although the
+paragraph builder is quite fast, it can be responsible for a decent amount of
+runtime. It is also a fact that the parbuilders of the engines derived from
+original \TEX\ are more complex. For instance, \OMEGA\ adds bidirectionality to
+the picture, which involves some extra checking as well as more nodes in the
+list. The \PDFTEX\ engine provides protrusion and expansion, and as that feature
+was primarily a topic of research it was never optimized.
+
+In \LUATEX\ the parbuilder is a mixture of the \PDFTEX\ and \OMEGA\ builders and
+adapted to the fact that we have split the hyphenation, ligature building,
+kerning and breaking a paragraph into lines. The protrusion and expansion code is
+still there but already for a few years I have alternative code for \LUATEX\ that
+simplifies the implementation and could in principle give a speed boost as well
+but till now we never found time to adapt the engine. Take the following test code:
+
+\ifdefined\tufte \else \let\tufte\relax \fi
+
+\starttyping
+\testfeatureonce{100}{\setbox0\hbox{\tufte \par}} \tufte \par
+\stoptyping
+
+In \MKIV\ we use \LUA\ for doing fonts so when we measure this bit we get the
+time used for typesetting our \type {\tufte} quote without breaking it into
+lines. A normal \LUATEX\ run needs 0.80 seconds and a \LUAJITTEX\ run takes 0.47
+seconds. \footnote {All measurements are on a Dell M90 laptop running Windows 8.
+I keep using this machine because it has a decent high res 4:3 screen. It's the
+same machine Luigi Scarso and I used when experimenting with \LUAJITTEX.}
+
+\starttyping
+\testfeatureonce{100}{\setbox0\vbox{\tufte \par}} \tufte \par
+\stoptyping
+
+In this case \LUATEX\ needs 0.80 seconds and \LUAJITTEX\ needs 0.50 seconds and
+as we break the list into lines, we can deduce that close to zero seconds are
+needed to break 100 samples. This (often used) sample text has the interesting
+property that it has many hyphenation points and always gives multiple hyphenated
+lines. So, the parbuilder, if no protrusion and expansion are used, is real fast!
+
+\starttyping
+\startparbuilder[basic]
+ \testfeatureonce{100}{\setbox0\vbox{\tufte \par}} \tufte \par
+\stopparbuilder
+\stoptyping
+
+Here we kick in our \LUA\ version of the par builder. This takes 1.50 seconds for
+\LUATEX\ and 0.90 seconds for \LUAJITTEX. So, \LUATEX\ needs 0.70 seconds to
+break the quote into lines while \LUAJITTEX\ needs 0.43. If we stick to stock
+\LUATEX, this means that a medium complex paragraph needs 0.007 seconds of \LUA\
+time, and that is not a time to be worried about. Of course these
+numbers are not that accurate but the measurements are consistent over multiple
+runs for a specific combination of \LUATEX\ and \MKIV. On a more modern machine
+it's probably also close to zero.
+
+These measurements demonstrate that we should add some nuance to the assumption
+that parbuilding takes time. For this we need to distinguish between traditional
+\TEX\ and \LUATEX. In traditional \TEX\ you build a horizontal box or vertical
+box. In \TEX\ speak these are called horizontal and vertical lists. The main text
+flow is a special case and called the main vertical list, but in this perspective
+you can consider it to be like a vertical box.
+
+Each vertical box is split into lines. These lines are packed into horizontal
+boxes. In traditional \TEX\ constructing a list starts with turning references to
+characters into glyphs and ligatures. Kerns get inserted between characters if
+the font requests that. When a vertical box is split into lines, discretionary
+nodes get inserted (hyphenation) and when font expansion or protrusion is enabled
+extra fonts with expanded dimensions get added.
+
+So, in the case of a vertical box, building the paragraph is not really
+distinguished from ligaturing, kerning and hyphenation, which means that the
+timing of this process is somewhat fuzzy. Moreover, after the lines are
+identified some final packing of lines happens and the result gets added to a
+vertical list.
+
+In \LUATEX\ all these stages are split into hyphenation, ligature building,
+kerning, line breaking and finalizing. When the callbacks are not enabled the
+normal machinery kicks in but still the stages are clearly separated. In the case
+of \CONTEXT\ the font ligaturing and kerning get preceded by so called node mode
+font handling. This means that we have extra steps and there can be even more
+steps before and afterwards. And, hyphenation always happens on the whole list,
+contrary to traditional \TEX\ that interweaves this. Keep in mind that because we
+can box and unbox and in that process add extra text the whole process can get
+repeated several times for the same list. Of course already treated glyphs and
+kerns are normally kept as they are.
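+
+To give an impression of that split, the following sketch shows how the stages
+can be triggered explicitly from \LUA\ on a node list; here \type {head} is
+assumed to come from a callback or an unpacked box:
+
+\starttyping
+-- a rough sketch: head is assumed to be the start of a node list
+lang.hyphenate(head)         -- inserts discretionary nodes
+head = node.ligaturing(head) -- builds ligatures as the font requests
+head = node.kerning(head)    -- inserts font kerns
+\stoptyping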
+
+So, because in \LUATEX\ the process of splitting into lines is separated we can
+safely conclude that it is real fast. Definitely compared to all the font related
+steps. So, let's go back to the tests and let's do the following:
+
+\starttyping
+\testfeatureonce{1000}{\setbox0\hbox{\tufte}}
+
+\testfeatureonce{1000}{\setbox0\vbox{\tufte}}
+
+\startparbuilder[basic]
+ \testfeatureonce{1000}{\setbox0\vbox{\tufte}}
+\stopparbuilder
+\stoptyping
+
+We've put the text into a macro so that we don't have interference from reading
+files. The test wrapper does the timing. The following measurements are somewhat
+rough but repetition gives similar results. \footnote {Before and between runs
+we do a garbage collection.}
+
+\starttabulate[|c|c|c|c|c|]
+\HL
+\NC \NC \bf engine \NC \bf method \NC \bf normal \NC \bf hz \NC \NR % comment
+\HL
+\NC 1 \NC luatex \NC tex hbox \NC ~9.64 \NC ~9.64 \NC \NR % baseline font feature processing, hyphenation etc: 9.74
+\NC 2 \NC \NC tex vbox \NC ~9.84 \NC 10.16 \NC \NR % 0.20 linebreak / 0.52 with hz -> 0.32 hz overhead (150pct more)
+\NC 3 \NC \NC lua vbox \NC 17.28 \NC 18.43 \NC \NR % 7.64 linebreak / 8.79 with hz -> 1.33 hz overhead ( 20pct more)
+\HL
+\NC 4 \NC luajittex \NC tex hbox \NC ~6.33 \NC ~6.33 \NC \NR % baseline font feature processing, hyphenation etc: 6.33
+\NC 5 \NC \NC tex vbox \NC ~6.53 \NC ~6.81 \NC \NR % 0.20 linebreak / 0.48 with hz -> 0.28 hz overhead (expected 0.32)
+\NC 6 \NC \NC lua vbox \NC 11.06 \NC 11.81 \NC \NR % 4.53 linebreak / 5.28 with hz -> 0.75 hz overhead
+\HL
+\stoptabulate
+
+In line~1 we see the baseline: hyphenation, processing fonts and hpacking takes
+9.74 seconds. In the second line we see that breaking the 1000 paragraphs costs
+some 0.20 seconds and when expansion is enabled an extra 0.32 seconds is needed.
+This means that expansion takes 150\% more runtime. If we delegate the task to
+\LUA\ we need 7.64 seconds for breaking into lines, which cannot be neglected
+but is still ok given the fact that we break 1000 paragraphs. But it is interesting
+to see that our alternative expansion routine only adds 1.33 seconds which is
+less than 20\%. It must be said that the built|-|in method is not that efficient
+by design if only because it started out differently as part of research.
+
+When measured three months later, the numbers for regular \LUATEX\ (at that time
+version 0.77) with the latest \CONTEXT\ were: 8.52, 8.72 and 15.40 seconds for
+the normal run, which demonstrates that we should not draw too many conclusions
+from such measurements. It's the overall picture that matters.
+
+As with earlier timings, if we use \LUAJITTEX\ we see that the runtime of \LUA\
+is much lower (due to the virtual machine). Of course we're still 20 times slower
+than the built|-|in method but only 10 times slower when we use expansion. To put
+these numbers in perspective: 5 seconds for 1000 paragraphs.
+
+\starttyping
+\setupbodyfont[dejavu]
+
+\starttext
+ \dontcomplain \dorecurse{1000}{\tufte\par}
+\stoptext
+\stoptyping
+
+This results in 295 pages in the default layout and takes 17.8 seconds or 16.6
+pages per second. Expansion is not enabled.
+
+\starttyping
+\starttext
+\startparbuilder[basic]
+    \dontcomplain \dorecurse{1000}{\tufte\par}
+\stopparbuilder
+\stoptext
+\stoptyping
+
+That one takes 24.7 seconds and runs at 11.9 pages per second. This is indeed
+slower but on a bit more modern machine I expect better results. We should also
+realize that with Dejavu being a relatively large font a difficult paragraph like
+the tufte example gives overfull boxes which in turn is an indication that quite
+some alternative breaks are tried.
+
+When typeset with Latin Modern we don't get overfull boxes and it is interesting
+that the native method needs less time (15.9 seconds or 14.1 pages per second)
+while the \LUA\ variant also runs a bit faster: 23.4 seconds or 9.5 pages per
+second. The number of pages is 223 because this font is smaller by design.
+
+When we disable hyphenation the Dejavu variant takes 16.5 (instead of 17.8)
+seconds and 23.1 (instead of 24.7) seconds for \LUA, so this process is not that
+demanding.
+
+For typesetting so many paragraphs without anything special it makes no sense to
+bother with using a \LUA\ based parbuilder. I must admit that I never had to typeset
+novels so all my 300 page runs take much longer anyway. When at some point
+we introduce alternative parbuilding to \CONTEXT, the speed penalty is probably
+acceptable.
+
+Just to indicate that predictions are fuzzy: when we put a \type {\blank} between
+the paragraphs we end up with 313 pages and the traditional method takes 18.3
+seconds while \LUA\ needs 23.6. One reason for this is that the whitespace is
+also handled by \LUA\ and in the pagebuilder we do some finalizing, so we
+suddenly get interference of other processes (as well as the garbage collector).
+Again an indication that we should not bother too much about speed. I try to make
+sure that the \LUA\ (as well as \TEX) code is reasonably efficient, so in
+practice it's the document style that is a more important factor than the
+parbuilder, be it the traditional one or the \LUA\ variant.
+
+\stopsubject
+
+\startsubject[title=Copying boxes]
+
+As soon as in \CONTEXT\ you start enhancing the page with headers and footers and
+backgrounds you will see that the pps rate drops. This is partly due to the fact
+that suddenly quite some macro expansion takes place in order to check what needs
+to happen (like font and color switches, offsets, overlays etc). But what has
+more impact is that we might end up with copying boxes and that takes time. Also,
+by wrapping and repackaging boxes, we add additional levels of recursion in
+postprocessing code.
+
+\stopsubject
+
+\startsubject[title=Macro expansion]
+
+Taco and I once calculated that \MKII\ spends some 4\% of the time in accessing
+the hash table. This is a clear indication that quite some macro expansion goes
+on. Because when I rewrote \MKII\ into \MKIV\ I no longer had to
+take memory and other limitations into account, the codebase looks quite
+different. There we do have more expansion in the mechanism that deals with
+settings but the body of macros is much smaller and fewer parameters are passed.
+So, the overall performance is better.
+
+\stopsubject
+
+\startsubject[title=Fonts]
+
+Using a font has several aspects. First you have to define an instance. Then, when
+you use it for the first time, the font gets loaded from storage, initialized and
+is passed to \TEX. All these steps are quite optimized. If we process the following
+file:
+
+\starttyping
+\setupbodyfont[dejavu]
+
+\starttext
+ regular, {\it italic}, {\bf bold ({\bi italic})} and $m^a_th$
+\stoptext
+\stoptyping
+
+we get reported:
+
+\starttabulate[||T|]
+\NC \type{loaded fonts} \NC xits-math.otf xits-mathbold.otf \NC \NR
+\NC \NC dejavuserif-bold.ttf dejavuserif-bolditalic.ttf \NC \NR
+\NC \NC dejavuserif-italic.ttf dejavuserif.ttf \NC \NR
+\NC \type{fonts load time} \NC 0.374 seconds \NC \NR
+\NC \type{runtime} \NC 1.014 seconds, 0.986 pages/second \NC \NR
+\stoptabulate
+
+So, six fonts are loaded and because XITS is used we also preload the math bold
+variant. Loading of text fonts is delayed but in order to initialize math we need to
+preload the math fonts.
+
+If we don't define a bodyfont, a default set gets loaded: Latin Modern. In that
+case we get:
+
+\starttabulate[||T|]
+\NC \type{loaded fonts} \NC latinmodern-math.otf \NC \NR
+\NC \NC lmroman10-bolditalic.otf lmroman12-bold.otf \NC \NR
+\NC \NC lmroman12-italic.otf lmroman12-regular.otf \NC \NR
+\NC \type{fonts load time} \NC 0.265 seconds \NC \NR
+\NC \type{runtime} \NC 0.874 seconds, 1.144 pages/second \NC \NR
+\stoptabulate
+
+Before we had native \OPENTYPE\ Latin Modern math fonts, it took slightly longer
+because we had to load many small \TYPEONE\ fonts and assemble a virtual math font.
+
+As soon as you start mixing more fonts and/or load additional weights and styles
+you will see these times increase. But if you use an already loaded font with
+a different featureset or scaled differently, the burden is rather low. It is
+safe to say that at this moment loading fonts is not a bottleneck.
+
+Applying fonts can be more demanding. For instance if you typeset Arabic or
+Devanagari the amount of node and font juggling definitely influences the total
+runtime. As the code is rather optimized there is not much we can do about it.
+It's the price that comes with flexibility. As far as I can tell getting the same
+results with \PDFTEX\ (if possible at all) or \XETEX\ is not taking less time. If
+you've split up your document in separate files you will seldom run more than a
+dozen pages which is then still bearable.
+
+If you are for instance typesetting a dictionary like document, it does not make
+sense to do all font switches by switching body fonts. Just defining a couple of
+font instances makes more sense and comes at no cost. As this mechanism is already
+quite efficient given its complexity, you should not expect impressive speedups in
+this area.
+
+\stopsubject
+
+\startsubject[title=Manipulations]
+
+The main manipulation that I have to do is to process \XML\ into something
+readable. Using the built||in parser and mapper already has some advantages
+and if applied in the right way it's also rather efficient. The more you restrict
+your queries, the better.
+
+Text manipulations using \LUA\ are often quite fast and seldom the reason for
+seeing slow processing. You can do lots of things at the \LUA\ end and still have
+all the \CONTEXT\ magic by using the \type {context} namespace and function.
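+
+A small made|-|up fragment gives the idea; the \type {words} table is just
+example data:
+
+\starttyping
+-- the words table is made-up demo data
+local words = { "alpha", "beta", "gamma" }
+for i=1,#words do
+    context.bold(words[i]) -- generates \bold{...}
+    context.par()          -- generates \par
+end
+\stoptyping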
+
+\stopsubject
+
+\startsubject[title=Multipass]
+
+You can try to save 1 second on a 20 second run but that is not that impressive
+if you need to process the document three times in order to get your cross
+references right. Okay, you'd save 3 seconds, but still, to get the result you
+need some 60 seconds (unless you have already run the document before). If you
+have a predictable workflow you might know in advance that you only need two
+runs, in which case you can enforce that with \type {--runs=2}. Furthermore you
+can try to
+optimize the style by getting rid of redundant settings and inefficient font
+switches. But no matter what we optimize, unless we have a document with no cross
+references, sectioning and positioning, you often end up with the extra run,
+although \CONTEXT\ tries to minimize the number of runs needed.
+
+\stopsubject
+
+\startsubject[title=Trial runs]
+
+Some mechanisms, like extreme tables, need multiple passes and all but the last
+one are tagged as trial runs. Because in many cases only dimensions matter, we
+can disable some time consuming code in such cases. For instance, at some point
+Alan Braslau and I found out that the new chemical manual ran real slow, mainly
+due to the tens of thousands of \METAPOST\ graphics. Adding support for trial
+runs to the chemical structure macros gave a fourfold improvement. The manual is
+still a slow|-|runner, but that is simply because it has so many runtime
+generated graphics.
+
+\stopsubject
+
+\stopsection
+
+\startsection[title=The \METAPOST\ library]
+
+When the \METAPOST\ library got included we saw a drastic speedup in processing
+documents with lots of graphics. However, when \METAPOST\ got a different number
+system (native, double and decimal) the changed memory model immediately led to
+a slowdown. On one 150 page manual with a graphic on each page I saw the
+\METAPOST\ runtime go up from about half a second up to more than 5 seconds. In
+this case I was able to rewrite some core \METAFUN\ macros to better suit the new
+model, but you might not be so lucky. So more careful coding is needed. Of course
+if you only have a few graphics, you can just ignore the change.
+
+\stopsection
+
+\startsection[title=The \LUA\ interpreter]
+
+Where the \TEX\ part of \LUATEX\ is compiled, the \LUA\ code gets interpreted,
+converted into bytecode, and run by the virtual machine. \LUA\ is by design quite
+portable, which means that the virtual machine is not optimized for a specific
+target. The \LUAJIT\ interpreter on the other hand is written in assembler and
+available for only some platforms, but the virtual machine is about twice as
+fast. The just||in||time part of \LUAJIT\ is not of much help and can even slow
+down processing.
+
+When we moved from \LUA~5.1 to 5.2 we found out that there was some speedup but
+it's hard to say why. There have been changes in the way strings are dealt with
+(\LUA\ hashes strings) and we use lots of strings, really lots. There have been
+changes in the garbage collector and during a run lots of garbage needs to be
+collected. There are some fundamental changes in so called environments and who
+knows what impact that has.
+
+If you ever tried to measure the performance of \LUA, you probably have noticed
+that it is quite fast. This means that it makes no sense to optimize code that
+gets visited only occasionally. But some of the \CONTEXT\ code gets exercised a
+lot, for instance all code that deals with fonts. We use attributes a lot and
+checking them is for good reason not the fastest code. But given the often
+advanced functionality that it makes possible we're willing to pay the price.
+It's also functionality that you seldom need all at the same time and for
+straightforward text only documents all that code is never executed.
+
+When writing \TEX\ or \LUA\ code I spend a lot of time making it as efficient as
+possible in terms of performance and memory usage. The sole reason for that is
+that we happen to process documents where a lot of functionality is combined, so
+if many small speed||ups accumulate to a noticeable performance gain it's worth
+the effort.
+
+So, where does \LUA\ influence runtime? First of all we use \LUA\ to deal with all
+in- and output as well as locating files in the \TEX\ directory structure. Because
+that code is partly shared with the script manager (\type {mtxrun}) it is optimized
+but some more is possible if needed. It is already not the easiest code to read,
+so I don't want to introduce even more obscurity.
+
+Quite some code deals with loading, preparing and caching fonts. That code is
+mostly optimized for memory usage although speed is also okay. This code is only
+called when a font is loaded for the first time (after an update). After that
+loading is a matter of milliseconds. When a text gets typeset and when fonts are
+processed in so called node mode, depending on the script and|/|or enabled
+features, a substantial amount of time is spent in \LUA. There is still some
+complex dealing with inserting kerns, but a future \LUATEX\ will carry kerning
+in the glyph node, so there we can gain some runtime.
+
+If a page has 4000 characters and if font features as well as other manipulations
+demand 10 runs over the text, we have 40.000 checks of nodes and potential
+actions. Each involves an id check, maybe a subtype check, maybe some attribute
+checking and possibly some action. So, if we have 200.000 (or more) function
+calls per page to the \TEX\ end it might add up to a lot. Around the time that we
+went to \LUA~5.2 and played with \LUAJITTEX, the node accessors have been sped
+up. This indeed gave a measurable speedup but not on an average document, only on
+the more extreme documents or features. Because the \MKIV\ \LUA\ code goes from
+experimental to production to final, some improvements are made in the process
+but there is not much to gain there. We just have to wait till computers get
+faster, \CPU\ caches get bigger, branch prediction improves, floating point
+calculations take less time, memory gets faster, and flash storage is the standard.
+
+The \LUA\ code is plugged into the \TEX\ machinery via callbacks. For
+instance each time a box is built several callbacks are triggered, even if it's
+an empty box or just an extra wrapper. Take for instance this:
+
+\starttyping
+\hbox \bgroup
+ \hskip \zeropoint
+ \hbox \bgroup
+ test
+ \egroup
+ \hskip \zeropoint
+\egroup
+\stoptyping
+
+Of course you won't come up with this code as it doesn't do much good but macros
+that you use can definitely produce this. For instance, the zero skip can be a
+left and right margin that happen to be zero. For 10.000 iterations I measured
+0.78 seconds while the next one takes 0.62 seconds:
+
+\starttyping
+\hbox \bgroup
+ \hbox \bgroup
+ test
+ \egroup
+\egroup
+\stoptyping
+
+Why is this? One reason is that a zero skip results in a node and the more nodes
+we have the more memory (de)allocation takes place and the more nodes in the list
+need to be checked. Of course the relative difference is less when we have more
+text. So how can we improve this? The following variant, at the cost of some
+testing, takes just as much time as the version without the skips.
+
+\starttyping
+\hbox \bgroup
+ \hbox \bgroup
+ \scratchdimen\zeropoint
+ \ifdim\scratchdimen=\zeropoint\else\hskip\scratchdimen\fi
+ test
+ \ifdim\scratchdimen=\zeropoint\else\hskip\scratchdimen\fi
+ \egroup
+\egroup
+\stoptyping
+
+As does this one, but the longer the text, the slower it gets as one of the two
+copies needs to be skipped.
+
+\starttyping
+\hbox \bgroup
+ \hbox \bgroup
+ \scratchdimen\zeropoint
+ \ifdim\scratchdimen=\zeropoint
+ test%
+ \else
+ \hskip\scratchdimen
+ test%
+ \hskip\scratchdimen
+ \fi
+ \egroup
+\egroup
+\stoptyping
+
+Of course most speedup is gained when we don't package at all, so we could test
+before we package, but such an optimization is seldom realistic because much more
+goes on and we cannot check for everything. Also, 10.000 is a lot while 0.10
+seconds is something we can live with. By the way, compare the following:
+
+\starttyping
+\hbox \bgroup
+ \hskip\zeropoint
+ test%
+ \hskip\zeropoint
+\egroup
+
+\hbox \bgroup
+ \kern\zeropoint
+ test%
+ \kern\zeropoint
+\egroup
+\stoptyping
+
+The first variant is less efficient than the second one, because a skip
+effectively is a glue node pointing to a specification node while a kern is just
+a simple node with the width stored in it. \footnote {On the \LUATEX\ agenda is
+moving the glue spec into the glue node.} I must admit that I seldom keep in mind
+to use kerns instead of skips when possible if only because one needs to be sure
+to be in the right mode, horizontal or vertical, so additional commands might be
+needed.
+
+\stopsection
+
+\startsection[title=Macros]
+
+Are macros a bottleneck? In practice not really. Of course we have optimized the
+core \CONTEXT\ macros pretty well, but one reason for that is that we have a
+rather extensive set of configuration and definition mechanisms that rely heavily
+on inheritance. Where possible all that code is written in a way that macro
+expansion won't hurt too much. Because of this, users themselves can be more
+liberal in coding. There is a lot going on deep down and if you turn on tracing
+macros you can get horrified. But, not all shown code paths are entered. During the
+move (and rewrite) from \MKII\ to \MKIV\ quite some bottlenecks that resulted from
+limitations of machines and memory have been removed and as a result the macro
+expansion part is somewhat faster, which nicely compensates for the fact that we
+have a more advanced but slower inheritance subsystem. Readability of code and
+speed are probably nicely balanced by now.
+
+Once a macro is read in, its internal representation is pretty efficient. For
+instance references to macro names are just pointers into a hash table. Of
+course, when a macro is seen in your source, that name has to be looked up, but
+that's a fast action. Using short names in the running text for instance really
+doesn't speed up processing much. Switching font sets on the other hand does, as
+then quite some checking happens and the related macros are pretty extensive.
+However, once a font is loaded, references to it are pretty fast. Just keep in mind
+that if you define something inside a group, in most cases it gets forgotten. So,
+if you need something more often, just define it at the outer level.
+
+\stopsection
+
+\startsection[title=Optimizing code]
+
+Optimizing code only makes sense if it is used very often and called frequently
+or when the problem to solve is demanding. An example of code that runs often is
+page building, where we pack together many layout elements. Font switches can
+also be time consuming, if defined wrongly. These can happen for instance for formulas,
+marked words, cross references, margin notes, footnotes (often a complete
+bodyfont switch), table cells, etc. Yet another is clever vertical spacing that
+happens between structural elements. All these mechanisms are reasonably
+optimized.
+
+I can safely say that deep down \CONTEXT\ is not that inefficient, given what it
+has to do. But when a style for instance does redundant or unnecessary massive
+font switches you are wasting runtime. I dare to say that instead of trying to
+speed up code (for instance by redefining macros) you can better spend the time
+in making styles efficient. For instance having 10 \type {\blank}'s in a row
+will work out rather well but takes time. If you know that a section head has no
+raised or lowered text and no math, you can consider using \type {\definefont} to
+define the right size (especially if it is a special size) instead of defining
+an extra bodyfont size and switching to that, as that includes setting up related
+sizes and math.
+
+It might sound like using \LUA\ for some tasks makes \CONTEXT\ slower, but this
+is not true. Of course it's hard to prove because by now we also have more
+advanced font support, cleaner math mechanisms, additional features, especially
+in structure related mechanisms, etc. There are also mechanisms that are
+faster, for instance extreme tables (a follow up on natural tables) and mixed
+column modes. Of course on the previously mentioned 300 pages of simple paragraphs
+with simple Latin text the \PDFTEX\ engine is much faster than \LUATEX, also
+because simple fonts are used. But for many of today's documents this engine is no
+longer an option. For instance in our \XML\ processing in multiple languages,
+\LUATEX\ beats \PDFTEX. There is not that much left to optimize, so most speedup
+has to come from faster machines. And this is not much different from the past:
+processing a 300 page document on a 4.7MHz 8086 architecture was not much fun and
+we're not even talking of advanced macros here. Faster machines made more clever
+and user friendly systems possible but at the cost of runtime, so even if
+machines have become many times faster, processing still takes time. On the other
+hand, \CONTEXT\ will not become more complex than it is now, so from now on we
+can benefit from faster \CPU's, memory and storage.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-threequarters.tex b/doc/context/sources/general/manuals/about/about-threequarters.tex
new file mode 100644
index 000000000..fe6f4a95b
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-threequarters.tex
@@ -0,0 +1,330 @@
+% language=uk
+
+\startcomponent about-threequarters
+
+\environment about-environment
+
+\logo[CRITED]{CritEd}
+
+\startchapter[title={\LUATEX\ 0.79}]
+
+% Hans Hagen, PRAGMA ADE, April 2014
+
+\startsection[title=Introduction]
+
+To some it might look as if not much has been done in \LUATEX\ development but
+this is not true. First of all, the 2013 versions (0.75-0.77) are quite stable
+and can be used for production so there is not much buzz about new things.
+\CONTEXT\ users normally won't even notice changes because most is encapsulated
+in functionality that itself won't change. The binaries on the \type
+{contextgarden.net} are always the latest so an update results in binaries that
+are in sync with the \LUA\ and \TEX\ code. Okay, behaviour might become better
+but that could also be the side effect of better coding. Of course some more
+fundamental changes can result in temporary bugs but those are normally easy to
+solve.
+
+Here I will only mention the most important work done. I'll leave out the
+technical details as they can be found in the manual and in articles that were
+written during development. The version discussed is 0.79.
+
+\stopsection
+
+\startsection[title=Speed]
+
+One of the things we spent a lot of time on is speed. This is of course of more
+importance for a system like \CONTEXT\ that can spend more than half its time in
+\LUA, but eventually we all benefit from it. For the average user it doesn't
+matter much if a run takes a few seconds but in automated workflows these
+accumulate and if a process has to produce 5 documents of 20 pages (each
+demanding a few runs) or a few documents of several hundreds of pages, it might
+make a difference. In the \CRITED\ project we aim for complex documents produced
+from \XML\ at a rate of 20 pages per second, at least for stock \LUATEX.
+\footnote {This might look slow but a lot is happening there. A simple 100 page
+document with one word per page processes at more than 500 pages per second but
+this is hard to match with more realistic documents. When processing data from
+databases using the \CLD\ interface getting 50 pages per second is no problem.} In
+an edit|-|preview cycle it feels better if we don't use more than half a second
+for a couple of pages: loading the \TEX\ format, initializing the \LUA\ modules,
+loading fonts, typesetting and producing a proper \PDF\ file. We also want to be
+prepared for the ultra portable computers where multiple cores compensate for the
+lower frequency, which harms \TEX, being a sequential processor using one core only.
+
+An important aspect of speedup is that it must not obscure the code. This is why
+the easiest way to achieve it is to use a faster variant of \LUA, and \LUAJIT\
+with its faster virtual machine, is a solution for that. We are aware of the
+fact that processors not necessarily become faster, but that on the other hand
+memory becomes larger. Disk speed also got better with the arrival of
+flash based storage. Because \LUATEX\ should run smoothly on future portable
+devices, the more we can gain now, the better it gets in the future. A decent
+basic performance is possible and we don't have to focus too much on memory and
+disk access and mostly need to keep an eye on basic \CPU\ cycles. Although we
+have some ideas about improving performance, tests demonstrate that \LUATEX\
+is not doing that badly and we don't have to change its internals. In fact, if we
+do, it might as well result in a drastic slowdown!
+
+One interesting performance factor is console output. Because \TEX\ outputs
+immediately with hardly any buffering, it depends a lot on the speed of console
+output. This itself depends on what console is used. \UNIX\ consoles normally
+have some buffering and refresh delay built in. There the speed depends on what
+fonts are used and to what extend the output gets interpreted (escape sequences
+are an example). I've run into cases where a run took seconds more because of a
+bad choice of fonts. On \WINDOWS\ it's more complicated since there the standard
+console (like \TEX) is unbuffered. The good news is that there are several
+alternatives that perform quite well, like console2 and conemu. These
+alternatives buffer output and have refresh delays. But still, on a very high res
+screen, with a large console window logging has impact. Interesting is that when
+I run from the editor (SciTE) output is pretty fast, so normally I never notice
+much of a slowdown. Of course these kind of performance issues can hit you more
+when you work in a remote terminal.
+
+The reason why I mention this is that in order to provide the user with feedback about
+issues, there has to be some logging and depending on the kind of use, more or
+less is needed. This means that on the \CONTEXT\ mailing list we sometimes get
+complaints about the amount of logging. It is for this reason that much logging is
+optional and all logging can be disabled as well. Because we go through \LUA\
+we have some control over efficiency too. In the current \LUATEX\ release most
+logging can now be intercepted, including error messages.
+
+Talking of a slowdown, in the \CRITED\ project we have to deal with real large
+indices (tens of thousands of entries) and we found out that in the case of
+interactive variants (register entry to text and back) the use of \LUAJITTEX\
+could bring a run to a grinding halt. In the end, after much testing we
+figured out that a suboptimal string hashing function was the culprit and we did
+extensive tests with the \LUAJIT, \LUA\ 5.1 and \LUA\ 5.2 variants. We ended
+up replacing the \LUAJIT\ hash function by the \LUA\ 5.1 one, which is a
+relatively easy operation. Because \LUAJIT\ can address less memory than regular
+\LUA\ it will always be a matter of testing if \LUAJITTEX\ can be used instead of
+\LUATEX. Standard document processing (reports and such) is normally no problem
+but processing large amounts of data from databases can be an issue.
+
+In the process of cleaning up the code base we will surely also find ways to
+make things run even smoother. But, in any case, version 0.80 is already a good
+benchmark for what can be achieved.
+
+\stopsection
+
+\startsection[title=Nodes]
+
+One of the bottlenecks in the hybrid approach is crossing the so called C
+boundary. This is not really a bottleneck, unless we're talking of many millions
+of function calls. In practice this only happens in for instance more extreme
+font handling (Devanagari or sometimes Arabic). If performance is really an issue
+one can fall back on a more direct node access model. Of course the overhead of
+access should be compared to other related activities: one can gain .25 seconds
+on a run by using the direct access model, but if the whole run takes 25
+seconds, it can be neglected. If the price paid for it is less readable code it
+should only be done deep down a macro package where no user even sees the code.
+We use this access model in the \CONTEXT\ core modules and so far it looks quite
+okay, especially for more extensive manipulations. The gain in speed is quite
+noticeable if you use the more advanced features of \CONTEXT.
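+
+A minimal sketch of what such direct access looks like; \type {head} is assumed
+to come from some callback:
+
+\starttyping
+-- cast the userdata node to a direct reference (an index)
+local d        = node.direct.todirect(head)
+local glyph_id = node.id("glyph")
+while d do
+    if node.direct.getid(d) == glyph_id then
+        -- do something cheap with the glyph
+    end
+    d = node.direct.getnext(d)
+end
+\stoptyping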
+
+There may be some changes in the node model, but nothing drastic, as the current
+model is quite ok and also stays close to original \TEX\ so that existing
+documentation still applies. One of the changes will be that glue spec (sub)nodes
+will disappear and glue nodes will carry that information. Direction whatsits
+will become first class nodes as they are part of the concept (whatsits
+normally relate to extensions) and the same might happen with image nodes. As a
+side effect we can restructure the code so that it becomes more readable. Some
+experimental \PDFTEX\ functionality will be removed as it can be done better with
+callbacks.
+
+\stopsection
+
+\startsection[title=The parbuilder and HZ]
+
+As we started from \PDFTEX\ we also inherit its experimental code and character.
+One of the objectives is to separate font- and backend as well as possible. We
+have already achieved a lot and apart from bringing consistency in the code, the
+biggest change has been a partial rewrite of the hz code, especially the way
+fonts are managed. Instead of making copies of fonts with different properties,
+we now carry information in the relevant nodes. The backend code already got away
+from multiple fonts by using transformation of the base font instead of
+additional font instances, so this was a natural adaptation. This was actually
+triggered by the fact that a \LUA\ based par builder demonstrated that this made
+sense. The new approach uses less memory and is a bit faster (at least in
+theory).
+
+In callbacks it makes life easier when a node list has a predictable structure.
+For instance, the result of a paragraph broken into lines still has discretionary
+nodes. Is that really needed? Lines can have left- or rightskip nodes, depending
+on whether they were set. Math nodes can disappear as part of a cleanup in
+the line break code, but this is unfortunate when one expects them to be
+somewhere in the list in a callback. All this will be made consistent. These are
+issues we will look into on the way to version 1.0.
+
+I occasionally play with the \LUA\ based par builder and it is quite compatible
+even if we take the floating point \LUA\ aspect into account. However when using
+hz the outcome is different: sometimes better, sometimes worse. Personally I
+don't care too much as long as it's consistent. Features like hz are for special
+cases anyway and can never be stable over years if only because fonts evolve. And
+we're talking of border case typesetting: narrow columns that no matter what method is
+used will never look okay. \footnote {Some people don't like larger spaces, others
+don't like stretched glyphs.}
+
+\stopsection
+
+\startsection[title=The backend]
+
+The separation of front- and backend is more a pet project. There is some
+experimental code that will get removed or integrated. We try to make the backend
+consistent from the \TEX\ as well as the \LUA\ end and some of this is reflected in
+additional features and callbacks.
+
+Some of the variables that can be set (the \LUA\ counterparts of the \type {\pdf..}
+token registers at the \TEX\ end) are now consistent with each other and avoid
+going via pseudo tokenization. Typical aspects of a backend that only a few users
+will notice but nevertheless needed work.
+
+The merge of engines also resulted in inconsistencies in function names, like using
+\type {pdf_} in function names where nothing \PDF\ related is involved.
+
+\stopsection
+
+\startsection[title=Backlinks]
+
+In callbacks we mostly deal with node lists. At the \TEX\ end of course we also
+have these lists but there it is quite clear what gets done with them. This means
+that there is no need for doubly linked lists. It also means that what is known
+as the head of a list can in fact be in the middle. The nesting model that is
+characteristic for \TEX\ has resulted in stacks and current pointers. The code uses so
+called temp nodes to point at the head node.
+
+As a consequence in \LUATEX, where we present a doubly linked list, before the
+current version one could run into cases where for instance a head node had a
+prev pointer, even one that made no sense. As said, no big deal in \TEX\ but in
+the hands of a user who manipulates the node list it can be dramatic. The current
+version has clean head nodes as well as consistent backlinks, but of course we
+keep the internals mostly unchanged because we stay close to the Knuthian
+original when possible. \footnote {Even with extensions the original
+documentation still covers most of what happens.}
+
+\stopsection
+
+\startsection[title=Properties]
+
+Sometimes you want to associate additional information with a node. A natural way
+to do this is attributes. These can be set at the \TEX\ and \LUA\ end and
+accessed at the \LUA\ end. At the \LUA\ end one can have tables with nodes as
+indices and store extra information but that has the disadvantage that one has no
+clue if such information is current: nodes come and go and are recycled.
+
+For this reason we now have a global properties table where each allocated node
+can have a table with whatever information users might like to store. This itself
+is not special, but the nice thing is that when a node is freed, that information
+is also freed. So, you cannot run into old data. When nodes are copied, their
+properties are also copied. The overhead, when not used, is close to zero, which is
+always an objective when extending the core engine.
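+
+A rough sketch of the idea, with a made|-|up key:
+
+\starttyping
+-- assumes that the mechanism has been enabled with set_properties_mode
+local properties = node.get_properties_table()
+
+local n = node.new("glyph")
+properties[n] = { comment = "whatever we want to remember" }
+
+-- ... use the node ...
+
+node.free(n) -- the property entry goes away together with the node
+\stoptyping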
+
+Of course this model demands that the macro package somehow controls consistent
+use but that is not different from what already has to be done. Also, simple
+extensions like this help us avoid hard coded solutions, which is something we
+want anyway.
+
+\stopsection
+
+\startsection[title=\LUA\ calls]
+
+We have so called user nodes that can carry a number, string, token list or node
+list. We have now added \LUA\ values to this repertoire. In fact, we could now use
+only a \LUA\ variable, and in retrospect we might have done so, but for the moment
+we stick to the current model of several basic types. The \LUA\ variable can be
+anything and it is up to the user (in some callback) to deal with it.
+
+User nodes are not to be confused with late \LUA\ nodes. You can store a function
+call in a user node but that's about it. You can at a later moment decide to call
+that function but it's still an explicit action. The value of a late \LUA\ node
+on the other hand is dealt with automatically during shipout. When the value is a
+string it gets interpreted as \LUA, but what is new is that when the value is a function
+it will get called. At that moment we have access to some of the current backend
+properties, like locations.
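+
+A sketch of both cases; the values and the user id are arbitrary and we assume
+the usual whatsit fields here:
+
+\starttyping
+-- a user node carrying a lua value; dealing with it later is up to us
+local u = node.new("whatsit","user_defined")
+u.user_id = 123456 -- some private id
+u.type    = 108    -- the byte value of "l", i.e. a lua value
+u.value   = function() print("only called when we decide to call it") end
+node.write(u)
+
+-- a late lua node with a function instead of a string
+local l = node.new("whatsit","late_lua")
+l.data = function()
+    -- called automatically during shipout
+end
+node.write(l)
+\stoptyping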
+
+\stopsection
+
+\startsection[title=Artefacts]
+
+Because \LUATEX\ took code from \PDFTEX, which is built upon \ETEX, which in turn
+is an extension to \TEX, and from \OMEGA, which also extends \TEX, there is code
+that no longer makes sense for us. Combine that with the fact that some code
+carries signatures of the \PASCAL\ to \CCODE\ translation, and we have some
+cleanup to do as a follow up on the not to be underestimated move to \CCODE. This
+is an ongoing process but also fun to do. Luigi and I spend many hours exploring
+avenues and have
+interesting Skype sessions that can easily sidetrack, and with Taco getting more
+time for \LUATEX\ we expect to get most of our (still growing) todo list done.
+
+Because \LUATEX\ started out as an experiment, there is some old code around. For
+instance, we used to have multiple instances and this still shows in some places.
+We can simplify the \LUA\ to \TEX\ interface a bit and clean up the \LUA\ global
+state handling, but we're not in a big hurry with this. Experiments have been
+done with some extensions to the writer code but they are held back until after
+the cleanup.
+
+In a similar fashion we have sped up the way \LUA\ keywords and values get
+resolved. Already early in the development we did this for critical code like
+passing \LUA\ font tables to \TEX, followed by accessing nodes, but now we have
+done that for most code. There is still some to do, but this work not only brings
+consistency, it also helps to document the interface. Of course we
+learn a lot about the \LUA\ internals too. The C macro system is of great help
+here, although the mentioned \PASCAL\ conversion (web2c) and merged engines have
+resulted in some inconsistency that needs to be cleaned up before we start
+documenting more of the internals (another subproject we want to finish before
+retirement).
+
+\stopsection
+
+\startsection[title=Callbacks]
+
+There are a few more callbacks and most of them come from the tracker. The
+backend now has page related callbacks, and the \LUA\ error handler can be
+intercepted. Error messages that consist of multiple pieces are handled better
+too. When a file is opened or closed a callback is now possible. Technically we
+could have combined this with the already present callbacks but, as
+synchronization matters in \TEX, these new callbacks relate to the current
+message callbacks that show \type {[]}, \type {{}}, \type {<>} and|/|or
+\type {<<>>} fenced filenames, where the latter were introduced in successive
+backend code.
+
+\stopsection
+
+\startsection[title=\LUA]
+
+We currently use \LUA\ 5.2 but the next version will show up soon. Because \LUA\
+5.3 introduces a hybrid number model, this will be one of the next things to play
+with. It could work out well, because \TEX\ is internally integer based (scaled
+points) but you never know. It could be that we need to check existing code for
+serialization and printing issues but normally that will not lead to
+compatibility issues. We could even decide to stick to \LUA\ 5.2 or at least wait
+till all has stabilized. There is some basic support for \UTF\ in 5.3 but in
+\CONTEXT\ we don't depend on that. In practice hardly any processing takes place
+that assumes that \UTF\ is more than a sequence of bytes and \LUA\ can handle
+bytes quite well.
+
+\stopsection
+
+\startsection[title=\CONTEXT]
+
+Of course the development of \LUATEX\ has consequences for \CONTEXT. For
+instance, existing code is used to test alternative solutions and sometimes these
+make it into the core. Some new features are used immediately, like the more
+consistent control over \PDF\ properties, but others have to wait till the new
+binary is more widespread. \footnote {Normally dissemination is rather fast
+because the contextgarden provides recent binaries. The new \WINDOWS\ binaries
+often show up within hours after the repository has been updated.}
+
+Some of the improvement in the code base directly relate to \CONTEXT\ activities.
+For instance the \CRITED\ project (complex critical editions) uncovered some
+hashing issues with \LUAJIT\ that have been taken care of now. The (small)
+additions to the \PDF\ backend resulted in a partial cleanup of relatively old
+\CONTEXT\ backend code.
+
+Although some more complex mechanisms, like multi|-|columns, are being reworked,
+we still need to open up a bit more of the \TEX\ internals, so we have some
+work to do. As usual, version 0.80 doesn't mean that only 0.20 has to be done to
+get to 1.00, as development is not a linear process. The jump from 0.77 to 0.79
+for instance involved a lot of work (exploration as well as testing). But as long
+as it's fun to do, time doesn't matter much. As we've said before: we're in no
+hurry.
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about-titlepage.tex b/doc/context/sources/general/manuals/about/about-titlepage.tex
new file mode 100644
index 000000000..392fdb1be
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about-titlepage.tex
@@ -0,0 +1,31 @@
+\startcomponent about-titlepage
+
+\environment about-environment
+
+\setvariables
+ [document]
+ [title=about,
+ subtitle=luatex and context,
+ author=hans hagen]
+
+\startpagemakeup
+
+ \startMPcode
+
+ StartPage ;
+
+ fill Page enlarged 5mm withcolor \MPcolor{maincolor} ;
+
+ draw anchored.lrt(image(draw textext("\getvariable{document}{title}") xsized(.750PaperWidth) withcolor white),(lrcorner Page) shifted (-PaperWidth/20, PaperWidth/ 5)) ;
+ draw anchored.lrt(image(draw textext("\getvariable{document}{subtitle}") xsized(.750PaperWidth) withcolor white),(lrcorner Page) shifted (-PaperWidth/20, PaperWidth/10)) ;
+ draw anchored.urt(image(draw textext("\getvariable{document}{author}") xsized(.375PaperWidth) rotated 90 withcolor white),(urcorner Page) shifted (-PaperWidth/20,-PaperWidth/20)) ;
+
+ setbounds currentpicture to Page ;
+
+ StopPage ;
+
+ \stopMPcode
+
+\stoppagemakeup
+
+\stopcomponent
diff --git a/doc/context/sources/general/manuals/about/about.tex b/doc/context/sources/general/manuals/about/about.tex
new file mode 100644
index 000000000..96a1bea6d
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/about.tex
@@ -0,0 +1,46 @@
+% author : Hans Hagen
+% copyright : PRAGMA ADE & ConTeXt Development Team
+% license : Creative Commons Attribution ShareAlike 4.0 International
+% reference : pragma-ade.nl | contextgarden.net | texlive (related) distributions
+% origin : the ConTeXt distribution
+%
+% comment : Because this manual is distributed with TeX distributions it comes with a rather
+% liberal license. We try to adapt these documents to upgrades in the (sub)systems
+% that they describe. Using parts of the content otherwise can therefore conflict
+% with existing functionality and we cannot be held responsible for that. Many of
+% the manuals contain characteristic graphics and personal notes or examples that
+% make no sense when used out-of-context.
+%
+% comment : Some chapters have been published in TugBoat, the NTG Maps, the ConTeXt Group
+% journal or otherwise. Thanks to the editors for corrections.
+
+\environment about-environment
+
+\startproduct about
+
+\component about-titlepage
+
+\startfrontmatter
+ \component about-contents
+ \component about-introduction
+\stopfrontmatter
+
+\startbodymatter
+ \component about-mathstackers
+ \component about-speed
+ \component about-mathstyles
+ \component about-calls
+ \component about-jitting
+ \component about-mobility
+ \component about-hz
+ \component about-nodes
+ \component about-expanding
+ \component about-nuts
+ \component about-hashing
+ \component about-properties
+ \component about-luafunctions
+ \component about-metafun
+ \component about-threequarters
+\stopbodymatter
+
+\stopproduct
diff --git a/doc/context/sources/general/manuals/about/demo-data.lua b/doc/context/sources/general/manuals/about/demo-data.lua
new file mode 100644
index 000000000..96b55acd0
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/demo-data.lua
@@ -0,0 +1,10 @@
+return {
+ { 1, 2 },
+ { 2, 4 },
+ { 3, 3 },
+ { 4, 2 },
+ { 5, 2 },
+ { 6, 3 },
+ { 7, 4 },
+ { 8, 1 },
+} \ No newline at end of file
diff --git a/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-JIT20.lua b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-JIT20.lua
new file mode 100644
index 000000000..ab09ee429
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-JIT20.lua
@@ -0,0 +1,406 @@
+return {
+ ["comment"]="jit 20 hashing",
+ ["datetime"]=1393683029,
+ ["samples"]={
+ {
+ ["center"]=1.125,
+ ["edges"]=1.656,
+ ["left"]=1.125,
+ ["none"]=0.016,
+ ["right"]=1.125,
+ ["text"]="a",
+ },
+ {
+ ["center"]=1.11,
+ ["edges"]=1.594,
+ ["left"]=1.109,
+ ["none"]=0,
+ ["right"]=1.141,
+ ["text"]="ab",
+ },
+ {
+ ["center"]=1.124,
+ ["edges"]=1.651,
+ ["left"]=1.094,
+ ["none"]=0.016,
+ ["right"]=1.109,
+ ["text"]="abc",
+ },
+ {
+ ["center"]=1.014,
+ ["edges"]=1.653,
+ ["left"]=1.147,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.149,
+ ["text"]="abcd",
+ },
+ {
+ ["center"]=1.008,
+ ["edges"]=1.646,
+ ["left"]=1.142,
+ ["none"]=0.0080000000000027,
+ ["right"]=1.153,
+ ["text"]="abcde",
+ },
+ {
+ ["center"]=1.014,
+ ["edges"]=1.652,
+ ["left"]=1.157,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.14,
+ ["text"]="abcdef",
+ },
+ {
+ ["center"]=1.006,
+ ["edges"]=1.652,
+ ["left"]=1.155,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.214,
+ ["text"]="abcdefg",
+ },
+ {
+ ["center"]=1.17,
+ ["edges"]=1.642,
+ ["left"]=1.256,
+ ["none"]=0.0080000000000027,
+ ["right"]=1.169,
+ ["text"]="abcdefgh",
+ },
+ {
+ ["center"]=1.169,
+ ["edges"]=1.644,
+ ["left"]=1.216,
+ ["none"]=0.0080000000000027,
+ ["right"]=2.557,
+ ["text"]="abcdefghi",
+ },
+ {
+ ["center"]=1.172,
+ ["edges"]=1.636,
+ ["left"]=1.296,
+ ["none"]=0.0090000000000003,
+ ["right"]=2.048,
+ ["text"]="abcdefghij",
+ },
+ {
+ ["center"]=1.172,
+ ["edges"]=1.639,
+ ["left"]=2.841,
+ ["none"]=0.0079999999999956,
+ ["right"]=2.621,
+ ["text"]="abcdefghijk",
+ },
+ {
+ ["center"]=1.196,
+ ["edges"]=1.638,
+ ["left"]=1.761,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.977,
+ ["text"]="abcdefghijkl",
+ },
+ {
+ ["center"]=1.201,
+ ["edges"]=1.635,
+ ["left"]=1.589,
+ ["none"]=0.0079999999999956,
+ ["right"]=2.56,
+ ["text"]="abcdefghijklm",
+ },
+ {
+ ["center"]=1.194,
+ ["edges"]=1.634,
+ ["left"]=1.592,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.983,
+ ["text"]="abcdefghijklmn",
+ },
+ {
+ ["center"]=1.2,
+ ["edges"]=1.637,
+ ["left"]=2.722,
+ ["none"]=0.0090000000000003,
+ ["right"]=2.537,
+ ["text"]="abcdefghijklmno",
+ },
+ {
+ ["center"]=1.221,
+ ["edges"]=1.639,
+ ["left"]=2.279,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.955,
+ ["text"]="abcdefghijklmnop",
+ },
+ {
+ ["center"]=1.219,
+ ["edges"]=1.639,
+ ["left"]=1.889,
+ ["none"]=0.0090000000000003,
+ ["right"]=2.511,
+ ["text"]="abcdefghijklmnopq",
+ },
+ {
+ ["center"]=1.202,
+ ["edges"]=1.652,
+ ["left"]=1.157,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.035,
+ ["text"]="abcdefghijklmnopqr",
+ },
+ {
+ ["center"]=1.203,
+ ["edges"]=1.635,
+ ["left"]=1.486,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.583,
+ ["text"]="abcdefghijklmnopqrs",
+ },
+ {
+ ["center"]=1.224,
+ ["edges"]=1.643,
+ ["left"]=1.404,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.012,
+ ["text"]="abcdefghijklmnopqrst",
+ },
+ {
+ ["center"]=1.224,
+ ["edges"]=1.639,
+ ["left"]=1.056,
+ ["none"]=0.0089999999999861,
+ ["right"]=2.56,
+ ["text"]="abcdefghijklmnopqrstu",
+ },
+ {
+ ["center"]=1.223,
+ ["edges"]=1.648,
+ ["left"]=1.111,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.008,
+ ["text"]="abcdefghijklmnopqrstuv",
+ },
+ {
+ ["center"]=1.226,
+ ["edges"]=1.648,
+ ["left"]=1.084,
+ ["none"]=0.0089999999999861,
+ ["right"]=2.555,
+ ["text"]="abcdefghijklmnopqrstuvw",
+ },
+ {
+ ["center"]=1.239,
+ ["edges"]=1.645,
+ ["left"]=1.071,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.951,
+ ["text"]="abcdefghijklmnopqrstuvwx",
+ },
+ {
+ ["center"]=1.239,
+ ["edges"]=1.645,
+ ["left"]=1.048,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.518,
+ ["text"]="abcdefghijklmnopqrstuvwxy",
+ },
+ {
+ ["center"]=1.234,
+ ["edges"]=1.635,
+ ["left"]=1.062,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.069,
+ ["text"]="abcdefghijklmnopqrstuvwxyz",
+ },
+ {
+ ["center"]=1.236,
+ ["edges"]=1.636,
+ ["left"]=1.076,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.616,
+ ["text"]="abcdefghijklmnopqrstuvwxyzA",
+ },
+ {
+ ["center"]=1.26,
+ ["edges"]=1.639,
+ ["left"]=1.085,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.065,
+ ["text"]="abcdefghijklmnopqrstuvwxyzAB",
+ },
+ {
+ ["center"]=1.27,
+ ["edges"]=1.651,
+ ["left"]=1.06,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.671,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABC",
+ },
+ {
+ ["center"]=1.274,
+ ["edges"]=1.648,
+ ["left"]=1.117,
+ ["none"]=0.0099999999999909,
+ ["right"]=2.075,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCD",
+ },
+ {
+ ["center"]=1.27,
+ ["edges"]=1.652,
+ ["left"]=1.056,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.631,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDE",
+ },
+ {
+ ["center"]=1.294,
+ ["edges"]=1.656,
+ ["left"]=1.09,
+ ["none"]=0.0080000000000098,
+ ["right"]=2.048,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEF",
+ },
+ {
+ ["center"]=1.301,
+ ["edges"]=1.647,
+ ["left"]=1.079,
+ ["none"]=0.0089999999999861,
+ ["right"]=2.548,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFG",
+ },
+ {
+ ["center"]=1.301,
+ ["edges"]=1.653,
+ ["left"]=1.06,
+ ["none"]=0.0080000000000382,
+ ["right"]=2.043,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGH",
+ },
+ {
+ ["center"]=1.347,
+ ["edges"]=1.649,
+ ["left"]=1.0530000000001,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.618,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHI",
+ },
+ {
+ ["center"]=1.388,
+ ["edges"]=1.643,
+ ["left"]=1.086,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.018,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJ",
+ },
+ {
+ ["center"]=1.417,
+ ["edges"]=1.667,
+ ["left"]=1.034,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.535,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJK",
+ },
+ {
+ ["center"]=1.43,
+ ["edges"]=1.639,
+ ["left"]=1.163,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.018,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL",
+ },
+ {
+ ["center"]=1.454,
+ ["edges"]=1.643,
+ ["left"]=1.051,
+ ["none"]=0.0080000000000382,
+ ["right"]=2.548,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM",
+ },
+ {
+ ["center"]=1.489,
+ ["edges"]=1.639,
+ ["left"]=1.117,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.98,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN",
+ },
+ {
+ ["center"]=1.495,
+ ["edges"]=1.637,
+ ["left"]=1.051,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.51,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO",
+ },
+ {
+ ["center"]=1.498,
+ ["edges"]=1.642,
+ ["left"]=1.052,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.069,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP",
+ },
+ {
+ ["center"]=1.502,
+ ["edges"]=1.642,
+ ["left"]=1.084,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.643,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ",
+ },
+ {
+ ["center"]=1.524,
+ ["edges"]=1.641,
+ ["left"]=1.172,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.052,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR",
+ },
+ {
+ ["center"]=1.523,
+ ["edges"]=1.649,
+ ["left"]=1.064,
+ ["none"]=0.0080000000000382,
+ ["right"]=2.61,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS",
+ },
+ {
+ ["center"]=1.522,
+ ["edges"]=1.64,
+ ["left"]=1.193,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.04,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST",
+ },
+ {
+ ["center"]=1.509,
+ ["edges"]=1.64,
+ ["left"]=1.029,
+ ["none"]=0.0090000000000146,
+ ["right"]=2.557,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU",
+ },
+ {
+ ["center"]=1.533,
+ ["edges"]=1.642,
+ ["left"]=1.172,
+ ["none"]=0.0089999999999577,
+ ["right"]=2.038,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV",
+ },
+ {
+ ["center"]=1.541,
+ ["edges"]=1.645,
+ ["left"]=1.078,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.586,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW",
+ },
+ {
+ ["center"]=1.535,
+ ["edges"]=1.643,
+ ["left"]=1.114,
+ ["none"]=0.0079999999999814,
+ ["right"]=2.107,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX",
+ },
+ },
+}
diff --git a/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51-40-6.lua b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51-40-6.lua
new file mode 100644
index 000000000..91a7b34f0
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51-40-6.lua
@@ -0,0 +1,406 @@
+return {
+ ["comment"]="lua 51 max 40 hash 2^6 bytes",
+ ["datetime"]=1393867414,
+ ["samples"]={
+ {
+ ["center"]=1.094,
+ ["edges"]=1.684,
+ ["left"]=1.078,
+ ["none"]=0.016,
+ ["right"]=1.078,
+ ["text"]="a",
+ },
+ {
+ ["center"]=1.116,
+ ["edges"]=1.657,
+ ["left"]=1.098,
+ ["none"]=0.008,
+ ["right"]=1.112,
+ ["text"]="ab",
+ },
+ {
+ ["center"]=1.109,
+ ["edges"]=1.646,
+ ["left"]=1.091,
+ ["none"]=0.0079999999999991,
+ ["right"]=1.108,
+ ["text"]="abc",
+ },
+ {
+ ["center"]=1.126,
+ ["edges"]=1.653,
+ ["left"]=1.095,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.108,
+ ["text"]="abcd",
+ },
+ {
+ ["center"]=1.131,
+ ["edges"]=1.647,
+ ["left"]=1.099,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.104,
+ ["text"]="abcde",
+ },
+ {
+ ["center"]=1.135,
+ ["edges"]=1.648,
+ ["left"]=1.102,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.11,
+ ["text"]="abcdef",
+ },
+ {
+ ["center"]=1.13,
+ ["edges"]=1.65,
+ ["left"]=1.099,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.113,
+ ["text"]="abcdefg",
+ },
+ {
+ ["center"]=1.123,
+ ["edges"]=1.64,
+ ["left"]=1.108,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.116,
+ ["text"]="abcdefgh",
+ },
+ {
+ ["center"]=1.127,
+ ["edges"]=1.646,
+ ["left"]=1.107,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.115,
+ ["text"]="abcdefghi",
+ },
+ {
+ ["center"]=1.132,
+ ["edges"]=1.645,
+ ["left"]=1.114,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.12,
+ ["text"]="abcdefghij",
+ },
+ {
+ ["center"]=1.137,
+ ["edges"]=1.646,
+ ["left"]=1.121,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.126,
+ ["text"]="abcdefghijk",
+ },
+ {
+ ["center"]=1.144,
+ ["edges"]=1.646,
+ ["left"]=1.115,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.121,
+ ["text"]="abcdefghijkl",
+ },
+ {
+ ["center"]=1.158,
+ ["edges"]=1.648,
+ ["left"]=1.117,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.128,
+ ["text"]="abcdefghijklm",
+ },
+ {
+ ["center"]=1.168,
+ ["edges"]=1.655,
+ ["left"]=1.121,
+ ["none"]=0.010000000000005,
+ ["right"]=1.129,
+ ["text"]="abcdefghijklmn",
+ },
+ {
+ ["center"]=1.174,
+ ["edges"]=1.657,
+ ["left"]=1.12,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.132,
+ ["text"]="abcdefghijklmno",
+ },
+ {
+ ["center"]=1.205,
+ ["edges"]=1.65,
+ ["left"]=1.118,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.127,
+ ["text"]="abcdefghijklmnop",
+ },
+ {
+ ["center"]=1.232,
+ ["edges"]=1.655,
+ ["left"]=1.115,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.129,
+ ["text"]="abcdefghijklmnopq",
+ },
+ {
+ ["center"]=1.263,
+ ["edges"]=1.66,
+ ["left"]=1.079,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.134,
+ ["text"]="abcdefghijklmnopqr",
+ },
+ {
+ ["center"]=1.273,
+ ["edges"]=1.656,
+ ["left"]=1.138,
+ ["none"]=0.0080000000000098,
+ ["right"]=1.134,
+ ["text"]="abcdefghijklmnopqrs",
+ },
+ {
+ ["center"]=1.306,
+ ["edges"]=1.659,
+ ["left"]=1.123,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.134,
+ ["text"]="abcdefghijklmnopqrst",
+ },
+ {
+ ["center"]=1.331,
+ ["edges"]=1.663,
+ ["left"]=1.124,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.137,
+ ["text"]="abcdefghijklmnopqrstu",
+ },
+ {
+ ["center"]=1.346,
+ ["edges"]=1.677,
+ ["left"]=1.135,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.15,
+ ["text"]="abcdefghijklmnopqrstuv",
+ },
+ {
+ ["center"]=1.349,
+ ["edges"]=1.682,
+ ["left"]=1.137,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.151,
+ ["text"]="abcdefghijklmnopqrstuvw",
+ },
+ {
+ ["center"]=1.326,
+ ["edges"]=1.662,
+ ["left"]=1.12,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.131,
+ ["text"]="abcdefghijklmnopqrstuvwx",
+ },
+ {
+ ["center"]=1.326,
+ ["edges"]=1.677,
+ ["left"]=1.12,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.134,
+ ["text"]="abcdefghijklmnopqrstuvwxy",
+ },
+ {
+ ["center"]=1.329,
+ ["edges"]=1.689,
+ ["left"]=1.122,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.136,
+ ["text"]="abcdefghijklmnopqrstuvwxyz",
+ },
+ {
+ ["center"]=1.328,
+ ["edges"]=1.706,
+ ["left"]=1.126,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.147,
+ ["text"]="abcdefghijklmnopqrstuvwxyzA",
+ },
+ {
+ ["center"]=1.329,
+ ["edges"]=1.722,
+ ["left"]=1.13,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.145,
+ ["text"]="abcdefghijklmnopqrstuvwxyzAB",
+ },
+ {
+ ["center"]=4.739,
+ ["edges"]=1.758,
+ ["left"]=1.14,
+ ["none"]=0.0080000000000098,
+ ["right"]=1.155,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABC",
+ },
+ {
+ ["center"]=5.212,
+ ["edges"]=1.778,
+ ["left"]=1.147,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.169,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCD",
+ },
+ {
+ ["center"]=5.438,
+ ["edges"]=1.784,
+ ["left"]=1.173,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.195,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDE",
+ },
+ {
+ ["center"]=5.288,
+ ["edges"]=1.782,
+ ["left"]=1.175,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.2,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEF",
+ },
+ {
+ ["center"]=5.698,
+ ["edges"]=1.797,
+ ["left"]=1.181,
+ ["none"]=0.0080000000000098,
+ ["right"]=1.201,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFG",
+ },
+ {
+ ["center"]=5.676,
+ ["edges"]=1.805,
+ ["left"]=1.201,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.218,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGH",
+ },
+ {
+ ["center"]=5.933,
+ ["edges"]=1.822,
+ ["left"]=1.215,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.23,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHI",
+ },
+ {
+ ["center"]=5.795,
+ ["edges"]=1.83,
+ ["left"]=1.23,
+ ["none"]=0.0089999999999577,
+ ["right"]=1.251,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJ",
+ },
+ {
+ ["center"]=5.933,
+ ["edges"]=1.842,
+ ["left"]=1.234,
+ ["none"]=0.0080000000000382,
+ ["right"]=1.257,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJK",
+ },
+ {
+ ["center"]=5.953,
+ ["edges"]=1.849,
+ ["left"]=1.251,
+ ["none"]=0.0080000000000382,
+ ["right"]=1.273,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL",
+ },
+ {
+ ["center"]=6.297,
+ ["edges"]=1.845,
+ ["left"]=1.26,
+ ["none"]=0.0089999999999577,
+ ["right"]=1.289,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM",
+ },
+ {
+ ["center"]=6.005,
+ ["edges"]=1.841,
+ ["left"]=1.273,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.295,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN",
+ },
+ {
+ ["center"]=6.303,
+ ["edges"]=1.843,
+ ["left"]=1.285,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.312,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO",
+ },
+ {
+ ["center"]=6.11,
+ ["edges"]=1.852,
+ ["left"]=1.309,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.325,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP",
+ },
+ {
+ ["center"]=6.672,
+ ["edges"]=1.871,
+ ["left"]=1.319,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.337,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ",
+ },
+ {
+ ["center"]=6.417,
+ ["edges"]=1.838,
+ ["left"]=1.305,
+ ["none"]=0.0089999999999577,
+ ["right"]=1.33,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR",
+ },
+ {
+ ["center"]=6.69,
+ ["edges"]=1.843,
+ ["left"]=1.303,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.328,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS",
+ },
+ {
+ ["center"]=6.4,
+ ["edges"]=1.852,
+ ["left"]=1.31,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.33,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST",
+ },
+ {
+ ["center"]=7.058,
+ ["edges"]=1.853,
+ ["left"]=1.312,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.33,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU",
+ },
+ {
+ ["center"]=6.736,
+ ["edges"]=1.847,
+ ["left"]=1.308,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.331,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV",
+ },
+ {
+ ["center"]=7.123,
+ ["edges"]=1.85,
+ ["left"]=1.305,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.326,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW",
+ },
+ {
+ ["center"]=6.893,
+ ["edges"]=1.848,
+ ["left"]=1.305,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.331,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX",
+ },
+ },
+} \ No newline at end of file
diff --git a/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51.lua b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51.lua
new file mode 100644
index 000000000..f11fa024f
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/luatest-hash-luajittex-073-LUA51.lua
@@ -0,0 +1,406 @@
+return {
+ ["comment"]="lua 5.1 hashing",
+ ["datetime"]=1393675147,
+ ["samples"]={
+ {
+ ["center"]=1.11,
+ ["edges"]=1.625,
+ ["left"]=1.094,
+ ["none"]=0,
+ ["right"]=1.157,
+ ["text"]="a",
+ },
+ {
+ ["center"]=1.133,
+ ["edges"]=1.659,
+ ["left"]=1.111,
+ ["none"]=0,
+ ["right"]=1.125,
+ ["text"]="ab",
+ },
+ {
+ ["center"]=1.133,
+ ["edges"]=1.659,
+ ["left"]=1.122,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.128,
+ ["text"]="abc",
+ },
+ {
+ ["center"]=1.132,
+ ["edges"]=1.668,
+ ["left"]=1.119,
+ ["none"]=0.0079999999999991,
+ ["right"]=1.128,
+ ["text"]="abcd",
+ },
+ {
+ ["center"]=1.141,
+ ["edges"]=1.661,
+ ["left"]=1.122,
+ ["none"]=0.0079999999999991,
+ ["right"]=1.131,
+ ["text"]="abcde",
+ },
+ {
+ ["center"]=1.141,
+ ["edges"]=1.66,
+ ["left"]=1.121,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.134,
+ ["text"]="abcdef",
+ },
+ {
+ ["center"]=1.14,
+ ["edges"]=1.667,
+ ["left"]=1.12,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.129,
+ ["text"]="abcdefg",
+ },
+ {
+ ["center"]=1.14,
+ ["edges"]=1.662,
+ ["left"]=1.116,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.128,
+ ["text"]="abcdefgh",
+ },
+ {
+ ["center"]=1.137,
+ ["edges"]=1.66,
+ ["left"]=1.112,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.124,
+ ["text"]="abcdefghi",
+ },
+ {
+ ["center"]=1.146,
+ ["edges"]=1.659,
+ ["left"]=1.116,
+ ["none"]=0.0080000000000027,
+ ["right"]=1.127,
+ ["text"]="abcdefghij",
+ },
+ {
+ ["center"]=1.15,
+ ["edges"]=1.664,
+ ["left"]=1.121,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.132,
+ ["text"]="abcdefghijk",
+ },
+ {
+ ["center"]=1.168,
+ ["edges"]=1.674,
+ ["left"]=1.122,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.135,
+ ["text"]="abcdefghijkl",
+ },
+ {
+ ["center"]=3.021,
+ ["edges"]=1.677,
+ ["left"]=1.128,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.139,
+ ["text"]="abcdefghijklm",
+ },
+ {
+ ["center"]=3.952,
+ ["edges"]=1.676,
+ ["left"]=1.129,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.142,
+ ["text"]="abcdefghijklmn",
+ },
+ {
+ ["center"]=3.309,
+ ["edges"]=1.673,
+ ["left"]=1.124,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.138,
+ ["text"]="abcdefghijklmno",
+ },
+ {
+ ["center"]=3.999,
+ ["edges"]=1.68,
+ ["left"]=1.121,
+ ["none"]=0.0090000000000003,
+ ["right"]=1.134,
+ ["text"]="abcdefghijklmnop",
+ },
+ {
+ ["center"]=3.405,
+ ["edges"]=1.678,
+ ["left"]=1.13,
+ ["none"]=0.0080000000000098,
+ ["right"]=1.144,
+ ["text"]="abcdefghijklmnopq",
+ },
+ {
+ ["center"]=4.034,
+ ["edges"]=1.686,
+ ["left"]=1.134,
+ ["none"]=0.0079999999999956,
+ ["right"]=1.142,
+ ["text"]="abcdefghijklmnopqr",
+ },
+ {
+ ["center"]=3.998,
+ ["edges"]=1.69,
+ ["left"]=1.133,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.145,
+ ["text"]="abcdefghijklmnopqrs",
+ },
+ {
+ ["center"]=4.145,
+ ["edges"]=4.488,
+ ["left"]=1.133,
+ ["none"]=0.0089999999999861,
+ ["right"]=1.148,
+ ["text"]="abcdefghijklmnopqrst",
+ },
+ {
+ ["center"]=4.095,
+ ["edges"]=1.759,
+ ["left"]=1.138,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.152,
+ ["text"]="abcdefghijklmnopqrstu",
+ },
+ {
+ ["center"]=4.238,
+ ["edges"]=4.466,
+ ["left"]=1.144,
+ ["none"]=0.0079999999999814,
+ ["right"]=1.154,
+ ["text"]="abcdefghijklmnopqrstuv",
+ },
+ {
+ ["center"]=4.441,
+ ["edges"]=1.743,
+ ["left"]=1.141,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.154,
+ ["text"]="abcdefghijklmnopqrstuvw",
+ },
+ {
+ ["center"]=4.404,
+ ["edges"]=4.455,
+ ["left"]=1.153,
+ ["none"]=0.0090000000000146,
+ ["right"]=1.163,
+ ["text"]="abcdefghijklmnopqrstuvwx",
+ },
+ {
+ ["center"]=4.531,
+ ["edges"]=1.747,
+ ["left"]=1.151,
+ ["none"]=0.0080000000000098,
+ ["right"]=1.162,
+ ["text"]="abcdefghijklmnopqrstuvwxy",
+ },
+ {
+ ["center"]=4.585,
+ ["edges"]=4.466,
+ ["left"]=3.902,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.392,
+ ["text"]="abcdefghijklmnopqrstuvwxyz",
+ },
+ {
+ ["center"]=4.851,
+ ["edges"]=1.727,
+ ["left"]=33.17,
+ ["none"]=0.0080000000000098,
+ ["right"]=4.341,
+ ["text"]="abcdefghijklmnopqrstuvwxyzA",
+ },
+ {
+ ["center"]=5.002,
+ ["edges"]=4.959,
+ ["left"]=4.508,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.642,
+ ["text"]="abcdefghijklmnopqrstuvwxyzAB",
+ },
+ {
+ ["center"]=36.952,
+ ["edges"]=1.747,
+ ["left"]=32.597,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.65,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABC",
+ },
+ {
+ ["center"]=59.268,
+ ["edges"]=5.001,
+ ["left"]=4.613,
+ ["none"]=0.0089999999999577,
+ ["right"]=4.617,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCD",
+ },
+ {
+ ["center"]=42.982,
+ ["edges"]=1.747,
+ ["left"]=33.058,
+ ["none"]=0.0080000000000382,
+ ["right"]=4.696,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDE",
+ },
+ {
+ ["center"]=39.54,
+ ["edges"]=4.953,
+ ["left"]=4.438,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.9359999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEF",
+ },
+ {
+ ["center"]=69.576,
+ ["edges"]=1.7379999999999,
+ ["left"]=32.999,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.874,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFG",
+ },
+ {
+ ["center"]=43.781,
+ ["edges"]=4.961,
+ ["left"]=4.84,
+ ["none"]=0.0080000000000382,
+ ["right"]=4.9749999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGH",
+ },
+ {
+ ["center"]=40.142,
+ ["edges"]=1.744,
+ ["left"]=33.765,
+ ["none"]=0.0090000000000146,
+ ["right"]=4.994,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHI",
+ },
+ {
+ ["center"]=70.239,
+ ["edges"]=5.114,
+ ["left"]=4.7800000000001,
+ ["none"]=0.0090000000000146,
+ ["right"]=5.213,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJ",
+ },
+ {
+ ["center"]=46.93,
+ ["edges"]=1.742,
+ ["left"]=32.366,
+ ["none"]=0.0079999999999245,
+ ["right"]=5.117,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJK",
+ },
+ {
+ ["center"]=43.434,
+ ["edges"]=5.1499999999999,
+ ["left"]=4.5730000000001,
+ ["none"]=0.0080000000000382,
+ ["right"]=5.23,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL",
+ },
+ {
+ ["center"]=76.315,
+ ["edges"]=1.752,
+ ["left"]=32.632,
+ ["none"]=0.0080000000000382,
+ ["right"]=5.3120000000001,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM",
+ },
+ {
+ ["center"]=51.809,
+ ["edges"]=5.1949999999999,
+ ["left"]=4.5729999999999,
+ ["none"]=0.0080000000000382,
+ ["right"]=5.4829999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN",
+ },
+ {
+ ["center"]=46.811,
+ ["edges"]=1.7719999999999,
+ ["left"]=32.4,
+ ["none"]=0.0080000000000382,
+ ["right"]=5.595,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO",
+ },
+ {
+ ["center"]=87.013,
+ ["edges"]=5.1410000000001,
+ ["left"]=4.961,
+ ["none"]=0.0090000000000146,
+ ["right"]=5.527,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP",
+ },
+ {
+ ["center"]=55.775,
+ ["edges"]=1.7800000000002,
+ ["left"]=32.732,
+ ["none"]=0.0089999999997872,
+ ["right"]=5.624,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ",
+ },
+ {
+ ["center"]=49.956,
+ ["edges"]=5.5519999999999,
+ ["left"]=5.0459999999998,
+ ["none"]=0.0090000000000146,
+ ["right"]=5.893,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR",
+ },
+ {
+ ["center"]=495.147,
+ ["edges"]=1.819,
+ ["left"]=32.684,
+ ["none"]=0.0090000000000146,
+ ["right"]=5.8970000000002,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS",
+ },
+ {
+ ["center"]=542.566,
+ ["edges"]=5.482,
+ ["left"]=4.982,
+ ["none"]=0.0080000000002656,
+ ["right"]=5.9839999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST",
+ },
+ {
+ ["center"]=66.082,
+ ["edges"]=1.835,
+ ["left"]=32.42,
+ ["none"]=0.0090000000000146,
+ ["right"]=5.8340000000003,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU",
+ },
+ {
+ ["center"]=97.62,
+ ["edges"]=5.6189999999997,
+ ["left"]=5.0569999999998,
+ ["none"]=0.0090000000000146,
+ ["right"]=6.172,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV",
+ },
+ {
+ ["center"]=531.977,
+ ["edges"]=1.8630000000003,
+ ["left"]=32.873,
+ ["none"]=0.0090000000000146,
+ ["right"]=6.1799999999998,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW",
+ },
+ {
+ ["center"]=576.093,
+ ["edges"]=5.6260000000002,
+ ["left"]=5.4200000000001,
+ ["none"]=0.0090000000000146,
+ ["right"]=6.306,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX",
+ },
+ },
+}
diff --git a/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52-40-6.lua b/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52-40-6.lua
new file mode 100644
index 000000000..201f67096
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52-40-6.lua
@@ -0,0 +1,406 @@
+return {
+ ["comment"]="lua 52 hashing 40 max 2^6 bytes",
+ ["datetime"]=1393862849,
+ ["samples"]={
+ {
+ ["center"]=1.198,
+ ["edges"]=1.723,
+ ["left"]=1.154,
+ ["none"]=0.026,
+ ["right"]=1.202,
+ ["text"]="a",
+ },
+ {
+ ["center"]=1.202,
+ ["edges"]=1.728,
+ ["left"]=1.156,
+ ["none"]=0.026000000000001,
+ ["right"]=1.199,
+ ["text"]="ab",
+ },
+ {
+ ["center"]=1.21,
+ ["edges"]=1.731,
+ ["left"]=1.174,
+ ["none"]=0.026,
+ ["right"]=1.203,
+ ["text"]="abc",
+ },
+ {
+ ["center"]=1.216,
+ ["edges"]=1.743,
+ ["left"]=1.177,
+ ["none"]=0.026,
+ ["right"]=1.207,
+ ["text"]="abcd",
+ },
+ {
+ ["center"]=1.221,
+ ["edges"]=1.738,
+ ["left"]=1.18,
+ ["none"]=0.026,
+ ["right"]=1.21,
+ ["text"]="abcde",
+ },
+ {
+ ["center"]=1.256,
+ ["edges"]=1.758,
+ ["left"]=1.209,
+ ["none"]=0.027000000000001,
+ ["right"]=1.219,
+ ["text"]="abcdef",
+ },
+ {
+ ["center"]=1.236,
+ ["edges"]=1.741,
+ ["left"]=1.196,
+ ["none"]=0.027000000000001,
+ ["right"]=1.234,
+ ["text"]="abcdefg",
+ },
+ {
+ ["center"]=1.23,
+ ["edges"]=1.742,
+ ["left"]=1.187,
+ ["none"]=0.025999999999996,
+ ["right"]=1.218,
+ ["text"]="abcdefgh",
+ },
+ {
+ ["center"]=1.217,
+ ["edges"]=1.744,
+ ["left"]=1.188,
+ ["none"]=0.026000000000003,
+ ["right"]=1.215,
+ ["text"]="abcdefghi",
+ },
+ {
+ ["center"]=1.227,
+ ["edges"]=1.734,
+ ["left"]=1.193,
+ ["none"]=0.025999999999996,
+ ["right"]=1.21,
+ ["text"]="abcdefghij",
+ },
+ {
+ ["center"]=1.225,
+ ["edges"]=1.732,
+ ["left"]=1.196,
+ ["none"]=0.024999999999991,
+ ["right"]=1.214,
+ ["text"]="abcdefghijk",
+ },
+ {
+ ["center"]=1.229,
+ ["edges"]=1.734,
+ ["left"]=1.18,
+ ["none"]=0.024999999999991,
+ ["right"]=1.213,
+ ["text"]="abcdefghijkl",
+ },
+ {
+ ["center"]=1.241,
+ ["edges"]=1.733,
+ ["left"]=1.186,
+ ["none"]=0.025999999999996,
+ ["right"]=1.218,
+ ["text"]="abcdefghijklm",
+ },
+ {
+ ["center"]=1.249,
+ ["edges"]=1.736,
+ ["left"]=1.191,
+ ["none"]=0.02600000000001,
+ ["right"]=1.219,
+ ["text"]="abcdefghijklmn",
+ },
+ {
+ ["center"]=1.261,
+ ["edges"]=1.748,
+ ["left"]=1.187,
+ ["none"]=0.025999999999996,
+ ["right"]=1.236,
+ ["text"]="abcdefghijklmno",
+ },
+ {
+ ["center"]=1.256,
+ ["edges"]=1.745,
+ ["left"]=1.192,
+ ["none"]=0.025999999999996,
+ ["right"]=1.23,
+ ["text"]="abcdefghijklmnop",
+ },
+ {
+ ["center"]=1.259,
+ ["edges"]=1.743,
+ ["left"]=1.195,
+ ["none"]=0.025999999999996,
+ ["right"]=1.226,
+ ["text"]="abcdefghijklmnopq",
+ },
+ {
+ ["center"]=1.056,
+ ["edges"]=1.74,
+ ["left"]=1.192,
+ ["none"]=0.025999999999996,
+ ["right"]=1.225,
+ ["text"]="abcdefghijklmnopqr",
+ },
+ {
+ ["center"]=1.057,
+ ["edges"]=1.741,
+ ["left"]=1.186,
+ ["none"]=0.025000000000006,
+ ["right"]=1.223,
+ ["text"]="abcdefghijklmnopqrs",
+ },
+ {
+ ["center"]=1.062,
+ ["edges"]=1.751,
+ ["left"]=1.194,
+ ["none"]=0.025000000000006,
+ ["right"]=1.23,
+ ["text"]="abcdefghijklmnopqrst",
+ },
+ {
+ ["center"]=1.069,
+ ["edges"]=1.756,
+ ["left"]=1.197,
+ ["none"]=0.02600000000001,
+ ["right"]=1.231,
+ ["text"]="abcdefghijklmnopqrstu",
+ },
+ {
+ ["center"]=1.087,
+ ["edges"]=1.756,
+ ["left"]=1.208,
+ ["none"]=0.025000000000006,
+ ["right"]=1.231,
+ ["text"]="abcdefghijklmnopqrstuv",
+ },
+ {
+ ["center"]=1.072,
+ ["edges"]=1.76,
+ ["left"]=1.198,
+ ["none"]=0.025000000000006,
+ ["right"]=1.234,
+ ["text"]="abcdefghijklmnopqrstuvw",
+ },
+ {
+ ["center"]=1.063,
+ ["edges"]=1.759,
+ ["left"]=1.195,
+ ["none"]=0.025999999999982,
+ ["right"]=1.232,
+ ["text"]="abcdefghijklmnopqrstuvwx",
+ },
+ {
+ ["center"]=1.066,
+ ["edges"]=1.764,
+ ["left"]=1.199,
+ ["none"]=0.025999999999982,
+ ["right"]=1.235,
+ ["text"]="abcdefghijklmnopqrstuvwxy",
+ },
+ {
+ ["center"]=1.062,
+ ["edges"]=1.762,
+ ["left"]=1.248,
+ ["none"]=0.02600000000001,
+ ["right"]=1.248,
+ ["text"]="abcdefghijklmnopqrstuvwxyz",
+ },
+ {
+ ["center"]=1.07,
+ ["edges"]=1.772,
+ ["left"]=1.216,
+ ["none"]=0.02600000000001,
+ ["right"]=1.247,
+ ["text"]="abcdefghijklmnopqrstuvwxyzA",
+ },
+ {
+ ["center"]=1.07,
+ ["edges"]=1.77,
+ ["left"]=1.223,
+ ["none"]=0.027000000000015,
+ ["right"]=1.264,
+ ["text"]="abcdefghijklmnopqrstuvwxyzAB",
+ },
+ {
+ ["center"]=1.073,
+ ["edges"]=1.586,
+ ["left"]=1.211,
+ ["none"]=0.02600000000001,
+ ["right"]=1.248,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABC",
+ },
+ {
+ ["center"]=1.075,
+ ["edges"]=1.584,
+ ["left"]=1.22,
+ ["none"]=0.02600000000001,
+ ["right"]=1.252,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCD",
+ },
+ {
+ ["center"]=1.105,
+ ["edges"]=1.593,
+ ["left"]=1.218,
+ ["none"]=0.02600000000001,
+ ["right"]=1.255,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDE",
+ },
+ {
+ ["center"]=1.109,
+ ["edges"]=1.594,
+ ["left"]=1.219,
+ ["none"]=0.025000000000006,
+ ["right"]=1.256,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEF",
+ },
+ {
+ ["center"]=1.122,
+ ["edges"]=1.589,
+ ["left"]=1.223,
+ ["none"]=0.025000000000006,
+ ["right"]=1.257,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFG",
+ },
+ {
+ ["center"]=1.129,
+ ["edges"]=1.596,
+ ["left"]=1.22,
+ ["none"]=0.02600000000001,
+ ["right"]=1.253,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGH",
+ },
+ {
+ ["center"]=1.141,
+ ["edges"]=1.59,
+ ["left"]=1.046,
+ ["none"]=0.024999999999977,
+ ["right"]=1.077,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHI",
+ },
+ {
+ ["center"]=1.159,
+ ["edges"]=1.599,
+ ["left"]=1.033,
+ ["none"]=0.025999999999982,
+ ["right"]=1.08,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJ",
+ },
+ {
+ ["center"]=1.162,
+ ["edges"]=1.595,
+ ["left"]=1.034,
+ ["none"]=0.02600000000001,
+ ["right"]=1.06,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJK",
+ },
+ {
+ ["center"]=1.171,
+ ["edges"]=1.599,
+ ["left"]=1.04,
+ ["none"]=0.024999999999977,
+ ["right"]=1.06,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL",
+ },
+ {
+ ["center"]=1.178,
+ ["edges"]=1.6,
+ ["left"]=1.033,
+ ["none"]=0.024999999999977,
+ ["right"]=1.063,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM",
+ },
+ {
+ ["center"]=1.137,
+ ["edges"]=1.602,
+ ["left"]=1.029,
+ ["none"]=0.02600000000001,
+ ["right"]=1.061,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN",
+ },
+ {
+ ["center"]=1.138,
+ ["edges"]=1.604,
+ ["left"]=1.032,
+ ["none"]=0.024999999999977,
+ ["right"]=1.06,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO",
+ },
+ {
+ ["center"]=1.151,
+ ["edges"]=1.622,
+ ["left"]=1.032,
+ ["none"]=0.025000000000034,
+ ["right"]=1.064,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP",
+ },
+ {
+ ["center"]=1.151,
+ ["edges"]=1.635,
+ ["left"]=1.039,
+ ["none"]=0.025999999999954,
+ ["right"]=1.068,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ",
+ },
+ {
+ ["center"]=1.149,
+ ["edges"]=1.633,
+ ["left"]=1.039,
+ ["none"]=0.02600000000001,
+ ["right"]=1.069,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR",
+ },
+ {
+ ["center"]=1.16,
+ ["edges"]=1.642,
+ ["left"]=1.041,
+ ["none"]=0.024999999999977,
+ ["right"]=1.067,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS",
+ },
+ {
+ ["center"]=1.155,
+ ["edges"]=1.651,
+ ["left"]=1.04,
+ ["none"]=0.02600000000001,
+ ["right"]=1.071,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST",
+ },
+ {
+ ["center"]=1.155,
+ ["edges"]=1.664,
+ ["left"]=1.042,
+ ["none"]=0.024999999999977,
+ ["right"]=1.073,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU",
+ },
+ {
+ ["center"]=1.146,
+ ["edges"]=1.668,
+ ["left"]=1.059,
+ ["none"]=0.02600000000001,
+ ["right"]=1.088,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV",
+ },
+ {
+ ["center"]=1.173,
+ ["edges"]=1.673,
+ ["left"]=1.067,
+ ["none"]=0.02600000000001,
+ ["right"]=1.099,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW",
+ },
+ {
+ ["center"]=1.14,
+ ["edges"]=1.669,
+ ["left"]=1.063,
+ ["none"]=0.024999999999977,
+ ["right"]=1.102,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX",
+ },
+ },
+} \ No newline at end of file
diff --git a/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52.lua b/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52.lua
new file mode 100644
index 000000000..7fb7dc044
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/luatest-hash-luatex-073-LUA52.lua
@@ -0,0 +1,406 @@
+return {
+ ["comment"]="lua 5.2 hashing",
+ ["datetime"]=1393679507,
+ ["samples"]={
+ {
+ ["center"]=1.188,
+ ["edges"]=1.701,
+ ["left"]=1.143,
+ ["none"]=0.016,
+ ["right"]=1.19,
+ ["text"]="a",
+ },
+ {
+ ["center"]=1.175,
+ ["edges"]=1.685,
+ ["left"]=1.141,
+ ["none"]=0.024999999999999,
+ ["right"]=1.177,
+ ["text"]="ab",
+ },
+ {
+ ["center"]=1.179,
+ ["edges"]=1.691,
+ ["left"]=1.142,
+ ["none"]=0.025,
+ ["right"]=1.183,
+ ["text"]="abc",
+ },
+ {
+ ["center"]=1.187,
+ ["edges"]=1.692,
+ ["left"]=1.147,
+ ["none"]=0.024999999999999,
+ ["right"]=1.183,
+ ["text"]="abcd",
+ },
+ {
+ ["center"]=1.209,
+ ["edges"]=1.705,
+ ["left"]=1.156,
+ ["none"]=0.025000000000002,
+ ["right"]=1.194,
+ ["text"]="abcde",
+ },
+ {
+ ["center"]=1.215,
+ ["edges"]=1.714,
+ ["left"]=1.161,
+ ["none"]=0.024999999999999,
+ ["right"]=1.201,
+ ["text"]="abcdef",
+ },
+ {
+ ["center"]=1.222,
+ ["edges"]=1.714,
+ ["left"]=1.164,
+ ["none"]=0.027000000000001,
+ ["right"]=1.203,
+ ["text"]="abcdefg",
+ },
+ {
+ ["center"]=1.215,
+ ["edges"]=1.715,
+ ["left"]=1.162,
+ ["none"]=0.026000000000003,
+ ["right"]=1.202,
+ ["text"]="abcdefgh",
+ },
+ {
+ ["center"]=1.209,
+ ["edges"]=1.698,
+ ["left"]=1.171,
+ ["none"]=0.024999999999999,
+ ["right"]=1.206,
+ ["text"]="abcdefghi",
+ },
+ {
+ ["center"]=1.207,
+ ["edges"]=1.707,
+ ["left"]=1.161,
+ ["none"]=0.024999999999991,
+ ["right"]=1.21,
+ ["text"]="abcdefghij",
+ },
+ {
+ ["center"]=1.228,
+ ["edges"]=1.708,
+ ["left"]=1.165,
+ ["none"]=0.024999999999991,
+ ["right"]=1.213,
+ ["text"]="abcdefghijk",
+ },
+ {
+ ["center"]=1.224,
+ ["edges"]=1.708,
+ ["left"]=1.165,
+ ["none"]=0.025000000000006,
+ ["right"]=1.205,
+ ["text"]="abcdefghijkl",
+ },
+ {
+ ["center"]=3.586,
+ ["edges"]=1.705,
+ ["left"]=1.162,
+ ["none"]=0.025000000000006,
+ ["right"]=1.215,
+ ["text"]="abcdefghijklm",
+ },
+ {
+ ["center"]=5.056,
+ ["edges"]=1.708,
+ ["left"]=1.175,
+ ["none"]=0.025000000000006,
+ ["right"]=1.207,
+ ["text"]="abcdefghijklmn",
+ },
+ {
+ ["center"]=3.965,
+ ["edges"]=1.712,
+ ["left"]=1.177,
+ ["none"]=0.025000000000006,
+ ["right"]=1.215,
+ ["text"]="abcdefghijklmno",
+ },
+ {
+ ["center"]=5.097,
+ ["edges"]=1.725,
+ ["left"]=1.177,
+ ["none"]=0.025000000000006,
+ ["right"]=1.21,
+ ["text"]="abcdefghijklmnop",
+ },
+ {
+ ["center"]=3.982,
+ ["edges"]=1.724,
+ ["left"]=1.18,
+ ["none"]=0.024000000000001,
+ ["right"]=1.213,
+ ["text"]="abcdefghijklmnopq",
+ },
+ {
+ ["center"]=5.195,
+ ["edges"]=1.714,
+ ["left"]=1.182,
+ ["none"]=0.024999999999977,
+ ["right"]=1.219,
+ ["text"]="abcdefghijklmnopqr",
+ },
+ {
+ ["center"]=4.016,
+ ["edges"]=1.722,
+ ["left"]=1.184,
+ ["none"]=0.025000000000006,
+ ["right"]=1.217,
+ ["text"]="abcdefghijklmnopqrs",
+ },
+ {
+ ["center"]=5.199,
+ ["edges"]=5.623,
+ ["left"]=1.182,
+ ["none"]=0.025000000000006,
+ ["right"]=1.221,
+ ["text"]="abcdefghijklmnopqrst",
+ },
+ {
+ ["center"]=4.056,
+ ["edges"]=1.815,
+ ["left"]=1.191,
+ ["none"]=0.024999999999977,
+ ["right"]=1.244,
+ ["text"]="abcdefghijklmnopqrstu",
+ },
+ {
+ ["center"]=1.082,
+ ["edges"]=5.637,
+ ["left"]=1.193,
+ ["none"]=0.024999999999977,
+ ["right"]=1.247,
+ ["text"]="abcdefghijklmnopqrstuv",
+ },
+ {
+ ["center"]=1.085,
+ ["edges"]=1.827,
+ ["left"]=1.22,
+ ["none"]=0.024999999999977,
+ ["right"]=1.251,
+ ["text"]="abcdefghijklmnopqrstuvw",
+ },
+ {
+ ["center"]=1.071,
+ ["edges"]=5.58,
+ ["left"]=1.205,
+ ["none"]=0.025000000000006,
+ ["right"]=1.244,
+ ["text"]="abcdefghijklmnopqrstuvwx",
+ },
+ {
+ ["center"]=1.07,
+ ["edges"]=1.821,
+ ["left"]=1.195,
+ ["none"]=0.025000000000006,
+ ["right"]=1.247,
+ ["text"]="abcdefghijklmnopqrstuvwxy",
+ },
+ {
+ ["center"]=1.088,
+ ["edges"]=5.514,
+ ["left"]=5.094,
+ ["none"]=0.024999999999977,
+ ["right"]=5.24,
+ ["text"]="abcdefghijklmnopqrstuvwxyz",
+ },
+ {
+ ["center"]=1.069,
+ ["edges"]=1.838,
+ ["left"]=44.874,
+ ["none"]=0.025000000000006,
+ ["right"]=5.257,
+ ["text"]="abcdefghijklmnopqrstuvwxyzA",
+ },
+ {
+ ["center"]=1.075,
+ ["edges"]=5.577,
+ ["left"]=5.412,
+ ["none"]=0.024999999999977,
+ ["right"]=5.231,
+ ["text"]="abcdefghijklmnopqrstuvwxyzAB",
+ },
+ {
+ ["center"]=1.081,
+ ["edges"]=1.841,
+ ["left"]=45.411,
+ ["none"]=0.024999999999977,
+ ["right"]=5.208,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABC",
+ },
+ {
+ ["center"]=1.091,
+ ["edges"]=5.643,
+ ["left"]=5.536,
+ ["none"]=0.02600000000001,
+ ["right"]=5.248,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCD",
+ },
+ {
+ ["center"]=1.084,
+ ["edges"]=1.844,
+ ["left"]=45.54,
+ ["none"]=0.024000000000001,
+ ["right"]=5.351,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDE",
+ },
+ {
+ ["center"]=1.078,
+ ["edges"]=5.657,
+ ["left"]=5.55,
+ ["none"]=0.024999999999977,
+ ["right"]=5.376,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEF",
+ },
+ {
+ ["center"]=1.077,
+ ["edges"]=1.831,
+ ["left"]=45.903,
+ ["none"]=0.025000000000034,
+ ["right"]=5.422,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFG",
+ },
+ {
+ ["center"]=1.082,
+ ["edges"]=5.71,
+ ["left"]=5.525,
+ ["none"]=0.024999999999977,
+ ["right"]=5.266,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGH",
+ },
+ {
+ ["center"]=1.076,
+ ["edges"]=1.8480000000001,
+ ["left"]=48.141,
+ ["none"]=0.024999999999977,
+ ["right"]=5.223,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHI",
+ },
+ {
+ ["center"]=1.083,
+ ["edges"]=6.241,
+ ["left"]=5.427,
+ ["none"]=0.025000000000091,
+ ["right"]=5.26,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJ",
+ },
+ {
+ ["center"]=1.08,
+ ["edges"]=1.59,
+ ["left"]=45.596,
+ ["none"]=0.024999999999977,
+ ["right"]=5.3099999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJK",
+ },
+ {
+ ["center"]=1.0799999999999,
+ ["edges"]=1.5790000000001,
+ ["left"]=5.9499999999999,
+ ["none"]=0.024999999999977,
+ ["right"]=5.2330000000001,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKL",
+ },
+ {
+ ["center"]=1.088,
+ ["edges"]=1.567,
+ ["left"]=45.252,
+ ["none"]=0.024999999999977,
+ ["right"]=5.314,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLM",
+ },
+ {
+ ["center"]=1.074,
+ ["edges"]=1.5699999999999,
+ ["left"]=5.5310000000001,
+ ["none"]=0.024000000000001,
+ ["right"]=5.4889999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN",
+ },
+ {
+ ["center"]=1.074,
+ ["edges"]=1.574,
+ ["left"]=45.903,
+ ["none"]=0.024999999999977,
+ ["right"]=5.598,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNO",
+ },
+ {
+ ["center"]=1.081,
+ ["edges"]=1.569,
+ ["left"]=6.033,
+ ["none"]=0.024999999999977,
+ ["right"]=5.657,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOP",
+ },
+ {
+ ["center"]=1.0690000000001,
+ ["edges"]=1.568,
+ ["left"]=1.296,
+ ["none"]=0.024999999999977,
+ ["right"]=1.115,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQ",
+ },
+ {
+ ["center"]=1.08,
+ ["edges"]=1.572,
+ ["left"]=1.048,
+ ["none"]=0.025000000000091,
+ ["right"]=1.0799999999999,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQR",
+ },
+ {
+ ["center"]=1.085,
+ ["edges"]=1.566,
+ ["left"]=1.0509999999999,
+ ["none"]=0.025000000000091,
+ ["right"]=1.083,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRS",
+ },
+ {
+ ["center"]=1.09,
+ ["edges"]=1.573,
+ ["left"]=1.0459999999999,
+ ["none"]=0.024999999999977,
+ ["right"]=1.083,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRST",
+ },
+ {
+ ["center"]=1.088,
+ ["edges"]=1.576,
+ ["left"]=1.052,
+ ["none"]=0.024000000000001,
+ ["right"]=1.082,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTU",
+ },
+ {
+ ["center"]=1.085,
+ ["edges"]=1.5699999999999,
+ ["left"]=1.048,
+ ["none"]=0.024999999999977,
+ ["right"]=1.08,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUV",
+ },
+ {
+ ["center"]=1.08,
+ ["edges"]=1.571,
+ ["left"]=1.049,
+ ["none"]=0.024999999999977,
+ ["right"]=1.085,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVW",
+ },
+ {
+ ["center"]=1.077,
+ ["edges"]=1.568,
+ ["left"]=1.037,
+ ["none"]=0.024999999999977,
+ ["right"]=1.083,
+ ["text"]="abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWX",
+ },
+ },
+}
diff --git a/doc/context/sources/general/manuals/about/pi-speed-1.tex b/doc/context/sources/general/manuals/about/pi-speed-1.tex
new file mode 100644
index 000000000..be716749a
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/pi-speed-1.tex
@@ -0,0 +1,3 @@
+\starttext
+ \dorecurse{1000}{\input ward \par}
+\stoptext
diff --git a/doc/context/sources/general/manuals/about/pi-speed-2.tex b/doc/context/sources/general/manuals/about/pi-speed-2.tex
new file mode 100644
index 000000000..8dcfab188
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/pi-speed-2.tex
@@ -0,0 +1,3 @@
+\starttext
+ \dorecurse{1000}{test \page}
+\stoptext
diff --git a/doc/context/sources/general/manuals/about/pi-speed-3.tex b/doc/context/sources/general/manuals/about/pi-speed-3.tex
new file mode 100644
index 000000000..d3ead50a1
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/pi-speed-3.tex
@@ -0,0 +1,10 @@
+\starttext
+ \dorecurse{1000}{
+ The Earth, as a habitat for animal life, is in old age and
+ has a fatal illness. Several, in fact. It would be happening
+ whether humans had ever evolved or not. But our presence is
+ like the effect of an old|-|age patient who smokes many packs
+ of cigarettes per day |=| and we humans are the cigarettes.
+ \par
+ }
+\stoptext
diff --git a/doc/context/sources/general/manuals/about/pi-speed-4.tex b/doc/context/sources/general/manuals/about/pi-speed-4.tex
new file mode 100644
index 000000000..517e77285
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/pi-speed-4.tex
@@ -0,0 +1,10 @@
+\starttext
+
+\dorecurse{10} {
+ \startMPcode
+ draw fullcircle scaled 1cm withpen pencircle scaled 1mm ;
+ draw textext("X") ;
+ \stopMPcode
+}
+
+\stoptext
diff --git a/doc/context/sources/general/manuals/about/pi-speed-5.tex b/doc/context/sources/general/manuals/about/pi-speed-5.tex
new file mode 100644
index 000000000..2417e00c3
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/pi-speed-5.tex
@@ -0,0 +1,16 @@
+\setupbodyfont[pagella]
+
+\starttext
+
+\dorecurse {100} {
+ \input ward \par
+ \dorecurse{100} {
+ \dontleavehmode
+ {\green this is green}
+ {\red \smallcaps this is red}
+ {\blue \bf this is blue}
+ }
+ \par
+}
+
+\stoptext
diff --git a/doc/context/sources/general/manuals/about/still-expanding-1.png b/doc/context/sources/general/manuals/about/still-expanding-1.png
new file mode 100644
index 000000000..54579ca13
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-1.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-10.png b/doc/context/sources/general/manuals/about/still-expanding-10.png
new file mode 100644
index 000000000..3eda60b82
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-10.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-2.png b/doc/context/sources/general/manuals/about/still-expanding-2.png
new file mode 100644
index 000000000..6e750b7a2
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-2.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-3.png b/doc/context/sources/general/manuals/about/still-expanding-3.png
new file mode 100644
index 000000000..68ec86a9f
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-3.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-4.png b/doc/context/sources/general/manuals/about/still-expanding-4.png
new file mode 100644
index 000000000..0033f3063
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-4.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-5.png b/doc/context/sources/general/manuals/about/still-expanding-5.png
new file mode 100644
index 000000000..0c50f50d6
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-5.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-6.png b/doc/context/sources/general/manuals/about/still-expanding-6.png
new file mode 100644
index 000000000..ce0730847
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-6.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-7.png b/doc/context/sources/general/manuals/about/still-expanding-7.png
new file mode 100644
index 000000000..a74d19a99
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-7.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-8.png b/doc/context/sources/general/manuals/about/still-expanding-8.png
new file mode 100644
index 000000000..43199fa0c
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-8.png
Binary files differ
diff --git a/doc/context/sources/general/manuals/about/still-expanding-9.png b/doc/context/sources/general/manuals/about/still-expanding-9.png
new file mode 100644
index 000000000..0e965a724
--- /dev/null
+++ b/doc/context/sources/general/manuals/about/still-expanding-9.png
Binary files differ