Diffstat (limited to 'tex/context/base/mkiv')
-rw-r--r-- tex/context/base/mkiv/anch-pos.lua | 9
-rw-r--r-- tex/context/base/mkiv/attr-ini.lua | 18
-rw-r--r-- tex/context/base/mkiv/bibl-bib.lua | 8
-rw-r--r-- tex/context/base/mkiv/char-def.lua | 1
-rw-r--r-- tex/context/base/mkiv/char-ini.lua | 44
-rw-r--r-- tex/context/base/mkiv/char-tex.lua | 44
-rw-r--r-- tex/context/base/mkiv/char-utf.lua | 34
-rw-r--r-- tex/context/base/mkiv/chem-ini.lua | 8
-rw-r--r-- tex/context/base/mkiv/cont-new.mkiv | 2
-rw-r--r-- tex/context/base/mkiv/context.mkiv | 2
-rw-r--r-- tex/context/base/mkiv/core-con.lua | 10
-rw-r--r-- tex/context/base/mkiv/core-dat.lua | 18
-rw-r--r-- tex/context/base/mkiv/core-two.lua | 9
-rw-r--r-- tex/context/base/mkiv/core-uti.lua | 28
-rw-r--r-- tex/context/base/mkiv/data-con.lua | 24
-rw-r--r-- tex/context/base/mkiv/data-res.lua | 33
-rw-r--r-- tex/context/base/mkiv/data-tar.lua | 12
-rw-r--r-- tex/context/base/mkiv/data-tmp.lua | 23
-rw-r--r-- tex/context/base/mkiv/data-zip.lua | 18
-rw-r--r-- tex/context/base/mkiv/file-ini.lua | 7
-rw-r--r-- tex/context/base/mkiv/file-mod.lua | 16
-rw-r--r-- tex/context/base/mkiv/font-afk.lua | 8
-rw-r--r-- tex/context/base/mkiv/font-con.lua | 65
-rw-r--r-- tex/context/base/mkiv/font-ctx.lua | 39
-rw-r--r-- tex/context/base/mkiv/font-def.lua | 92
-rw-r--r-- tex/context/base/mkiv/font-enc.lua | 42
-rw-r--r-- tex/context/base/mkiv/font-fbk.lua | 4
-rw-r--r-- tex/context/base/mkiv/font-imp-tex.lua | 47
-rw-r--r-- tex/context/base/mkiv/font-ini.lua | 4
-rw-r--r-- tex/context/base/mkiv/font-log.lua | 9
-rw-r--r-- tex/context/base/mkiv/font-nod.lua | 5
-rw-r--r-- tex/context/base/mkiv/font-one.lua | 90
-rw-r--r-- tex/context/base/mkiv/font-onr.lua | 40
-rw-r--r-- tex/context/base/mkiv/font-ota.lua | 6
-rw-r--r-- tex/context/base/mkiv/font-ots.lua | 225
-rw-r--r-- tex/context/base/mkiv/font-syn.lua | 31
-rw-r--r-- tex/context/base/mkiv/font-tfm.lua | 29
-rw-r--r-- tex/context/base/mkiv/font-trt.lua | 8
-rw-r--r-- tex/context/base/mkiv/font-vir.lua | 11
-rw-r--r-- tex/context/base/mkiv/l-dir.lua | 17
-rw-r--r-- tex/context/base/mkiv/lang-url.lua | 10
-rw-r--r-- tex/context/base/mkiv/luat-cbk.lua | 132
-rw-r--r-- tex/context/base/mkiv/luat-ini.lua | 8
-rw-r--r-- tex/context/base/mkiv/lxml-aux.lua | 18
-rw-r--r-- tex/context/base/mkiv/lxml-ent.lua | 12
-rw-r--r-- tex/context/base/mkiv/lxml-lpt.lua | 75
-rw-r--r-- tex/context/base/mkiv/lxml-mis.lua | 11
-rw-r--r-- tex/context/base/mkiv/lxml-tab.lua | 254
-rw-r--r-- tex/context/base/mkiv/math-map.lua | 32
-rw-r--r-- tex/context/base/mkiv/meta-fun.lua | 22
-rw-r--r-- tex/context/base/mkiv/mlib-fio.lua | 12
-rw-r--r-- tex/context/base/mkiv/mlib-run.lua | 28
-rw-r--r-- tex/context/base/mkiv/mult-mps.lua | 2
-rw-r--r-- tex/context/base/mkiv/node-ini.lua | 64
-rw-r--r-- tex/context/base/mkiv/node-res.lua | 5
-rw-r--r-- tex/context/base/mkiv/node-tra.lua | 6
-rw-r--r-- tex/context/base/mkiv/pack-obj.lua | 6
-rw-r--r-- tex/context/base/mkiv/pack-rul.lua | 4
-rw-r--r-- tex/context/base/mkiv/publ-dat.lua | 6
-rw-r--r-- tex/context/base/mkiv/publ-ini.lua | 3
-rw-r--r-- tex/context/base/mkiv/publ-ini.mkiv | 2
-rw-r--r-- tex/context/base/mkiv/regi-ini.lua | 11
-rw-r--r-- tex/context/base/mkiv/sort-ini.lua | 82
-rw-r--r-- tex/context/base/mkiv/status-files.pdf | bin 24657 -> 24625 bytes
-rw-r--r-- tex/context/base/mkiv/status-lua.pdf | bin 267358 -> 267345 bytes
-rw-r--r-- tex/context/base/mkiv/syst-con.lua | 7
-rw-r--r-- tex/context/base/mkiv/syst-ini.mkiv | 3
-rw-r--r-- tex/context/base/mkiv/tabl-tbl.mkiv | 3
-rw-r--r-- tex/context/base/mkiv/trac-lmx.lua | 3
-rw-r--r-- tex/context/base/mkiv/util-dim.lua | 234
-rw-r--r-- tex/context/base/mkiv/util-fmt.lua | 70
-rw-r--r-- tex/context/base/mkiv/util-seq.lua | 14
72 files changed, 944 insertions, 1335 deletions
diff --git a/tex/context/base/mkiv/anch-pos.lua b/tex/context/base/mkiv/anch-pos.lua
index 77f55964f..cf3ed87fc 100644
--- a/tex/context/base/mkiv/anch-pos.lua
+++ b/tex/context/base/mkiv/anch-pos.lua
@@ -6,12 +6,9 @@ if not modules then modules = { } end modules ['anch-pos'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We save positional information in the main utility table. Not only
-can we store much more information in <l n='lua'/> but it's also
-more efficient.</p>
---ldx]]--
-
+-- We save positional information in the main utility table. Not only can we store
+-- much more information in Lua but it's also more efficient.
+--
-- plus (extra) is obsolete but we will keep it for a while
--
-- maybe replace texsp by our own converter (stay at the lua end)
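Such a Lua-end converter could look like the sketch below; this is only an illustration (tosp and its factor table are made up, assuming 1pt = 65536sp):

    -- a minimal sketch, not the actual converter
    local factors = {
        pt = 65536,
        bp = 65536 * 72.27 / 72,
        mm = 65536 * 72.27 / 25.4,
        cm = 65536 * 72.27 / 2.54,
    }

    local function tosp(str)
        local amount, unit = string.match(str,"^([%-%d%.]+)%s*(%a%a)$")
        local factor = amount and factors[unit]
        return factor and math.floor(tonumber(amount) * factor + 0.5) or nil
    end

    print(tosp("10pt")) -- 655360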
diff --git a/tex/context/base/mkiv/attr-ini.lua b/tex/context/base/mkiv/attr-ini.lua
index b05c343e5..cd1a0c549 100644
--- a/tex/context/base/mkiv/attr-ini.lua
+++ b/tex/context/base/mkiv/attr-ini.lua
@@ -9,10 +9,8 @@ if not modules then modules = { } end modules ['attr-ini'] = {
local next, type = next, type
local osexit = os.exit
---[[ldx--
-<p>We start with a registration system for atributes so that we can use the
-symbolic names later on.</p>
---ldx]]--
+-- We start with a registration system for attributes so that we can use the symbolic
+-- names later on.
local nodes = nodes
local context = context
@@ -54,17 +52,13 @@ storage.register("attributes/list", list, "attributes.list")
-- end
-- end
---[[ldx--
-<p>We reserve this one as we really want it to be always set (faster).</p>
---ldx]]--
+-- We reserve this one as we really want it to be always set (faster).
names[0], numbers["fontdynamic"] = "fontdynamic", 0
---[[ldx--
-<p>private attributes are used by the system and public ones are for users. We use dedicated
-ranges of numbers for them. Of course a the <l n='context'/> end a private attribute can be
-accessible too, so a private attribute can have a public appearance.</p>
---ldx]]--
+-- Private attributes are used by the system and public ones are for users. We use
+-- dedicated ranges of numbers for them. Of course at the TeX end a private attribute
+-- can be accessible too, so a private attribute can have a public appearance.
sharedstorage.attributes_last_private = sharedstorage.attributes_last_private or 15 -- very private
sharedstorage.attributes_last_public = sharedstorage.attributes_last_public or 1024 -- less private
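Later in this file helpers are layered on top of these counters; usage is roughly as sketched below (assuming the attributes.private and attributes.public helpers defined further down; the attribute names are made up):

    local a_mystate   = attributes.private("mystate")   -- allocated in the private range
    local a_userlevel = attributes.public ("userlevel") -- allocated in the public range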
diff --git a/tex/context/base/mkiv/bibl-bib.lua b/tex/context/base/mkiv/bibl-bib.lua
index baeb3d2f9..b7e478004 100644
--- a/tex/context/base/mkiv/bibl-bib.lua
+++ b/tex/context/base/mkiv/bibl-bib.lua
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['bibl-bib'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is a prelude to integrated bibliography support. This file just loads
-bibtex files and converts them to xml so that the we access the content
-in a convenient way. Actually handling the data takes place elsewhere.</p>
---ldx]]--
+-- This is a prelude to integrated bibliography support. This file just loads bibtex
+-- files and converts them to xml so that we can access the content in a convenient
+-- way. Actually handling the data takes place elsewhere.
local lower, format, gsub, concat = string.lower, string.format, string.gsub, table.concat
local next = next
diff --git a/tex/context/base/mkiv/char-def.lua b/tex/context/base/mkiv/char-def.lua
index 5e9d7d05a..1d4c130e9 100644
--- a/tex/context/base/mkiv/char-def.lua
+++ b/tex/context/base/mkiv/char-def.lua
@@ -67155,6 +67155,7 @@ characters.data={
description="MINUS SIGN",
direction="es",
linebreak="pr",
+ mathextensible="h",
mathgroup="binary arithmetic",
mathspec={
{
diff --git a/tex/context/base/mkiv/char-ini.lua b/tex/context/base/mkiv/char-ini.lua
index db1b85cc5..627ba072c 100644
--- a/tex/context/base/mkiv/char-ini.lua
+++ b/tex/context/base/mkiv/char-ini.lua
@@ -36,20 +36,16 @@ local trace_defining = false trackers.register("characters.defining", fu
local report_defining = logs.reporter("characters")
---[[ldx--
-<p>This module implements some methods and creates additional datastructured
-from the big character table that we use for all kind of purposes:
-<type>char-def.lua</type>.</p>
-
-<p>We assume that at this point <type>characters.data</type> is already
-loaded!</p>
---ldx]]--
-
+-- This module implements some methods and creates additional data structures from
+-- the big character table that we use for all kinds of purposes: 'char-def.lua'.
+--
+-- We assume that at this point 'characters.data' is already populated!
+--
-- todo: in 'char-def.lua' assume defaults:
--
--- directions = l
--- cjkwd = a
--- linebreak = al
+-- directions = l
+-- cjkwd = a
+-- linebreak = al
characters = characters or { }
local characters = characters
@@ -62,9 +58,7 @@ else
os.exit()
end
---[[ldx--
-Extending the table.
---ldx]]--
+-- Extending the table.
if context and CONTEXTLMTXMODE == 0 then
@@ -84,9 +78,7 @@ if context and CONTEXTLMTXMODE == 0 then
end
---[[ldx--
-<p>This converts a string (if given) into a number.</p>
---ldx]]--
+-- This converts a string (if given) into a number.
local pattern = (P("0x") + P("U+")) * ((R("09","AF")^1 * P(-1)) / function(s) return tonumber(s,16) end)
@@ -957,10 +949,8 @@ characters.bidi = allocate {
on = "Other Neutrals",
}
---[[ldx--
-<p>At this point we assume that the big data table is loaded. From this
-table we derive a few more.</p>
---ldx]]--
+-- At this point we assume that the big data table is loaded. From this table we
+-- derive a few more.
if not characters.fallbacks then
@@ -1037,10 +1027,8 @@ setmetatableindex(characters.textclasses,function(t,k)
return false
end)
---[[ldx--
-<p>Next comes a whole series of helper methods. These are (will be) part
-of the official <l n='api'/>.</p>
---ldx]]--
+-- Next comes a whole series of helper methods. These are (will be) part of the
+-- official API.
-- we could make them virtual: characters.contextnames[n]
@@ -1433,9 +1421,7 @@ function characters.lettered(str,spacing)
return concat(new)
end
---[[ldx--
-<p>Requesting lower and uppercase codes:</p>
---ldx]]--
+-- Requesting lower and uppercase codes:
function characters.uccode(n) return uccodes[n] end -- obsolete
function characters.lccode(n) return lccodes[n] end -- obsolete
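Usage is a plain table lookup (a sketch that assumes the character data is loaded):

    print(characters.uccode(0x61)) -- 0x41, the uppercase of "a"
    print(characters.lccode(0x41)) -- 0x61, the lowercase of "A"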
diff --git a/tex/context/base/mkiv/char-tex.lua b/tex/context/base/mkiv/char-tex.lua
index 7f544b147..09547d005 100644
--- a/tex/context/base/mkiv/char-tex.lua
+++ b/tex/context/base/mkiv/char-tex.lua
@@ -42,17 +42,14 @@ local trace_defining = false trackers.register("characters.defining", fu
local report_defining = logs.reporter("characters")
---[[ldx--
-<p>In order to deal with 8-bit output, we need to find a way to go from <l n='utf'/> to
-8-bit. This is handled in the <l n='luatex'/> engine itself.</p>
-
-<p>This leaves us problems with characters that are specific to <l n='tex'/> like
-<type>{}</type>, <type>$</type> and alike. We can remap some chars that tex input files
-are sensitive for to a private area (while writing to a utility file) and revert then
-to their original slot when we read in such a file. Instead of reverting, we can (when
-we resolve characters to glyphs) map them to their right glyph there. For this purpose
-we can use the private planes 0x0F0000 and 0x100000.</p>
---ldx]]--
+-- In order to deal with 8-bit output, we need to find a way to go from UTF to
+-- 8-bit. This is handled in the 32-bit engine itself. This leaves us with problems
+-- for characters that are specific to TeX, like curly braces and dollars. We can
+-- remap some chars that tex input files are sensitive to into a private area (while
+-- writing to a utility file) and then revert them to their original slot when we
+-- read in such a file. Instead of reverting, we can (when we resolve characters to
+-- glyphs) map them to the right glyph there. For this purpose we can use the
+-- private planes 0x0F0000 and 0x100000.
local low = allocate()
local high = allocate()
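The remapping idea itself fits in a few lines (an illustration of the plane offsets mentioned above, not the code in this module; utf8 is the standard Lua 5.3 library):

    local function toprivate(c)
        return utf8.char(0x0F0000 + utf8.codepoint(c)) -- move into a private plane
    end

    local function fromprivate(c)
        return utf8.char(utf8.codepoint(c) - 0x0F0000) -- restore the original slot
    end

    print(fromprivate(toprivate("$"))) -- $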
@@ -102,21 +99,6 @@ private.escape = utf.remapper(escapes) -- maybe: ,"dynamic"
private.replace = utf.remapper(low) -- maybe: ,"dynamic"
private.revert = utf.remapper(high) -- maybe: ,"dynamic"
---[[ldx--
-<p>We get a more efficient variant of this when we integrate
-replacements in collapser. This more or less renders the previous
-private code redundant. The following code is equivalent but the
-first snippet uses the relocated dollars.</p>
-
-<typing>
-[󰀤x󰀤] [$x$]
-</typing>
---ldx]]--
-
--- using the tree-lpeg-mapper would be nice but we also need to deal with end-of-string
--- cases: "\"\i" and don't want "\relax" to be seen as \r e lax" (for which we need to mess
--- with spaces
-
local accentmapping = allocate {
['"'] = { [""] = "¨",
A = "Ä", a = "ä",
@@ -452,10 +434,8 @@ implement { -- a waste of scanner but consistent
actions = texcharacters.defineaccents
}
---[[ldx--
-<p>Instead of using a <l n='tex'/> file to define the named glyphs, we
-use the table. After all, we have this information available anyway.</p>
---ldx]]--
+-- Instead of using a TeX file to define the named glyphs, we use the table. After
+-- all, we have this information available anyway.
function commands.makeactive(n,name) -- not used
contextsprint(ctxcatcodes,format("\\catcode%s=13\\unexpanded\\def %s{\\%s}",n,utfchar(n),name))
@@ -747,9 +727,7 @@ function characters.setactivecatcodes(cct)
tex.catcodetable = saved
end
---[[ldx--
-<p>Setting the lccodes is also done in a loop over the data table.</p>
---ldx]]--
+-- -- Setting the lccodes is also done in a loop over the data table.
-- function characters.setcodes() -- we could loop over csletters
-- if trace_defining then
diff --git a/tex/context/base/mkiv/char-utf.lua b/tex/context/base/mkiv/char-utf.lua
index e230370b5..f9cba36ca 100644
--- a/tex/context/base/mkiv/char-utf.lua
+++ b/tex/context/base/mkiv/char-utf.lua
@@ -6,21 +6,19 @@ if not modules then modules = { } end modules ['char-utf'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>When a sequence of <l n='utf'/> characters enters the application, it may be
-neccessary to collapse subsequences into their composed variant.</p>
-
-<p>This module implements methods for collapsing and expanding <l n='utf'/>
-sequences. We also provide means to deal with characters that are special to
-<l n='tex'/> as well as 8-bit characters that need to end up in special kinds
-of output (for instance <l n='pdf'/>).</p>
-
-<p>We implement these manipulations as filters. One can run multiple filters
-over a string.</p>
-
-<p>The old code has now been moved to char-obs.lua which we keep around for
-educational purposes.</p>
---ldx]]--
+-- When a sequence of UTF characters enters the application, it may be
+-- necessary to collapse subsequences into their composed variant.
+--
+-- This module implements methods for collapsing and expanding UTF sequences. We
+-- also provide means to deal with characters that are special to TeX as well as
+-- 8-bit characters that need to end up in special kinds of output (for instance
+-- PDF).
+--
+-- We implement these manipulations as filters. One can run multiple filters over a
+-- string.
+--
+-- The old code has now been moved to char-obs.lua which we keep around for
+-- educational purposes.
local next, type = next, type
local gsub, find = string.gsub, string.find
@@ -55,10 +53,8 @@ characters.filters.utf = utffilters
local data = characters.data
---[[ldx--
-<p>It only makes sense to collapse at runtime, since we don't expect source code
-to depend on collapsing.</p>
---ldx]]--
+-- It only makes sense to collapse at runtime, since we don't expect source code to
+-- depend on collapsing.
-- for the moment, will be entries in char-def.lua .. this is just a subset that for
-- typographic (font) reasons we want to have split ... if we decompose all, we get
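Running multiple filters over a string amounts to function composition; a minimal standalone sketch (applyfilters is made up, not the driver used here):

    local function applyfilters(str, filters)
        for i = 1, #filters do
            str = filters[i](str) -- each filter maps a string to a string
        end
        return str
    end

    local upper  = string.upper
    local nodots = function(s) return (string.gsub(s,"%.","")) end

    print(applyfilters("a.b.c", { upper, nodots })) -- ABC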
diff --git a/tex/context/base/mkiv/chem-ini.lua b/tex/context/base/mkiv/chem-ini.lua
index f7d10ffa2..06049807a 100644
--- a/tex/context/base/mkiv/chem-ini.lua
+++ b/tex/context/base/mkiv/chem-ini.lua
@@ -19,11 +19,9 @@ local cpatterns = patterns.context
chemistry = chemistry or { }
local chemistry = chemistry
---[[
-<p>The next code started out as adaptation of code from Wolfgang Schuster as
-posted on the mailing list. The current version supports nested braces and
-unbraced integers as scripts.</p>
-]]--
+-- The next code started out as adaptation of code from Wolfgang Schuster as posted
+-- on the mailing list. The current version supports nested braces and unbraced
+-- integers as scripts.
local moleculeparser = cpatterns.scripted
chemistry.moleculeparser = moleculeparser
diff --git a/tex/context/base/mkiv/cont-new.mkiv b/tex/context/base/mkiv/cont-new.mkiv
index 684cf24c8..f0fd15f3b 100644
--- a/tex/context/base/mkiv/cont-new.mkiv
+++ b/tex/context/base/mkiv/cont-new.mkiv
@@ -13,7 +13,7 @@
% \normalend % uncomment this to get the real base runtime
-\newcontextversion{2023.03.20 15:42}
+\newcontextversion{2023.04.01 09:28}
%D This file is loaded at runtime, thereby providing an excellent place for hacks,
%D patches, extensions and new features. There can be local overloads in cont-loc
diff --git a/tex/context/base/mkiv/context.mkiv b/tex/context/base/mkiv/context.mkiv
index 9b89b9bdf..c2735fa5e 100644
--- a/tex/context/base/mkiv/context.mkiv
+++ b/tex/context/base/mkiv/context.mkiv
@@ -49,7 +49,7 @@
%D {YYYY.MM.DD HH:MM} format.
\edef\contextformat {\jobname}
-\edef\contextversion{2023.03.20 15:42}
+\edef\contextversion{2023.04.01 09:28}
%D Kind of special:
diff --git a/tex/context/base/mkiv/core-con.lua b/tex/context/base/mkiv/core-con.lua
index f57eb6ef8..d3e108a7a 100644
--- a/tex/context/base/mkiv/core-con.lua
+++ b/tex/context/base/mkiv/core-con.lua
@@ -8,13 +8,9 @@ if not modules then modules = { } end modules ['core-con'] = {
-- todo: split into lang-con.lua and core-con.lua
---[[ldx--
-<p>This module implements a bunch of conversions. Some are more
-efficient than their <l n='tex'/> counterpart, some are even
-slower but look nicer this way.</p>
-
-<p>Some code may move to a module in the language namespace.</p>
---ldx]]--
+-- This module implements a bunch of conversions. Some are more efficient than their
+-- TeX counterpart, some are even slower but look nicer this way. Some code may move
+-- to a module in the language namespace.
local floor = math.floor
local osdate, ostime, ostimezone = os.date, os.time, os.timezone
diff --git a/tex/context/base/mkiv/core-dat.lua b/tex/context/base/mkiv/core-dat.lua
index b58a801d9..89521b185 100644
--- a/tex/context/base/mkiv/core-dat.lua
+++ b/tex/context/base/mkiv/core-dat.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['core-dat'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This module provides a (multipass) container for arbitrary data. It
-replaces the twopass data mechanism.</p>
---ldx]]--
+-- This module provides a (multipass) container for arbitrary data. It replaces the
+-- twopass data mechanism.
local tonumber, tostring, type = tonumber, tostring, type
@@ -231,9 +229,7 @@ implement {
actions = datasetvariablefromjob
}
---[[ldx--
-<p>We also provide an efficient variant for page states.</p>
---ldx]]--
+-- We also provide an efficient variant for page states.
local collected = allocate()
local tobesaved = allocate()
@@ -250,13 +246,9 @@ local function initializer()
tobesaved = pagestates.tobesaved
end
-job.register('job.pagestates.collected', tobesaved, initializer, nil)
+job.register("job.pagestates.collected", tobesaved, initializer, nil)
-table.setmetatableindex(tobesaved, function(t,k)
- local v = { }
- t[k] = v
- return v
-end)
+table.setmetatableindex(tobesaved, "table")
local function setstate(settings)
local name = settings.name
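The string argument selects a predefined handler that auto-creates a subtable on first access, which is exactly what the removed inline function did. A standalone sketch (assuming the l-table helper behaves as described and returns its table):

    local t = table.setmetatableindex({ }, "table")
    t.page[1] = "start" -- t.page springs into existence as a table
    print(t.page[1])    -- start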
diff --git a/tex/context/base/mkiv/core-two.lua b/tex/context/base/mkiv/core-two.lua
index 3ab2112b9..da37a6170 100644
--- a/tex/context/base/mkiv/core-two.lua
+++ b/tex/context/base/mkiv/core-two.lua
@@ -6,15 +6,14 @@ if not modules then modules = { } end modules ['core-two'] = {
license = "see context related readme files"
}
+-- This is actually one of the oldest MkIV files and basically a port of MkII but
+-- the old usage has long been phased out. Also, the public part is now handled by
+-- datasets which makes this a more private store.
+
local next = next
local remove, concat = table.remove, table.concat
local allocate = utilities.storage.allocate
---[[ldx--
-<p>We save multi-pass information in the main utility table. This is a
-bit of a mess because we support old and new methods.</p>
---ldx]]--
-
local collected = allocate()
local tobesaved = allocate()
diff --git a/tex/context/base/mkiv/core-uti.lua b/tex/context/base/mkiv/core-uti.lua
index 887ef9a75..e8a28c187 100644
--- a/tex/context/base/mkiv/core-uti.lua
+++ b/tex/context/base/mkiv/core-uti.lua
@@ -6,16 +6,13 @@ if not modules then modules = { } end modules ['core-uti'] = {
license = "see context related readme files"
}
--- todo: keep track of changes here (hm, track access, and only true when
--- accessed and changed)
-
---[[ldx--
-<p>A utility file has always been part of <l n='context'/> and with
-the move to <l n='luatex'/> we also moved a lot of multi-pass info
-to a <l n='lua'/> table. Instead of loading a <l n='tex'/> based
-utility file under different setups, we now load a table once. This
-saves much runtime but at the cost of more memory usage.</p>
---ldx]]--
+-- A utility file has always been part of ConTeXt and with the move to LuaTeX we
+-- also moved a lot of multi-pass info to a Lua table. Instead of loading a TeX
+-- based utility file under different setups, we now load a table once. This saves
+-- much runtime but at the cost of more memory usage.
+--
+-- In the meantime the overhead is a bit more due to the amount of data being saved
+-- and more aggressive compacting.
local math = math
local format, match = string.format, string.match
@@ -46,14 +43,9 @@ local job = job
job.version = 1.32
job.packversion = 1.02
--- some day we will implement loading of other jobs and then we need
--- job.jobs
-
---[[ldx--
-<p>Variables are saved using in the previously defined table and passed
-onto <l n='tex'/> using the following method. Of course one can also
-directly access the variable using a <l n='lua'/> call.</p>
---ldx]]--
+-- Variables are saved in the previously defined table and passed on to TeX
+-- using the following method. Of course one can also directly access the variable
+-- using a Lua call.
local savelist, comment = { }, { }
diff --git a/tex/context/base/mkiv/data-con.lua b/tex/context/base/mkiv/data-con.lua
index 51e0ce856..d7d3c7d46 100644
--- a/tex/context/base/mkiv/data-con.lua
+++ b/tex/context/base/mkiv/data-con.lua
@@ -13,19 +13,17 @@ local trace_cache = false trackers.register("resolvers.cache", functi
local trace_containers = false trackers.register("resolvers.containers", function(v) trace_containers = v end)
local trace_storage = false trackers.register("resolvers.storage", function(v) trace_storage = v end)
---[[ldx--
-<p>Once we found ourselves defining similar cache constructs several times,
-containers were introduced. Containers are used to collect tables in memory and
-reuse them when possible based on (unique) hashes (to be provided by the calling
-function).</p>
-
-<p>Caching to disk is disabled by default. Version numbers are stored in the
-saved table which makes it possible to change the table structures without
-bothering about the disk cache.</p>
-
-<p>Examples of usage can be found in the font related code. This code is not
-ideal but we need it in generic too so we compromise.</p>
---ldx]]--
+-- Once we found ourselves defining similar cache constructs several times,
+-- containers were introduced. Containers are used to collect tables in memory and
+-- reuse them when possible based on (unique) hashes (to be provided by the calling
+-- function).
+--
+-- Caching to disk is disabled by default. Version numbers are stored in the saved
+-- table which makes it possible to change the table structures without bothering
+-- about the disk cache.
+--
+-- Examples of usage can be found in the font related code. This code is not ideal
+-- but we need it in generic too so we compromise.
containers = containers or { }
local containers = containers
diff --git a/tex/context/base/mkiv/data-res.lua b/tex/context/base/mkiv/data-res.lua
index 8afc09b97..11e67f785 100644
--- a/tex/context/base/mkiv/data-res.lua
+++ b/tex/context/base/mkiv/data-res.lua
@@ -135,16 +135,35 @@ local criticalvars = {
-- we also report weird ones, with weird being: (1) duplicate /texmf or (2) no /web2c in
-- the names.
+-- if environment.default_texmfcnf then
+-- resolvers.luacnfspec = "home:texmf/web2c;" .. environment.default_texmfcnf -- texlive + home: for taco etc
+-- else
+-- resolvers.luacnfspec = concat ( {
+-- "home:texmf/web2c",
+-- "selfautoparent:/texmf-local/web2c",
+-- "selfautoparent:/texmf-context/web2c",
+-- "selfautoparent:/texmf-dist/web2c",
+-- "selfautoparent:/texmf/web2c",
+-- }, ";")
+-- end
+
if environment.default_texmfcnf then
+ -- this will go away (but then also no more checking in mtxrun.lua itself)
resolvers.luacnfspec = "home:texmf/web2c;" .. environment.default_texmfcnf -- texlive + home: for taco etc
else
- resolvers.luacnfspec = concat ( {
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- }, ";")
+ local texroot = environment.texroot
+ resolvers.luacnfspec = "home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot .. "/texmf-context") then
+ -- we're okay and run the lean and mean installation
+ elseif texroot and isdir(texroot .. "/texmf-dist") then
+ -- we're in texlive where texmf-dist is leading
+ resolvers.luacnfspec = "home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype ~= "windows" and isdir("/etc/texmf/web2c") then
+ -- we have some linux distribution that does it its own way
+ resolvers.luacnfspec = "home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ -- we stick to the reference specification
+ end
end
local unset_variable = "unset"
diff --git a/tex/context/base/mkiv/data-tar.lua b/tex/context/base/mkiv/data-tar.lua
index 45de749b6..b2416330f 100644
--- a/tex/context/base/mkiv/data-tar.lua
+++ b/tex/context/base/mkiv/data-tar.lua
@@ -12,14 +12,10 @@ local trace_locating = false trackers.register("resolvers.locating", function(v
local report_tar = logs.reporter("resolvers","tar")
---[[ldx--
-<p>We use a url syntax for accessing the tar file itself and file in it:</p>
-
-<typing>
-tar:///oeps.tar?name=bla/bla.tex
-tar:///oeps.tar?tree=tex/texmf-local
-</typing>
---ldx]]--
+-- We use a url syntax for accessing the tar file itself and file in it:
+--
+-- tar:///oeps.tar?name=bla/bla.tex
+-- tar:///oeps.tar?tree=tex/texmf-local
local resolvers = resolvers
local findfile = resolvers.findfile
diff --git a/tex/context/base/mkiv/data-tmp.lua b/tex/context/base/mkiv/data-tmp.lua
index 1948f1ea5..21e0d1f4f 100644
--- a/tex/context/base/mkiv/data-tmp.lua
+++ b/tex/context/base/mkiv/data-tmp.lua
@@ -6,20 +6,15 @@ if not modules then modules = { } end modules ['data-tmp'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This module deals with caching data. It sets up the paths and implements
-loaders and savers for tables. Best is to set the following variable. When not
-set, the usual paths will be checked. Personally I prefer the (users) temporary
-path.</p>
-
-</code>
-TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.
-</code>
-
-<p>Currently we do no locking when we write files. This is no real problem
-because most caching involves fonts and the chance of them being written at the
-same time is small. We also need to extend luatools with a recache feature.</p>
---ldx]]--
+-- This module deals with caching data. It sets up the paths and implements loaders
+-- and savers for tables. Best is to set the following variable. When not set, the
+-- usual paths will be checked. Personally I prefer the (users) temporary path.
+--
+-- TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.
+--
+-- Currently we do no locking when we write files. This is no real problem because
+-- most caching involves fonts and the chance of them being written at the same time
+-- is small. We also need to extend luatools with a recache feature.
local next, type = next, type
local pcall, loadfile, collectgarbage = pcall, loadfile, collectgarbage
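For instance (the concrete path is just an assumption; any writable location works):

    TEXMFCACHE=$HOME/.cache/context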
diff --git a/tex/context/base/mkiv/data-zip.lua b/tex/context/base/mkiv/data-zip.lua
index 1a9310f17..40f38c855 100644
--- a/tex/context/base/mkiv/data-zip.lua
+++ b/tex/context/base/mkiv/data-zip.lua
@@ -14,17 +14,13 @@ local trace_locating = false trackers.register("resolvers.locating", function(v
local report_zip = logs.reporter("resolvers","zip")
---[[ldx--
-<p>We use a url syntax for accessing the zip file itself and file in it:</p>
-
-<typing>
-zip:///oeps.zip?name=bla/bla.tex
-zip:///oeps.zip?tree=tex/texmf-local
-zip:///texmf.zip?tree=/tex/texmf
-zip:///texmf.zip?tree=/tex/texmf-local
-zip:///texmf-mine.zip?tree=/tex/texmf-projects
-</typing>
---ldx]]--
+-- We use a url syntax for accessing the zip file itself and file in it:
+--
+-- zip:///oeps.zip?name=bla/bla.tex
+-- zip:///oeps.zip?tree=tex/texmf-local
+-- zip:///texmf.zip?tree=/tex/texmf
+-- zip:///texmf.zip?tree=/tex/texmf-local
+-- zip:///texmf-mine.zip?tree=/tex/texmf-projects
local resolvers = resolvers
local findfile = resolvers.findfile
diff --git a/tex/context/base/mkiv/file-ini.lua b/tex/context/base/mkiv/file-ini.lua
index 2a0271a9d..01bedeeeb 100644
--- a/tex/context/base/mkiv/file-ini.lua
+++ b/tex/context/base/mkiv/file-ini.lua
@@ -6,11 +6,8 @@ if not modules then modules = { } end modules ['file-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>It's more convenient to manipulate filenames (paths) in <l n='lua'/> than in
-<l n='tex'/>. These methods have counterparts at the <l n='tex'/> end.</p>
---ldx]]--
-
+-- It's more convenient to manipulate filenames (paths) in Lua than in TeX. These
+-- methods have counterparts at the TeX end.
local implement = interfaces.implement
local setmacro = interfaces.setmacro
diff --git a/tex/context/base/mkiv/file-mod.lua b/tex/context/base/mkiv/file-mod.lua
index d392887ec..ac9ad938a 100644
--- a/tex/context/base/mkiv/file-mod.lua
+++ b/tex/context/base/mkiv/file-mod.lua
@@ -6,17 +6,11 @@ if not modules then modules = { } end modules ['file-mod'] = {
license = "see context related readme files"
}
--- This module will be redone! For instance, the prefixes will move to data-*
--- as they arr sort of generic along with home:// etc/.
-
--- context is not defined yet! todo! (we need to load tupp-fil after cld)
--- todo: move startreadingfile to lua and push regime there
-
---[[ldx--
-<p>It's more convenient to manipulate filenames (paths) in
-<l n='lua'/> than in <l n='tex'/>. These methods have counterparts
-at the <l n='tex'/> side.</p>
---ldx]]--
+-- This module will be redone! For instance, the prefixes will move to data-* as
+-- they are sort of generic along with home:// etc.
+--
+-- It is more convenient to manipulate filenames (paths) in Lua than in TeX. The
+-- methods below have counterparts at the TeX end.
local format, find, concat, tonumber = string.format, string.find, table.concat, tonumber
local sortedhash = table.sortedhash
diff --git a/tex/context/base/mkiv/font-afk.lua b/tex/context/base/mkiv/font-afk.lua
index 761016d34..250c17e77 100644
--- a/tex/context/base/mkiv/font-afk.lua
+++ b/tex/context/base/mkiv/font-afk.lua
@@ -7,11 +7,9 @@ if not modules then modules = { } end modules ['font-afk'] = {
dataonly = true,
}
---[[ldx--
-<p>For ligatures, only characters with a code smaller than 128 make sense,
-anything larger is encoding dependent. An interesting complication is that a
-character can be in an encoding twice but is hashed once.</p>
---ldx]]--
+-- For ligatures, only characters with a code smaller than 128 make sense, anything
+-- larger is encoding dependent. An interesting complication is that a character can
+-- be in an encoding twice but is hashed once.
local allocate = utilities.storage.allocate
diff --git a/tex/context/base/mkiv/font-con.lua b/tex/context/base/mkiv/font-con.lua
index 066ea33ed..77708ee08 100644
--- a/tex/context/base/mkiv/font-con.lua
+++ b/tex/context/base/mkiv/font-con.lua
@@ -22,11 +22,9 @@ local trace_scaling = false trackers.register("fonts.scaling", function(v)
local report_defining = logs.reporter("fonts","defining")
--- watch out: no negative depths and negative eights permitted in regular fonts
-
---[[ldx--
-<p>Here we only implement a few helper functions.</p>
---ldx]]--
+-- Watch out: no negative depths and negative heights are permitted in regular
+-- fonts. Also, the code in LMTX is a bit different. Here we only implement a
+-- few helper functions.
local fonts = fonts
local constructors = fonts.constructors or { }
@@ -59,11 +57,9 @@ constructors.designsizes = designsizes
local loadedfonts = allocate()
constructors.loadedfonts = loadedfonts
---[[ldx--
-<p>We need to normalize the scale factor (in scaled points). This has to
-do with the fact that <l n='tex'/> uses a negative multiple of 1000 as
-a signal for a font scaled based on the design size.</p>
---ldx]]--
+-- We need to normalize the scale factor (in scaled points). This has to do with the
+-- fact that TeX uses a negative multiple of 1000 as a signal for a font scaled
+-- based on the design size.
local factors = {
pt = 65536.0,
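The convention can be sketched as follows (a standalone illustration; resolvescale is made up, not the helper used in this file):

    -- negative values are permille of the design size, positive values are
    -- already scaled points
    local function resolvescale(scale, designsize)
        if scale < 0 then
            return designsize * (-scale) / 1000 -- e.g. -1200 means "scaled 1200"
        else
            return scale
        end
    end

    print(resolvescale(-1000, 10 * 65536)) -- 655360, i.e. at the design size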
@@ -118,22 +114,18 @@ function constructors.getmathparameter(tfmdata,name)
end
end
---[[ldx--
-<p>Beware, the boundingbox is passed as reference so we may not overwrite it
-in the process; numbers are of course copies. Here 65536 equals 1pt. (Due to
-excessive memory usage in CJK fonts, we no longer pass the boundingbox.)</p>
---ldx]]--
-
--- The scaler is only used for otf and afm and virtual fonts. If a virtual font has italic
--- correction make sure to set the hasitalics flag. Some more flags will be added in the
--- future.
-
---[[ldx--
-<p>The reason why the scaler was originally split, is that for a while we experimented
-with a helper function. However, in practice the <l n='api'/> calls are too slow to
-make this profitable and the <l n='lua'/> based variant was just faster. A days
-wasted day but an experience richer.</p>
---ldx]]--
+-- Beware, the boundingbox is passed as reference so we may not overwrite it in the
+-- process; numbers are of course copies. Here 65536 equals 1pt. (Due to excessive
+-- memory usage in CJK fonts, we no longer pass the boundingbox.)
+--
+-- The scaler is only used for OTF and AFM and virtual fonts. If a virtual font has
+-- italic correction make sure to set the hasitalics flag. Some more flags will be
+-- added in the future.
+--
+-- The reason why the scaler was originally split is that for a while we
+-- experimented with a helper function. However, in practice the API calls are too
+-- slow to make this profitable and the Lua based variant was just faster. A day
+-- wasted but an experience richer.
function constructors.cleanuptable(tfmdata)
-- This no longer makes sense because the addition of font.getcopy and its
@@ -1093,9 +1085,7 @@ function constructors.finalize(tfmdata)
return tfmdata
end
---[[ldx--
-<p>A unique hash value is generated by:</p>
---ldx]]--
+-- A unique hash value is generated by:
local hashmethods = { }
constructors.hashmethods = hashmethods
@@ -1154,13 +1144,11 @@ hashmethods.normal = function(list)
end
end
---[[ldx--
-<p>In principle we can share tfm tables when we are in need for a font, but then
-we need to define a font switch as an id/attr switch which is no fun, so in that
-case users can best use dynamic features ... so, we will not use that speedup. Okay,
-when we get rid of base mode we can optimize even further by sharing, but then we
-loose our testcases for <l n='luatex'/>.</p>
---ldx]]--
+-- In principle we can share tfm tables when we are in need for a font, but then we
+-- need to define a font switch as an id/attr switch which is no fun, so in that
+-- case users can best use dynamic features ... so, we will not use that speedup.
+-- Okay, when we get rid of base mode we can optimize even further by sharing, but
+-- then we lose our testcases for LuaTeX.
function constructors.hashinstance(specification,force)
local hash = specification.hash
@@ -1516,10 +1504,7 @@ do
end
---[[ldx--
-<p>We need to check for default features. For this we provide
-a helper function.</p>
---ldx]]--
+-- We need to check for default features. For this we provide a helper function.
function constructors.checkedfeatures(what,features)
local defaults = handlers[what].features.defaults
diff --git a/tex/context/base/mkiv/font-ctx.lua b/tex/context/base/mkiv/font-ctx.lua
index 2c56b5613..f9ad475ac 100644
--- a/tex/context/base/mkiv/font-ctx.lua
+++ b/tex/context/base/mkiv/font-ctx.lua
@@ -528,26 +528,19 @@ do
end
---[[ldx--
-<p>So far we haven't really dealt with features (or whatever we want
-to pass along with the font definition. We distinguish the following
-situations:</p>
-situations:</p>
-
-<code>
-name:xetex like specs
-name@virtual font spec
-name*context specification
-</code>
---ldx]]--
-
--- currently fonts are scaled while constructing the font, so we
--- have to do scaling of commands in the vf at that point using e.g.
--- "local scale = g.parameters.factor or 1" after all, we need to
--- work with copies anyway and scaling needs to be done at some point;
--- however, when virtual tricks are used as feature (makes more
--- sense) we scale the commands in fonts.constructors.scale (and set the
--- factor there)
+-- So far we haven't really dealt with features (or whatever we want to pass along
+-- with the font definition). We distinguish the following situations:
+--
+-- name:xetex like specs
+-- name@virtual font spec
+-- name*context specification
+--
+-- Currently fonts are scaled while constructing the font, so we have to do scaling
+-- of commands in the vf at that point using e.g. "local scale = g.parameters.factor
+-- or 1". After all, we need to work with copies anyway and scaling needs to be done
+-- at some point; however, when virtual tricks are used as feature (makes more
+-- sense) we scale the commands in fonts.constructors.scale (and set the factor
+-- there).
local loadfont = definers.loadfont
@@ -2385,10 +2378,8 @@ dimenfactors.em = nil
dimenfactors["%"] = nil
dimenfactors.pct = nil
---[[ldx--
-<p>Before a font is passed to <l n='tex'/> we scale it. Here we also need
-to scale virtual characters.</p>
---ldx]]--
+-- Before a font is passed to TeX we scale it. Here we also need to scale virtual
+-- characters.
do
diff --git a/tex/context/base/mkiv/font-def.lua b/tex/context/base/mkiv/font-def.lua
index b752b3258..01bced6d1 100644
--- a/tex/context/base/mkiv/font-def.lua
+++ b/tex/context/base/mkiv/font-def.lua
@@ -24,10 +24,9 @@ trackers.register("fonts.loading", "fonts.defining", "otf.loading", "afm.loading
local report_defining = logs.reporter("fonts","defining")
---[[ldx--
-<p>Here we deal with defining fonts. We do so by intercepting the
-default loader that only handles <l n='tfm'/>.</p>
---ldx]]--
+-- Here we deal with defining fonts. We do so by intercepting the default loader
+-- that only handles TFM files. Admittedly we started out that way, but in the
+-- meantime we can hardly speak of TFM any more.
local fonts = fonts
local fontdata = fonts.hashes.identifiers
@@ -53,25 +52,18 @@ local designsizes = constructors.designsizes
local resolvefile = fontgoodies and fontgoodies.filenames and fontgoodies.filenames.resolve or function(s) return s end
---[[ldx--
-<p>We hardly gain anything when we cache the final (pre scaled)
-<l n='tfm'/> table. But it can be handy for debugging, so we no
-longer carry this code along. Also, we now have quite some reference
-to other tables so we would end up with lots of catches.</p>
---ldx]]--
-
---[[ldx--
-<p>We can prefix a font specification by <type>name:</type> or
-<type>file:</type>. The first case will result in a lookup in the
-synonym table.</p>
-
-<typing>
-[ name: | file: ] identifier [ separator [ specification ] ]
-</typing>
-
-<p>The following function split the font specification into components
-and prepares a table that will move along as we proceed.</p>
---ldx]]--
+-- We hardly gain anything when we cache the final (pre scaled) TFM table. But it
+-- can be handy for debugging, so we no longer carry this code along. Also, we now
+-- have quite some references to other tables so we would end up with lots of
+-- catches.
+--
+-- We can prefix a font specification by "name:" or "file:". The first case will
+-- result in a lookup in the synonym table.
+--
+-- [ name: | file: ] identifier [ separator [ specification ] ]
+--
+-- The following function splits the font specification into components and prepares
+-- a table that will move along as we proceed.
-- beware, we discard additional specs
--
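Concrete instances of that pattern (hedged examples; the font names are just illustrations):

    name:dejavuserif at 12pt
    file:lmroman10-regular*default at 10pt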
@@ -164,9 +156,7 @@ if context then
end
---[[ldx--
-<p>We can resolve the filename using the next function:</p>
---ldx]]--
+-- We can resolve the filename using the next function:
definers.resolvers = definers.resolvers or { }
local resolvers = definers.resolvers
@@ -258,23 +248,17 @@ function definers.resolve(specification)
return specification
end
---[[ldx--
-<p>The main read function either uses a forced reader (as determined by
-a lookup) or tries to resolve the name using the list of readers.</p>
-
-<p>We need to cache when possible. We do cache raw tfm data (from <l
-n='tfm'/>, <l n='afm'/> or <l n='otf'/>). After that we can cache based
-on specificstion (name) and size, that is, <l n='tex'/> only needs a number
-for an already loaded fonts. However, it may make sense to cache fonts
-before they're scaled as well (store <l n='tfm'/>'s with applied methods
-and features). However, there may be a relation between the size and
-features (esp in virtual fonts) so let's not do that now.</p>
-
-<p>Watch out, here we do load a font, but we don't prepare the
-specification yet.</p>
---ldx]]--
-
--- very experimental:
+-- The main read function either uses a forced reader (as determined by a lookup) or
+-- tries to resolve the name using the list of readers.
+--
+-- We need to cache when possible. We do cache raw tfm data (from TFM, AFM or OTF).
+-- After that we can cache based on specification (name) and size, that is, TeX only
+-- needs a number for an already loaded font. However, it may make sense to cache
+-- fonts before they're scaled as well (store TFM's with applied methods and
+-- features). However, there may be a relation between the size and features (esp in
+-- virtual fonts) so let's not do that now.
+--
+-- Watch out, here we do load a font, but we don't prepare the specification yet.
function definers.applypostprocessors(tfmdata)
local postprocessors = tfmdata.postprocessors
@@ -439,17 +423,13 @@ function constructors.readanddefine(name,size) -- no id -- maybe a dummy first
return fontdata[id], id
end
---[[ldx--
-<p>So far the specifiers. Now comes the real definer. Here we cache
-based on id's. Here we also intercept the virtual font handler. Since
-it evolved stepwise I may rewrite this bit (combine code).</p>
-
-In the previously defined reader (the one resulting in a <l n='tfm'/>
-table) we cached the (scaled) instances. Here we cache them again, but
-this time based on id. We could combine this in one cache but this does
-not gain much. By the way, passing id's back to in the callback was
-introduced later in the development.</p>
---ldx]]--
+-- So far the specifiers. Now comes the real definer. Here we cache based on id's.
+-- Here we also intercept the virtual font handler.
+--
+-- In the previously defined reader (the one resulting in a TFM table) we cached the
+-- (scaled) instances. Here we cache them again, but this time based on id. We could
+-- combine this in one cache but this does not gain much. By the way, passing id's
+-- back to in the callback was introduced later in the development.
function definers.registered(hash)
local id = internalized[hash]
@@ -522,9 +502,7 @@ function font.getfont(id)
return fontdata[id] -- otherwise issues
end
---[[ldx--
-<p>We overload the <l n='tfm'/> reader.</p>
---ldx]]--
+-- We overload the TFM reader.
if not context then
callbacks.register('define_font', definers.read, "definition of fonts (tfmdata preparation)")
diff --git a/tex/context/base/mkiv/font-enc.lua b/tex/context/base/mkiv/font-enc.lua
index f2f0595dd..732bc8907 100644
--- a/tex/context/base/mkiv/font-enc.lua
+++ b/tex/context/base/mkiv/font-enc.lua
@@ -16,10 +16,8 @@ local setmetatableindex = table.setmetatableindex
local allocate = utilities.storage.allocate
local mark = utilities.storage.mark
---[[ldx--
-<p>Because encodings are going to disappear, we don't bother defining
-them in tables. But we may do so some day, for consistency.</p>
---ldx]]--
+-- Because encodings are going to disappear, we don't bother defining them in
+-- tables. But we may do so some day, for consistency.
local report_encoding = logs.reporter("fonts","encoding")
@@ -43,24 +41,19 @@ function encodings.is_known(encoding)
return containers.is_valid(encodings.cache,encoding)
end
---[[ldx--
-<p>An encoding file looks like this:</p>
-
-<typing>
-/TeXnANSIEncoding [
-/.notdef
-/Euro
-...
-/ydieresis
-] def
-</typing>
-
-<p>Beware! The generic encoding files don't always apply to the ones that
-ship with fonts. This has to do with the fact that names follow (slightly)
-different standards. However, the fonts where this applies to (for instance
-Latin Modern or <l n='tex'> Gyre) come in OpenType variants too, so these
-will be used.</p>
---ldx]]--
+-- An encoding file looks like this:
+--
+-- /TeXnANSIEncoding [
+-- /.notdef
+-- /Euro
+-- ...
+-- /ydieresis
+-- ] def
+--
+-- Beware! The generic encoding files don't always apply to the ones that ship with
+-- fonts. This has to do with the fact that names follow (slightly) different
+-- standards. However, the fonts this applies to (for instance Latin Modern or
+-- TeX Gyre) come in OpenType variants too, so these will be used.
local enccodes = characters.enccodes or { }
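Pulling the glyph names out of such a vector is a matter of scanning for slashes; a standalone sketch (not the loader used in this file):

    local vector = "/TeXnANSIEncoding [ /.notdef /Euro /ydieresis ] def"
    local names  = { }
    for name in string.gmatch(vector,"/([%a%.][%w%.]*)") do
        names[#names+1] = name
    end
    print(table.concat(names," ")) -- TeXnANSIEncoding .notdef Euro ydieresis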
@@ -120,10 +113,7 @@ function encodings.load(filename)
return containers.write(encodings.cache, name, data)
end
---[[ldx--
-<p>There is no unicode encoding but for practical purposes we define
-one.</p>
---ldx]]--
+-- There is no unicode encoding but for practical purposes we define one.
-- maybe make this a function:
diff --git a/tex/context/base/mkiv/font-fbk.lua b/tex/context/base/mkiv/font-fbk.lua
index b6c9a430d..da04b50a8 100644
--- a/tex/context/base/mkiv/font-fbk.lua
+++ b/tex/context/base/mkiv/font-fbk.lua
@@ -10,10 +10,6 @@ local cos, tan, rad, format = math.cos, math.tan, math.rad, string.format
local utfbyte, utfchar = utf.byte, utf.char
local next = next
---[[ldx--
-<p>This is very experimental code!</p>
---ldx]]--
-
local trace_visualize = false trackers.register("fonts.composing.visualize", function(v) trace_visualize = v end)
local trace_define = false trackers.register("fonts.composing.define", function(v) trace_define = v end)
diff --git a/tex/context/base/mkiv/font-imp-tex.lua b/tex/context/base/mkiv/font-imp-tex.lua
index b4b9a7b69..87a1ae3aa 100644
--- a/tex/context/base/mkiv/font-imp-tex.lua
+++ b/tex/context/base/mkiv/font-imp-tex.lua
@@ -13,36 +13,31 @@ local otf = fonts.handlers.otf
local registerotffeature = otf.features.register
local addotffeature = otf.addfeature
--- tlig (we need numbers for some fonts so ...)
+-- We provide a few old and obsolete compatibility input features. We need numbers
+-- for some fonts so no names here. Do we also need them for afm fonts?
-local specification = {
+local tlig = {
type = "ligature",
order = { "tlig" },
prepend = true,
data = {
- -- endash = "hyphen hyphen",
- -- emdash = "hyphen hyphen hyphen",
- [0x2013] = { 0x002D, 0x002D },
- [0x2014] = { 0x002D, 0x002D, 0x002D },
- -- quotedblleft = "quoteleft quoteleft",
- -- quotedblright = "quoteright quoteright",
- -- quotedblleft = "grave grave",
- -- quotedblright = "quotesingle quotesingle",
- -- quotedblbase = "comma comma",
+ [0x2013] = { 0x002D, 0x002D },
+ [0x2014] = { 0x002D, 0x002D, 0x002D },
},
}
-addotffeature("tlig",specification)
-
-registerotffeature {
- -- this makes it a known feature (in tables)
- name = "tlig",
- description = "tex ligatures",
+local tquo = {
+ type = "ligature",
+ order = { "tquo" },
+ prepend = true,
+ data = {
+ [0x201C] = { 0x0060, 0x0060 },
+ [0x201D] = { 0x0027, 0x0027 },
+ [0x201E] = { 0x002C, 0x002C },
+ },
}
--- trep
-
-local specification = {
+local trep = {
type = "substitution",
order = { "trep" },
prepend = true,
@@ -53,13 +48,13 @@ local specification = {
},
}
-addotffeature("trep",specification)
+addotffeature("trep",trep) -- last
+addotffeature("tlig",tlig)
+addotffeature("tquo",tquo) -- first
-registerotffeature {
- -- this makes it a known feature (in tables)
- name = "trep",
- description = "tex replacements",
-}
+registerotffeature { name = "tlig", description = "tex ligatures" }
+registerotffeature { name = "tquo", description = "tex quotes" }
+registerotffeature { name = "trep", description = "tex replacements" }
-- some day this will be moved to font-imp-scripts.lua
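At the TeX end these features can then be enabled like any other OpenType feature; a hedged usage sketch (adding them to the default feature set):

    \definefontfeature[default][default][tlig=yes,tquo=yes,trep=yes]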
diff --git a/tex/context/base/mkiv/font-ini.lua b/tex/context/base/mkiv/font-ini.lua
index 8bab6d902..201cc69f4 100644
--- a/tex/context/base/mkiv/font-ini.lua
+++ b/tex/context/base/mkiv/font-ini.lua
@@ -6,9 +6,7 @@ if not modules then modules = { } end modules ['font-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Not much is happening here.</p>
---ldx]]--
+-- Not much is happening here.
local allocate = utilities.storage.allocate
local sortedhash = table.sortedhash
diff --git a/tex/context/base/mkiv/font-log.lua b/tex/context/base/mkiv/font-log.lua
index 092b5a62e..96b5864fd 100644
--- a/tex/context/base/mkiv/font-log.lua
+++ b/tex/context/base/mkiv/font-log.lua
@@ -19,12 +19,9 @@ fonts.loggers = loggers
local usedfonts = utilities.storage.allocate()
----- loadedfonts = utilities.storage.allocate()
---[[ldx--
-<p>The following functions are used for reporting about the fonts
-used. The message itself is not that useful in regular runs but since
-we now have several readers it may be handy to know what reader is
-used for which font.</p>
---ldx]]--
+-- The following functions are used for reporting about the fonts used. The message
+-- itself is not that useful in regular runs but since we now have several readers
+-- it may be handy to know what reader is used for which font.
function loggers.onetimemessage(font,char,message,reporter)
local tfmdata = fonts.hashes.identifiers[font]
diff --git a/tex/context/base/mkiv/font-nod.lua b/tex/context/base/mkiv/font-nod.lua
index a7dcfd9b0..1e39784d9 100644
--- a/tex/context/base/mkiv/font-nod.lua
+++ b/tex/context/base/mkiv/font-nod.lua
@@ -7,11 +7,6 @@ if not modules then modules = { } end modules ['font-nod'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is rather experimental. We need more control and some of this
-might become a runtime module instead. This module will be cleaned up!</p>
---ldx]]--
-
local utfchar = utf.char
local concat, fastcopy = table.concat, table.fastcopy
local match, rep = string.match, string.rep
diff --git a/tex/context/base/mkiv/font-one.lua b/tex/context/base/mkiv/font-one.lua
index 829f52ea0..25efc2a04 100644
--- a/tex/context/base/mkiv/font-one.lua
+++ b/tex/context/base/mkiv/font-one.lua
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-one'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, containers, resolvers = fonts, logs, trackers, containers, resolvers
@@ -71,15 +69,13 @@ local overloads = fonts.mappings.overloads
local applyruntimefixes = fonts.treatments and fonts.treatments.applyfixes
---[[ldx--
-<p>We cache files. Caching is taken care of in the loader. We cheat a bit by adding
-ligatures and kern information to the afm derived data. That way we can set them faster
-when defining a font.</p>
-
-<p>We still keep the loading two phased: first we load the data in a traditional
-fashion and later we transform it to sequences. Then we apply some methods also
-used in opentype fonts (like <t>tlig</t>).</p>
---ldx]]--
+-- We cache files. Caching is taken care of in the loader. We cheat a bit by adding
+-- ligatures and kern information to the afm derived data. That way we can set them
+-- faster when defining a font.
+--
+-- We still keep the loading two phased: first we load the data in a traditional
+-- fashion and later we transform it to sequences. Then we apply some methods also
+-- used in opentype fonts (like tlig).
function afm.load(filename)
filename = resolvers.findfile(filename,'afm') or ""
@@ -312,10 +308,8 @@ local function enhance_fix_names(data)
end
end
---[[ldx--
-<p>These helpers extend the basic table with extra ligatures, texligatures
-and extra kerns. This saves quite some lookups later.</p>
---ldx]]--
+-- These helpers extend the basic table with extra ligatures, texligatures and extra
+-- kerns. This saves quite some lookups later.
local addthem = function(rawdata,ligatures)
if ligatures then
@@ -349,17 +343,14 @@ local function enhance_add_ligatures(rawdata)
addthem(rawdata,afm.helpdata.ligatures)
end
---[[ldx--
-<p>We keep the extra kerns in separate kerning tables so that we can use
-them selectively.</p>
---ldx]]--
-
--- This is rather old code (from the beginning when we had only tfm). If
--- we unify the afm data (now we have names all over the place) then
--- we can use shcodes but there will be many more looping then. But we
--- could get rid of the tables in char-cmp then. Als, in the generic version
--- we don't use the character database. (Ok, we can have a context specific
--- variant).
+-- We keep the extra kerns in separate kerning tables so that we can use them
+-- selectively.
+--
+-- This is rather old code (from the beginning when we had only tfm). If we unify
+-- the afm data (now we have names all over the place) then we can use shcodes but
+-- there will be much more looping then. But we could get rid of the tables in
+-- char-cmp then. Also, in the generic version we don't use the character database.
+-- (Ok, we can have a context specific variant).
local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust here
local descriptions = rawdata.descriptions
@@ -440,9 +431,7 @@ local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust h
do_it_copy(afm.helpdata.rightkerned)
end
---[[ldx--
-<p>The copying routine looks messy (and is indeed a bit messy).</p>
---ldx]]--
+-- The copying routine looks messy (and is indeed a bit messy).
local function adddimensions(data) -- we need to normalize afm to otf i.e. indexed table instead of name
if data then
@@ -619,11 +608,9 @@ end
return nil
end
---[[ldx--
-<p>Originally we had features kind of hard coded for <l n='afm'/> files but since I
-expect to support more font formats, I decided to treat this fontformat like any
-other and handle features in a more configurable way.</p>
---ldx]]--
+-- Originally we had features kind of hard coded for AFM files but since I expect to
+-- support more font formats, I decided to treat this font format like any other and
+-- handle features in a more configurable way.
function afm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("afm",tfmdata,features,trace_features,report_afm)
@@ -715,13 +702,10 @@ local function afmtotfm(specification)
end
end
---[[ldx--
-<p>As soon as we could intercept the <l n='tfm'/> reader, I implemented an
-<l n='afm'/> reader. Since traditional <l n='pdftex'/> could use <l n='opentype'/>
-fonts with <l n='afm'/> companions, the following method also could handle
-those cases, but now that we can handle <l n='opentype'/> directly we no longer
-need this features.</p>
---ldx]]--
+-- As soon as we could intercept the TFM reader, I implemented an AFM reader. Since
+-- traditional pdfTeX could use OpenType fonts with AFM companions, the following
+-- method could also handle those cases, but now that we can handle OpenType
+-- directly we no longer need this feature.
local function read_from_afm(specification)
local tfmdata = afmtotfm(specification)
@@ -736,9 +720,7 @@ local function read_from_afm(specification)
return tfmdata
end
---[[ldx--
-<p>We have the usual two modes and related features initializers and processors.</p>
---ldx]]--
+-- We have the usual two modes and related features initializers and processors.
registerafmfeature {
name = "mode",
diff --git a/tex/context/base/mkiv/font-onr.lua b/tex/context/base/mkiv/font-onr.lua
index 9e5a012bd..6234742a3 100644
--- a/tex/context/base/mkiv/font-onr.lua
+++ b/tex/context/base/mkiv/font-onr.lua
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-onr'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, resolvers = fonts, logs, trackers, resolvers
@@ -44,12 +42,9 @@ afm.readers = readers
afm.version = 1.513 -- incrementing this number one up will force a re-cache
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader.</p>
-<p>We use a new (unfinished) pfb loader but I see no differences between the old
-and new vectors (we actually had one bad vector with the old loader).</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built-in TFM
+-- and OTF reader. We use a PFB loader but I see no differences between the old and
+-- new vectors (we actually had one bad vector with the old loader).
local get_indexes, get_shapes
@@ -305,11 +300,10 @@ do
end
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader. We only need data that is relevant for our use. We don't support
-more complex arrangements like multiple master (obsolete), direction specific kerning, etc.</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built-in TFM
+-- and OTF reader. We only need data that is relevant for our use. We don't support
+-- more complex arrangements like multiple master (obsolete), direction specific
+-- kerning, etc.
local spacer = patterns.spacer
local whitespace = patterns.whitespace
diff --git a/tex/context/base/mkiv/font-ota.lua b/tex/context/base/mkiv/font-ota.lua
index a8f9f0047..160d0d0ed 100644
--- a/tex/context/base/mkiv/font-ota.lua
+++ b/tex/context/base/mkiv/font-ota.lua
@@ -54,10 +54,8 @@ local chardata = characters and characters.data
local otffeatures = fonts.constructors.features.otf
local registerotffeature = otffeatures.register
---[[ldx--
-<p>Analyzers run per script and/or language and are needed in order to
-process features right.</p>
---ldx]]--
+-- Analyzers run per script and/or language and are needed in order to process
+-- features right.
local setstate = nuts.setstate
local getstate = nuts.getstate
diff --git a/tex/context/base/mkiv/font-ots.lua b/tex/context/base/mkiv/font-ots.lua
index 6d7c5fb25..48f85c365 100644
--- a/tex/context/base/mkiv/font-ots.lua
+++ b/tex/context/base/mkiv/font-ots.lua
@@ -7,92 +7,90 @@ if not modules then modules = { } end modules ['font-ots'] = { -- sequences
license = "see context related readme files",
}
---[[ldx--
-<p>I need to check the description at the microsoft site ... it has been improved
-so maybe there are some interesting details there. Most below is based on old and
-incomplete documentation and involved quite a bit of guesswork (checking with the
-abstract uniscribe of those days. But changing things is tricky!</p>
-
-<p>This module is a bit more split up that I'd like but since we also want to test
-with plain <l n='tex'/> it has to be so. This module is part of <l n='context'/>
-and discussion about improvements and functionality mostly happens on the
-<l n='context'/> mailing list.</p>
-
-<p>The specification of OpenType is (or at least decades ago was) kind of vague.
-Apart from a lack of a proper free specifications there's also the problem that
-Microsoft and Adobe may have their own interpretation of how and in what order to
-apply features. In general the Microsoft website has more detailed specifications
-and is a better reference. There is also some information in the FontForge help
-files. In the end we rely most on the Microsoft specification.</p>
-
-<p>Because there is so much possible, fonts might contain bugs and/or be made to
-work with certain rederers. These may evolve over time which may have the side
-effect that suddenly fonts behave differently. We don't want to catch all font
-issues.</p>
-
-<p>After a lot of experiments (mostly by Taco, me and Idris) the first implementation
-was already quite useful. When it did most of what we wanted, a more optimized version
-evolved. Of course all errors are mine and of course the code can be improved. There
-are quite some optimizations going on here and processing speed is currently quite
-acceptable and has been improved over time. Many complex scripts are not yet supported
-yet, but I will look into them as soon as <l n='context'/> users ask for it.</p>
-
-<p>The specification leaves room for interpretation. In case of doubt the Microsoft
-implementation is the reference as it is the most complete one. As they deal with
-lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code and
-their suggestions help improve the code. I'm aware that not all border cases can be
-taken care of, unless we accept excessive runtime, and even then the interference
-with other mechanisms (like hyphenation) are not trivial.</p>
-
-<p>Especially discretionary handling has been improved much by Kai Eigner who uses complex
-(latin) fonts. The current implementation is a compromis between his patches and my code
-and in the meantime performance is quite ok. We cannot check all border cases without
-compromising speed but so far we're okay. Given good test cases we can probably improve
-it here and there. Especially chain lookups are non trivial with discretionaries but
-things got much better over time thanks to Kai.</p>
-
-<p>Glyphs are indexed not by unicode but in their own way. This is because there is no
-relationship with unicode at all, apart from the fact that a font might cover certain
-ranges of characters. One character can have multiple shapes. However, at the
-<l n='tex'/> end we use unicode so and all extra glyphs are mapped into a private
-space. This is needed because we need to access them and <l n='tex'/> has to include
-then in the output eventually.</p>
-
-<p>The initial data table is rather close to the open type specification and also not
-that different from the one produced by <l n='fontforge'/> but we uses hashes instead.
-In <l n='context'/> that table is packed (similar tables are shared) and cached on disk
-so that successive runs can use the optimized table (after loading the table is
-unpacked).</p>
-
-<p>This module is sparsely documented because it is has been a moving target. The
-table format of the reader changed a bit over time and we experiment a lot with
-different methods for supporting features. By now the structures are quite stable</p>
-
-<p>Incrementing the version number will force a re-cache. We jump the number by one
-when there's a fix in the reader or processing code that can result in different
-results.</p>
-
-<p>This code is also used outside context but in context it has to work with other
-mechanisms. Both put some constraints on the code here.</p>
-
---ldx]]--
-
--- Remark: We assume that cursives don't cross discretionaries which is okay because it
--- is only used in semitic scripts.
+-- I need to check the description at the Microsoft site ... it has been improved
+-- so maybe there are some interesting details there. Most below is based on old
+-- and incomplete documentation and involved quite a bit of guesswork (checking
+-- with the abstract uniscribe of those days). But changing things is tricky!
+--
+-- This module is a bit more split up than I'd like but since we also want to test
+-- with plain TeX it has to be so. This module is part of ConTeXt and discussion
+-- about improvements and functionality mostly happens on the ConTeXt mailing list.
+--
+-- The specification of OpenType is (or at least decades ago was) kind of vague.
+-- Apart from the lack of a proper free specification there's also the problem that
+-- Microsoft and Adobe may have their own interpretation of how and in what order to
+-- apply features. In general the Microsoft website has more detailed specifications
+-- and is a better reference. There is also some information in the FontForge help
+-- files. In the end we rely most on the Microsoft specification.
+--
+-- Because there is so much possible, fonts might contain bugs and/or be made to
+-- work with certain renderers. These may evolve over time which may have the side
+-- effect that suddenly fonts behave differently. We don't want to catch all font
+-- issues.
+--
+-- After a lot of experiments (mostly by Taco, me and Idris) the first
+-- implementation was already quite useful. When it did most of what we wanted, a
+-- more optimized version evolved. Of course all errors are mine and of course the
+-- code can be improved. There are quite some optimizations going on here and
+-- processing speed is currently quite acceptable and has been improved over time.
+-- Many complex scripts are not supported yet, but I will look into them as soon
+-- as ConTeXt users ask for it.
+--
+-- The specification leaves room for interpretation. In case of doubt the Microsoft
+-- implementation is the reference as it is the most complete one. As they deal with
+-- lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code
+-- and their suggestions help improve the code. I'm aware that not all border cases
+-- can be taken care of, unless we accept excessive runtime, and even then the
+-- interference with other mechanisms (like hyphenation) are not trivial.
+--
+-- Especially discretionary handling has been improved much by Kai Eigner who uses
+-- complex (Latin) fonts. The current implementation is a compromise between his
+-- patches and my code and in the meantime performance is quite ok. We cannot check
+-- all border cases without compromising speed but so far we're okay. Given good
+-- test cases we can probably improve it here and there. Especially chain lookups
+-- are non trivial with discretionaries but things got much better over time thanks
+-- to Kai.
+--
+-- Glyphs are indexed not by unicode but in their own way. This is because there is
+-- no relationship with unicode at all, apart from the fact that a font might cover
+-- certain ranges of characters. One character can have multiple shapes. However, at
+-- the TeX end we use unicode, so all extra glyphs are mapped into a private
+-- space. This is needed because we need to access them and TeX has to include them
+-- in the output eventually.
+--
+-- The initial data table is rather close to the OpenType specification and also
+-- not that different from the one produced by FontForge but we use hashes instead.
+-- In ConTeXt that table is packed (similar tables are shared) and cached on disk so
+-- that successive runs can use the optimized table (after loading the table is
+-- unpacked).
+--
+-- This module is sparsely documented because it has been a moving target. The
+-- table format of the reader changed a bit over time and we experiment a lot with
+-- different methods for supporting features. By now the structures are quite
+-- stable.
+--
+-- Incrementing the version number will force a re-cache. We jump the number by one
+-- when there's a fix in the reader or processing code that can result in different
+-- results.
+--
+-- This code is also used outside ConTeXt but in ConTeXt it has to work with other
+-- mechanisms. Both put some constraints on the code here.
+--
+-- Remark: We assume that cursives don't cross discretionaries which is okay because
+-- it is only used in Semitic scripts.
--
-- Remark: We assume that marks precede base characters.
--
--- Remark: When complex ligatures extend into discs nodes we can get side effects. Normally
--- this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f}.
+-- Remark: When complex ligatures extend into discs nodes we can get side effects.
+-- Normally this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f} doesn't.
--
-- Todo: check if we copy attributes to disc nodes if needed.
--
--- Todo: it would be nice if we could get rid of components. In other places we can use
--- the unicode properties. We can just keep a lua table.
+-- Todo: it would be nice if we could get rid of components. In other places we can
+-- use the unicode properties. We can just keep a lua table.
--
--- Remark: We do some disc juggling where we need to keep in mind that the pre, post and
--- replace fields can have prev pointers to a nesting node ... I wonder if that is still
--- needed.
+-- Remark: We do some disc juggling where we need to keep in mind that the pre, post
+-- and replace fields can have prev pointers to a nesting node ... I wonder if that
+-- is still needed.
--
-- Remark: This is not possible:
--
@@ -1038,10 +1036,8 @@ function handlers.gpos_pair(head,start,dataset,sequence,kerns,rlmode,skiphash,st
end
end
---[[ldx--
-<p>We get hits on a mark, but we're not sure if the it has to be applied so
-we need to explicitly test for basechar, baselig and basemark entries.</p>
---ldx]]--
+-- We get hits on a mark, but we're not sure if it has to be applied so we need
+-- to explicitly test for basechar, baselig and basemark entries.
function handlers.gpos_mark2base(head,start,dataset,sequence,markanchors,rlmode,skiphash)
local markchar = getchar(start)
@@ -1236,10 +1232,8 @@ function handlers.gpos_cursive(head,start,dataset,sequence,exitanchors,rlmode,sk
return head, start, false
end
---[[ldx--
-<p>I will implement multiple chain replacements once I run into a font that uses
-it. It's not that complex to handle.</p>
---ldx]]--
+-- I will implement multiple chain replacements once I run into a font that uses it.
+-- It's not that complex to handle.
local chainprocs = { }
@@ -1292,29 +1286,22 @@ end
chainprocs.reversesub = reversesub
---[[ldx--
-<p>This chain stuff is somewhat tricky since we can have a sequence of actions to be
-applied: single, alternate, multiple or ligature where ligature can be an invalid
-one in the sense that it will replace multiple by one but not neccessary one that
-looks like the combination (i.e. it is the counterpart of multiple then). For
-example, the following is valid:</p>
-
-<typing>
-<line>xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx</line>
-</typing>
-
-<p>Therefore we we don't really do the replacement here already unless we have the
-single lookup case. The efficiency of the replacements can be improved by deleting
-as less as needed but that would also make the code even more messy.</p>
---ldx]]--
-
---[[ldx--
-<p>Here we replace start by a single variant.</p>
---ldx]]--
-
--- To be done (example needed): what if > 1 steps
-
--- this is messy: do we need this disc checking also in alternates?
+-- This chain stuff is somewhat tricky since we can have a sequence of actions to be
+-- applied: single, alternate, multiple or ligature where ligature can be an invalid
+-- one in the sense that it will replace multiple by one but not necessarily one that
+-- looks like the combination (i.e. it is the counterpart of multiple then). For
+-- example, the following is valid:
+--
+-- xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx
+--
+-- Therefore we don't really do the replacement here already unless we have the
+-- single lookup case. The efficiency of the replacements can be improved by
+-- deleting as little as needed but that would also make the code even more messy.
+--
+-- Here we replace start by a single variant.
+--
+-- To be done: what if > 1 steps? (example needed)
+-- This is messy: do we need this disc checking also in alternates?
local function reportzerosteps(dataset,sequence)
logwarning("%s: no steps",cref(dataset,sequence))
@@ -1390,9 +1377,7 @@ function chainprocs.gsub_single(head,start,stop,dataset,sequence,currentlookup,r
return head, start, false
end
---[[ldx--
-<p>Here we replace start by new glyph. First we delete the rest of the match.</p>
---ldx]]--
+-- Here we replace start by new glyph. First we delete the rest of the match.
-- char_1 mark_1 -> char_x mark_1 (ignore marks)
-- char_1 mark_1 -> char_x
@@ -1444,9 +1429,7 @@ function chainprocs.gsub_alternate(head,start,stop,dataset,sequence,currentlooku
return head, start, false
end
---[[ldx--
-<p>Here we replace start by a sequence of new glyphs.</p>
---ldx]]--
+-- Here we replace start by a sequence of new glyphs.
function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup,rlmode,skiphash,chainindex)
local mapping = currentlookup.mapping
@@ -1470,11 +1453,9 @@ function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup
return head, start, false
end
---[[ldx--
-<p>When we replace ligatures we use a helper that handles the marks. I might change
-this function (move code inline and handle the marks by a separate function). We
-assume rather stupid ligatures (no complex disc nodes).</p>
---ldx]]--
+-- When we replace ligatures we use a helper that handles the marks. I might change
+-- this function (move code inline and handle the marks by a separate function). We
+-- assume rather stupid ligatures (no complex disc nodes).
-- compare to handlers.gsub_ligature which is more complex ... why
@@ -2532,7 +2513,7 @@ local function handle_contextchain(head,start,dataset,sequence,contexts,rlmode,s
-- fonts can have many steps (each doing one check) or many contexts
-- todo: make a per-char cache so that we have small contexts (when we have a context
- -- n == 1 and otherwise it can be more so we can even distingish n == 1 or more)
+ -- n == 1 and otherwise it can be more so we can even distinguish n == 1 or more)
local nofcontexts = contexts.n -- #contexts
diff --git a/tex/context/base/mkiv/font-syn.lua b/tex/context/base/mkiv/font-syn.lua
index e80d57f41..9fba3d8d4 100644
--- a/tex/context/base/mkiv/font-syn.lua
+++ b/tex/context/base/mkiv/font-syn.lua
@@ -56,10 +56,8 @@ local trace_rejections = false trackers.register("fonts.rejections", fu
local report_names = logs.reporter("fonts","names")
---[[ldx--
-<p>This module implements a name to filename resolver. Names are resolved
-using a table that has keys filtered from the font related files.</p>
---ldx]]--
+-- This module implements a name to filename resolver. Names are resolved using a
+-- table that has keys filtered from the font related files.
fonts = fonts or { } -- also used elsewhere
@@ -88,10 +86,6 @@ local autoreload = true
directives.register("fonts.autoreload", function(v) autoreload = toboolean(v) end)
directives.register("fonts.usesystemfonts", function(v) usesystemfonts = toboolean(v) end)
---[[ldx--
-<p>A few helpers.</p>
---ldx]]--
-
-- -- what to do with these -- --
--
-- thin -> thin
@@ -305,10 +299,8 @@ local function analyzespec(somename)
end
end
---[[ldx--
-<p>It would make sense to implement the filters in the related modules,
-but to keep the overview, we define them here.</p>
---ldx]]--
+-- It would make sense to implement the filters in the related modules, but to keep
+-- the overview, we define them here.
filters.afm = fonts.handlers.afm.readers.getinfo
filters.otf = fonts.handlers.otf.readers.getinfo
@@ -412,11 +404,9 @@ filters.ttc = filters.otf
-- end
-- end
---[[ldx--
-<p>The scanner loops over the filters using the information stored in
-the file databases. Watch how we check not only for the names, but also
-for combination with the weight of a font.</p>
---ldx]]--
+-- The scanner loops over the filters using the information stored in the file
+-- databases. Watch how we check not only for the names, but also for combination
+-- with the weight of a font.
filters.list = {
"otf", "ttf", "ttc", "afm", -- no longer dfont support (for now)
@@ -1402,11 +1392,8 @@ local function is_reloaded()
end
end
---[[ldx--
-<p>The resolver also checks if the cached names are loaded. Being clever
-here is for testing purposes only (it deals with names prefixed by an
-encoding name).</p>
---ldx]]--
+-- The resolver also checks if the cached names are loaded. Being clever here is for
+-- testing purposes only (it deals with names prefixed by an encoding name).
local function fuzzy(mapping,sorted,name,sub) -- no need for reverse sorted here
local condensed = gsub(name,"[^%a%d]","")
diff --git a/tex/context/base/mkiv/font-tfm.lua b/tex/context/base/mkiv/font-tfm.lua
index 945421a42..81f94532b 100644
--- a/tex/context/base/mkiv/font-tfm.lua
+++ b/tex/context/base/mkiv/font-tfm.lua
@@ -50,21 +50,18 @@ constructors.resolvevirtualtoo = false -- wil be set in font-ctx.lua
fonts.formats.tfm = "type1" -- we need to have at least a value here
fonts.formats.ofm = "type1" -- we need to have at least a value here
---[[ldx--
-<p>The next function encapsulates the standard <l n='tfm'/> loader as
-supplied by <l n='luatex'/>.</p>
---ldx]]--
-
--- this might change: not scaling and then apply features and do scaling in the
--- usual way with dummy descriptions but on the other hand .. we no longer use
--- tfm so why bother
-
--- ofm directive blocks local path search unless set; btw, in context we
--- don't support ofm files anyway as this format is obsolete
-
--- we need to deal with nested virtual fonts, but because we load in the
--- frontend we also need to make sure we don't nest too deep (esp when sizes
--- get large)
+-- The next function encapsulates the standard TFM loader as supplied by LuaTeX.
+--
+-- This might change: not scaling, then applying features, and doing the scaling in
+-- the usual way with dummy descriptions. However, we no longer use TFM (except for
+-- the JMN math fonts) so why bother.
+--
+-- The ofm directive blocks a local path search unless set. Actually, in ConTeXt we
+-- never had to deal with OFM files anyway as this format is obsolete (there are
+-- hardly any fonts in that format that are of use).
+--
+-- We need to deal with nested virtual fonts, but because we load in the frontend we
+-- also need to make sure we don't nest too deep (especially when sizes get large):
--
-- (VTITLE Example of a recursion)
-- (MAPFONT D 0 (FONTNAME recurse)(FONTAT D 2))
@@ -72,7 +69,7 @@ supplied by <l n='luatex'/>.</p>
-- (CHARACTER C B (CHARWD D 2)(CHARHT D 2)(MAP (SETCHAR C A)))
-- (CHARACTER C C (CHARWD D 4)(CHARHT D 4)(MAP (SETCHAR C B)))
--
--- we added the same checks as below to the luatex engine
+-- We added the same checks as below to the LuaTeX engine.
function tfm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("tfm",tfmdata,features,trace_features,report_tfm)
diff --git a/tex/context/base/mkiv/font-trt.lua b/tex/context/base/mkiv/font-trt.lua
index abc92ba52..893534078 100644
--- a/tex/context/base/mkiv/font-trt.lua
+++ b/tex/context/base/mkiv/font-trt.lua
@@ -12,11 +12,9 @@ local cleanfilename = fonts.names.cleanfilename
local splitbase = file.splitbase
local lower = string.lower
---[[ldx--
-<p>We provide a simple treatment mechanism (mostly because I want to demonstrate
-something in a manual). It's one of the few places where an lfg file gets loaded
-outside the goodies manager.</p>
---ldx]]--
+-- We provide a simple treatment mechanism (mostly because I want to demonstrate
+-- something in a manual). It's one of the few places where an lfg file gets loaded
+-- outside the goodies manager.
local treatments = fonts.treatments or { }
fonts.treatments = treatments
diff --git a/tex/context/base/mkiv/font-vir.lua b/tex/context/base/mkiv/font-vir.lua
index c3071cac0..6142ddafd 100644
--- a/tex/context/base/mkiv/font-vir.lua
+++ b/tex/context/base/mkiv/font-vir.lua
@@ -6,9 +6,8 @@ if not modules then modules = { } end modules ['font-vir'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is very experimental code! Not yet adapted to recent changes. This will change.</p>
---ldx]]--
+-- This is very experimental code! Not yet adapted to recent changes. This will
+-- change. Actually we moved on.
-- present in the backend but unspecified:
--
@@ -25,10 +24,8 @@ local constructors = fonts.constructors
local vf = constructors.handlers.vf
vf.version = 1.000 -- same as tfm
---[[ldx--
-<p>We overload the <l n='vf'/> reader.</p>
---ldx]]--
-
+-- We overload the VF reader:
+--
-- general code / already frozen
--
-- function vf.find(name)
diff --git a/tex/context/base/mkiv/l-dir.lua b/tex/context/base/mkiv/l-dir.lua
index 3e24e4e2a..316406850 100644
--- a/tex/context/base/mkiv/l-dir.lua
+++ b/tex/context/base/mkiv/l-dir.lua
@@ -21,7 +21,8 @@ local dir = dir
local lfs = lfs
local attributes = lfs.attributes
-local walkdir = lfs.dir
+----- walkdir = lfs.dir
+local scandir = lfs.dir
local isdir = lfs.isdir -- not robust, will be overloaded anyway
local isfile = lfs.isfile -- not robust, will be overloaded anyway
local currentdir = lfs.currentdir
@@ -69,6 +70,20 @@ else
end
+-- safeguard
+
+local isreadable = file.isreadable
+
+local walkdir = function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+
+lfs.walkdir = walkdir
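+
+-- A small usage sketch (the directory name is made up): walkdir behaves like
+-- lfs.dir but yields nothing for an unreadable path instead of erroring.
+--
+-- for name in lfs.walkdir("texmf-local") do
+--     print(name)
+-- end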
+
-- handy
function dir.current()
diff --git a/tex/context/base/mkiv/lang-url.lua b/tex/context/base/mkiv/lang-url.lua
index 7a8b7ca86..7cd666df5 100644
--- a/tex/context/base/mkiv/lang-url.lua
+++ b/tex/context/base/mkiv/lang-url.lua
@@ -21,12 +21,10 @@ local v_after = variables.after
local is_letter = characters.is_letter
---[[
-<p>Hyphenating <l n='url'/>'s is somewhat tricky and a matter of taste. I did
-consider using a dedicated hyphenation pattern or dealing with it by node
-parsing, but the following solution suits as well. After all, we're mostly
-dealing with <l n='ascii'/> characters.</p>
-]]--
+-- Hyphenating URLs is somewhat tricky and a matter of taste. I did consider using
+-- a dedicated hyphenation pattern or dealing with it by node parsing, but the
+-- following solution suits as well. After all, we're mostly dealing with ASCII
+-- characters.
local urls = { }
languages.urls = urls
diff --git a/tex/context/base/mkiv/luat-cbk.lua b/tex/context/base/mkiv/luat-cbk.lua
index 9fd55f3ec..9e35283c1 100644
--- a/tex/context/base/mkiv/luat-cbk.lua
+++ b/tex/context/base/mkiv/luat-cbk.lua
@@ -12,20 +12,16 @@ local collectgarbage, type, next = collectgarbage, type, next
local round = math.round
local sortedhash, sortedkeys, tohash = table.sortedhash, table.sortedkeys, table.tohash
---[[ldx--
-<p>Callbacks are the real asset of <l n='luatex'/>. They permit you to hook
-your own code into the <l n='tex'/> engine. Here we implement a few handy
-auxiliary functions.</p>
---ldx]]--
+-- Callbacks are the real asset of LuaTeX. They permit you to hook your own code
+-- into the TeX engine. Here we implement a few handy auxiliary functions. Watch
+-- out, there are differences between LuaTeX and LuaMetaTeX.
callbacks = callbacks or { }
local callbacks = callbacks
---[[ldx--
-<p>When you (temporarily) want to install a callback function, and after a
-while wants to revert to the original one, you can use the following two
-functions. This only works for non-frozen ones.</p>
---ldx]]--
+-- When you (temporarily) want to install a callback function, and after a while
+-- want to revert to the original one, you can use the following two functions.
+-- This only works for non-frozen ones.
local trace_callbacks = false trackers.register("system.callbacks", function(v) trace_callbacks = v end)
local trace_calls = false -- only used when analyzing performance and initializations
@@ -47,13 +43,12 @@ local list = callbacks.list
local permit_overloads = false
local block_overloads = false
---[[ldx--
-<p>By now most callbacks are frozen and most provide a way to plug in your own code. For instance
-all node list handlers provide before/after namespaces and the file handling code can be extended
-by adding schemes and if needed I can add more hooks. So there is no real need to overload a core
-callback function. It might be ok for quick and dirty testing but anyway you're on your own if
-you permanently overload callback functions.</p>
---ldx]]--
+-- By now most callbacks are frozen and most provide a way to plug in your own code.
+-- For instance all node list handlers provide before/after namespaces and the file
+-- handling code can be extended by adding schemes and if needed I can add more
+-- hooks. So there is no real need to overload a core callback function. It might be
+-- ok for quick and dirty testing but anyway you're on your own if you permanently
+-- overload callback functions.
-- This might become a configuration file only option when it gets abused too much.
@@ -279,65 +274,50 @@ end)
-- callbacks.freeze("read_.*_file","reading file")
-- callbacks.freeze("open_.*_file","opening file")
---[[ldx--
-<p>The simple case is to remove the callback:</p>
-
-<code>
-callbacks.push('linebreak_filter')
-... some actions ...
-callbacks.pop('linebreak_filter')
-</code>
-
-<p>Often, in such case, another callback or a macro call will pop
-the original.</p>
-
-<p>In practice one will install a new handler, like in:</p>
-
-<code>
-callbacks.push('linebreak_filter', function(...)
- return something_done(...)
-end)
-</code>
-
-<p>Even more interesting is:</p>
-
-<code>
-callbacks.push('linebreak_filter', function(...)
- callbacks.pop('linebreak_filter')
- return something_done(...)
-end)
-</code>
-
-<p>This does a one-shot.</p>
---ldx]]--
-
---[[ldx--
-<p>Callbacks may result in <l n='lua'/> doing some hard work
-which takes time and above all resourses. Sometimes it makes
-sense to disable or tune the garbage collector in order to
-keep the use of resources acceptable.</p>
-
-<p>At some point in the development we did some tests with counting
-nodes (in this case 121049).</p>
-
-<table>
-<tr><td>setstepmul</td><td>seconds</td><td>megabytes</td></tr>
-<tr><td>200</td><td>24.0</td><td>80.5</td></tr>
-<tr><td>175</td><td>21.0</td><td>78.2</td></tr>
-<tr><td>150</td><td>22.0</td><td>74.6</td></tr>
-<tr><td>160</td><td>22.0</td><td>74.6</td></tr>
-<tr><td>165</td><td>21.0</td><td>77.6</td></tr>
-<tr><td>125</td><td>21.5</td><td>89.2</td></tr>
-<tr><td>100</td><td>21.5</td><td>88.4</td></tr>
-</table>
-
-<p>The following code is kind of experimental. In the documents
-that describe the development of <l n='luatex'/> we report
-on speed tests. One observation is that it sometimes helps to
-restart the collector. Okay, experimental code has been removed,
-because messing aroudn with the gc is too unpredictable.</p>
---ldx]]--
-
+-- The simple case is to remove the callback:
+--
+-- callbacks.push('linebreak_filter')
+-- ... some actions ...
+-- callbacks.pop('linebreak_filter')
+--
+-- Often, in such case, another callback or a macro call will pop the original.
+--
+-- In practice one will install a new handler, like in:
+--
+-- callbacks.push('linebreak_filter', function(...)
+-- return something_done(...)
+-- end)
+--
+-- Even more interesting is:
+--
+-- callbacks.push('linebreak_filter', function(...)
+-- callbacks.pop('linebreak_filter')
+-- return something_done(...)
+-- end)
+--
+-- This does a one-shot.
+--
+-- Callbacks may result in Lua doing some hard work which takes time and above all
+-- resources. Sometimes it makes sense to disable or tune the garbage collector in
+-- order to keep the use of resources acceptable.
+--
+-- At some point in the development we did some tests with counting nodes (in this
+-- case 121049).
+--
+-- setstepmul seconds megabytes
+-- 200 24.0 80.5
+-- 175 21.0 78.2
+-- 150 22.0 74.6
+-- 160 22.0 74.6
+-- 165 21.0 77.6
+-- 125 21.5 89.2
+-- 100 21.5 88.4
+--
+-- The following code is kind of experimental. In the documents that describe the
+-- development of LuaTeX we report on speed tests. One observation is that it
+-- sometimes helps to restart the collector. Okay, experimental code has been
+-- removed, because messing around with the gc is too unpredictable.
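+--
+-- For the record, such tuning boils down to standard Lua calls; an illustration
+-- only, not the removed code:
+--
+-- collectgarbage("setstepmul",165) -- let each collection step do more work
+-- collectgarbage("restart")       -- restart a previously stopped collector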
+--
-- For the moment we keep this here and not in util-gbc.lua or so.
utilities = utilities or { }
diff --git a/tex/context/base/mkiv/luat-ini.lua b/tex/context/base/mkiv/luat-ini.lua
index dcca8cec7..83fe0713d 100644
--- a/tex/context/base/mkiv/luat-ini.lua
+++ b/tex/context/base/mkiv/luat-ini.lua
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['luat-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We cannot load anything yet. However what we will do us reserve a few tables.
-These can be used for runtime user data or third party modules and will not be
-cluttered by macro package code.</p>
---ldx]]--
+-- We cannot load anything yet. However, what we will do is reserve a few tables.
+-- These can be used for runtime user data or third party modules and will not be
+-- cluttered by macro package code.
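+--
+-- For example, a third party module can safely do (names made up):
+--
+-- thirddata.mymodule = thirddata.mymodule or { }
+-- userdata.myhelper  = function(s) return s end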
userdata = userdata or { } -- for users (e.g. functions etc)
thirddata = thirddata or { } -- only for third party modules
diff --git a/tex/context/base/mkiv/lxml-aux.lua b/tex/context/base/mkiv/lxml-aux.lua
index fc17371e5..217f81c13 100644
--- a/tex/context/base/mkiv/lxml-aux.lua
+++ b/tex/context/base/mkiv/lxml-aux.lua
@@ -110,11 +110,7 @@ function xml.processattributes(root,pattern,handle)
return collected
end
---[[ldx--
-<p>The following functions collect elements and texts.</p>
---ldx]]--
-
--- are these still needed -> lxml-cmp.lua
+-- The following functions collect elements and texts.
function xml.collect(root, pattern)
return xmlapplylpath(root,pattern)
@@ -153,9 +149,7 @@ function xml.collect_tags(root, pattern, nonamespace)
end
end
---[[ldx--
-<p>We've now arrived at the functions that manipulate the tree.</p>
---ldx]]--
+-- We've now arrived at the functions that manipulate the tree.
local no_root = { no_root = true }
@@ -780,9 +774,7 @@ function xml.remapname(root, pattern, newtg, newns, newrn)
end
end
---[[ldx--
-<p>Helper (for q2p).</p>
---ldx]]--
+-- Helper (for q2p).
function xml.cdatatotext(e)
local dt = e.dt
@@ -879,9 +871,7 @@ end
-- xml.addentitiesdoctype(x,"hexadecimal")
-- print(x)
---[[ldx--
-<p>Here are a few synonyms.</p>
---ldx]]--
+-- Here are a few synonyms:
xml.all = xml.each
xml.insert = xml.insertafter
diff --git a/tex/context/base/mkiv/lxml-ent.lua b/tex/context/base/mkiv/lxml-ent.lua
index df80a7985..1d6d058b6 100644
--- a/tex/context/base/mkiv/lxml-ent.lua
+++ b/tex/context/base/mkiv/lxml-ent.lua
@@ -10,14 +10,10 @@ local next = next
local byte, format = string.byte, string.format
local setmetatableindex = table.setmetatableindex
---[[ldx--
-<p>We provide (at least here) two entity handlers. The more extensive
-resolver consults a hash first, tries to convert to <l n='utf'/> next,
-and finaly calls a handler when defines. When this all fails, the
-original entity is returned.</p>
-
-<p>We do things different now but it's still somewhat experimental</p>
---ldx]]--
+-- We provide (at least here) two entity handlers. The more extensive resolver
+-- consults a hash first, tries to convert to UTF next, and finally calls a handler
+-- when defined. When this all fails, the original entity is returned. We do things
+-- differently now but it's still somewhat experimental.
local trace_entities = false trackers.register("xml.entities", function(v) trace_entities = v end)
diff --git a/tex/context/base/mkiv/lxml-lpt.lua b/tex/context/base/mkiv/lxml-lpt.lua
index 78a9fca2e..d242b07de 100644
--- a/tex/context/base/mkiv/lxml-lpt.lua
+++ b/tex/context/base/mkiv/lxml-lpt.lua
@@ -20,28 +20,21 @@ local formatters = string.formatters -- no need (yet) as paths are cached anyway
-- beware, this is not xpath ... e.g. position is different (currently) and
-- we have reverse-sibling as reversed preceding sibling
---[[ldx--
-<p>This module can be used stand alone but also inside <l n='mkiv'/> in
-which case it hooks into the tracker code. Therefore we provide a few
-functions that set the tracers. Here we overload a previously defined
-function.</p>
-<p>If I can get in the mood I will make a variant that is XSLT compliant
-but I wonder if it makes sense.</P>
---ldx]]--
-
---[[ldx--
-<p>Expecially the lpath code is experimental, we will support some of xpath, but
-only things that make sense for us; as compensation it is possible to hook in your
-own functions. Apart from preprocessing content for <l n='context'/> we also need
-this module for process management, like handling <l n='ctx'/> and <l n='rlx'/>
-files.</p>
-
-<typing>
-a/b/c /*/c
-a/b/c/first() a/b/c/last() a/b/c/index(n) a/b/c/index(-n)
-a/b/c/text() a/b/c/text(1) a/b/c/text(-1) a/b/c/text(n)
-</typing>
---ldx]]--
+-- This module can be used stand alone but also inside ConTeXt in which case it
+-- hooks into the tracker code. Therefore we provide a few functions that set the
+-- tracers. Here we overload a previously defined function.
+--
+-- If I can get in the mood I will make a variant that is XSLT compliant but I
+-- wonder if it makes sense.
+--
+-- Especially the lpath code is experimental; we will support some of xpath, but
+-- only things that make sense for us; as compensation it is possible to hook in
+-- your own functions. Apart from preprocessing content for ConTeXt we also need
+-- this module for process management, like handling CTX and RLX files.
+--
+-- a/b/c /*/c
+-- a/b/c/first() a/b/c/last() a/b/c/index(n) a/b/c/index(-n)
+-- a/b/c/text() a/b/c/text(1) a/b/c/text(-1) a/b/c/text(n)
local trace_lpath = false
local trace_lparse = false
@@ -62,11 +55,9 @@ if trackers then
end)
end
---[[ldx--
-<p>We've now arrived at an interesting part: accessing the tree using a subset
-of <l n='xpath'/> and since we're not compatible we call it <l n='lpath'/>. We
-will explain more about its usage in other documents.</p>
---ldx]]--
+-- We've now arrived at an interesting part: accessing the tree using a subset of
+-- XPATH and since we're not compatible we call it LPATH. We will explain more about
+-- its usage in other documents.
local xml = xml
@@ -1273,9 +1264,8 @@ do
end
local applylpath = xml.applylpath
---[[ldx--
-<p>This is the main filter function. It returns whatever is asked for.</p>
---ldx]]--
+
+-- This is the main filter function. It returns whatever is asked for.
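+--
+-- A typical call, using one of the lpath patterns shown earlier:
+--
+-- xml.filter(root,"a/b/c/text()")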
function xml.filter(root,pattern) -- no longer funny attribute handling here
return applylpath(root,pattern)
@@ -1525,21 +1515,16 @@ expressions.tag = function(e,n) -- only tg
end
end
---[[ldx--
-<p>Often using an iterators looks nicer in the code than passing handler
-functions. The <l n='lua'/> book describes how to use coroutines for that
-purpose (<url href='http://www.lua.org/pil/9.3.html'/>). This permits
-code like:</p>
-
-<typing>
-for r, d, k in xml.elements(xml.load('text.xml'),"title") do
- print(d[k]) -- old method
-end
-for e in xml.collected(xml.load('text.xml'),"title") do
- print(e) -- new one
-end
-</typing>
---ldx]]--
+-- Often using an iterator looks nicer in the code than passing handler functions.
+-- The Lua book describes how to use coroutines for that purpose
+-- (http://www.lua.org/pil/9.3.html). This permits code like:
+--
+-- for r, d, k in xml.elements(xml.load('text.xml'),"title") do
+-- print(d[k]) -- old method
+-- end
+-- for e in xml.collected(xml.load('text.xml'),"title") do
+-- print(e) -- new one
+-- end
-- local wrap, yield = coroutine.wrap, coroutine.yield
-- local dummy = function() end
diff --git a/tex/context/base/mkiv/lxml-mis.lua b/tex/context/base/mkiv/lxml-mis.lua
index 04ba7b35c..ea62550bb 100644
--- a/tex/context/base/mkiv/lxml-mis.lua
+++ b/tex/context/base/mkiv/lxml-mis.lua
@@ -17,13 +17,10 @@ local P, S, R, C, V, Cc, Cs = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.Cc, l
lpegpatterns.xml = lpegpatterns.xml or { }
local xmlpatterns = lpegpatterns.xml
---[[ldx--
-<p>The following helper functions best belong to the <t>lxml-ini</t>
-module. Some are here because we need then in the <t>mk</t>
-document and other manuals, others came up when playing with
-this module. Since this module is also used in <l n='mtxrun'/> we've
-put them here instead of loading mode modules there then needed.</p>
---ldx]]--
+-- The following helper functions best belong to the 'lxml-ini' module. Some are
+-- here because we need them in the 'mk' document and other manuals, others came up
+-- when playing with this module. Since this module is also used in 'mtxrun' we've
+-- put them here instead of loading more modules there than needed.
local function xmlgsub(t,old,new) -- will be replaced
local dt = t.dt
diff --git a/tex/context/base/mkiv/lxml-tab.lua b/tex/context/base/mkiv/lxml-tab.lua
index e18362bd8..a06b59065 100644
--- a/tex/context/base/mkiv/lxml-tab.lua
+++ b/tex/context/base/mkiv/lxml-tab.lua
@@ -18,13 +18,12 @@ local trace_entities = false trackers.register("xml.entities", function(v) trac
local report_xml = logs and logs.reporter("xml","core") or function(...) print(string.format(...)) end
---[[ldx--
-<p>The parser used here is inspired by the variant discussed in the lua book, but
-handles comment and processing instructions, has a different structure, provides
-parent access; a first version used different trickery but was less optimized to we
-went this route. First we had a find based parser, now we have an <l n='lpeg'/> based one.
-The find based parser can be found in l-xml-edu.lua along with other older code.</p>
---ldx]]--
+-- The parser used here is inspired by the variant discussed in the Lua book, but
+-- handles comments and processing instructions, has a different structure, provides
+-- parent access; a first version used different trickery but was less optimized so
+-- we went this route. First we had a find based parser, now we have an LPEG based
+-- one. The find based parser can be found in l-xml-edu.lua along with other older
+-- code.
if lpeg.setmaxstack then lpeg.setmaxstack(1000) end -- deeply nested xml files
@@ -42,26 +41,19 @@ local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local P, S, R, C, V, C, Cs = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.C, lpeg.Cs
local formatters = string.formatters
---[[ldx--
-<p>First a hack to enable namespace resolving. A namespace is characterized by
-a <l n='url'/>. The following function associates a namespace prefix with a
-pattern. We use <l n='lpeg'/>, which in this case is more than twice as fast as a
-find based solution where we loop over an array of patterns. Less code and
-much cleaner.</p>
---ldx]]--
+-- First a hack to enable namespace resolving. A namespace is characterized by a
+-- URL. The following function associates a namespace prefix with a pattern. We use
+-- LPEG, which in this case is more than twice as fast as a find based solution
+-- where we loop over an array of patterns. Less code and much cleaner.
do -- begin of namespace closure (we ran out of locals)
xml.xmlns = xml.xmlns or { }
---[[ldx--
-<p>The next function associates a namespace prefix with an <l n='url'/>. This
-normally happens independent of parsing.</p>
-
-<typing>
-xml.registerns("mml","mathml")
-</typing>
---ldx]]--
+-- The next function associates a namespace prefix with a URL. This normally
+-- happens independently of parsing.
+--
+-- xml.registerns("mml","mathml")
local check = P(false)
local parse = check
@@ -71,15 +63,11 @@ function xml.registerns(namespace, pattern) -- pattern can be an lpeg
parse = P { P(check) + 1 * V(1) }
end
---[[ldx--
-<p>The next function also registers a namespace, but this time we map a
-given namespace prefix onto a registered one, using the given
-<l n='url'/>. This used for attributes like <t>xmlns:m</t>.</p>
-
-<typing>
-xml.checkns("m","http://www.w3.org/mathml")
-</typing>
---ldx]]--
+-- The next function also registers a namespace, but this time we map a given
+-- namespace prefix onto a registered one, using the given URL. This is used for
+-- attributes like 'xmlns:m'.
+--
+-- xml.checkns("m","http://www.w3.org/mathml")
function xml.checkns(namespace,url)
local ns = lpegmatch(parse,lower(url))
@@ -88,68 +76,54 @@ function xml.checkns(namespace,url)
end
end
---[[ldx--
-<p>Next we provide a way to turn an <l n='url'/> into a registered
-namespace. This used for the <t>xmlns</t> attribute.</p>
-
-<typing>
-resolvedns = xml.resolvens("http://www.w3.org/mathml")
-</typing>
-
-This returns <t>mml</t>.
---ldx]]--
+-- Next we provide a way to turn a URL into a registered namespace. This is used
+-- for the 'xmlns' attribute.
+--
+-- resolvedns = xml.resolvens("http://www.w3.org/mathml")
+--
+-- This returns 'mml'.
function xml.resolvens(url)
return lpegmatch(parse,lower(url)) or ""
end
---[[ldx--
-<p>A namespace in an element can be remapped onto the registered
-one efficiently by using the <t>xml.xmlns</t> table.</p>
---ldx]]--
+-- A namespace in an element can be remapped onto the registered one efficiently by
+-- using the 'xml.xmlns' table.
end -- end of namespace closure
---[[ldx--
-<p>This version uses <l n='lpeg'/>. We follow the same approach as before, stack and top and
-such. This version is about twice as fast which is mostly due to the fact that
-we don't have to prepare the stream for cdata, doctype etc etc. This variant is
-is dedicated to Luigi Scarso, who challenged me with 40 megabyte <l n='xml'/> files that
-took 12.5 seconds to load (1.5 for file io and the rest for tree building). With
-the <l n='lpeg'/> implementation we got that down to less 7.3 seconds. Loading the 14
-<l n='context'/> interface definition files (2.6 meg) went down from 1.05 seconds to 0.55.</p>
-
-<p>Next comes the parser. The rather messy doctype definition comes in many
-disguises so it is no surprice that later on have to dedicate quite some
-<l n='lpeg'/> code to it.</p>
-
-<typing>
-<!DOCTYPE Something PUBLIC "... ..." "..." [ ... ] >
-<!DOCTYPE Something PUBLIC "... ..." "..." >
-<!DOCTYPE Something SYSTEM "... ..." [ ... ] >
-<!DOCTYPE Something SYSTEM "... ..." >
-<!DOCTYPE Something [ ... ] >
-<!DOCTYPE Something >
-</typing>
-
-<p>The code may look a bit complex but this is mostly due to the fact that we
-resolve namespaces and attach metatables. There is only one public function:</p>
-
-<typing>
-local x = xml.convert(somestring)
-</typing>
-
-<p>An optional second boolean argument tells this function not to create a root
-element.</p>
-
-<p>Valid entities are:</p>
-
-<typing>
-<!ENTITY xxxx SYSTEM "yyyy" NDATA zzzz>
-<!ENTITY xxxx PUBLIC "yyyy" >
-<!ENTITY xxxx "yyyy" >
-</typing>
---ldx]]--
+-- This version uses LPEG. We follow the same approach as before, stack and top and
+-- such. This version is about twice as fast which is mostly due to the fact that we
+-- don't have to prepare the stream for cdata, doctype etc etc. This variant is
+-- dedicated to Luigi Scarso, who challenged me with 40 megabyte XML files that took
+-- 12.5 seconds to load (1.5 for file io and the rest for tree building). With the
+-- LPEG implementation we got that down to less than 7.3 seconds. Loading the 14
+-- ConTeXt interface definition files (2.6 meg) went down from 1.05 seconds to 0.55.
+--
+-- Next comes the parser. The rather messy doctype definition comes in many
+-- disguises so it is no surprise that later on we have to dedicate quite some LPEG
+-- code to it.
+--
+-- <!DOCTYPE Something PUBLIC "... ..." "..." [ ... ] >
+-- <!DOCTYPE Something PUBLIC "... ..." "..." >
+-- <!DOCTYPE Something SYSTEM "... ..." [ ... ] >
+-- <!DOCTYPE Something SYSTEM "... ..." >
+-- <!DOCTYPE Something [ ... ] >
+-- <!DOCTYPE Something >
+--
+-- The code may look a bit complex but this is mostly due to the fact that we
+-- resolve namespaces and attach metatables. There is only one public function:
+--
+-- local x = xml.convert(somestring)
+--
+-- An optional second boolean argument tells this function not to create a root
+-- element.
+--
+-- Valid entities are:
+--
+-- <!ENTITY xxxx SYSTEM "yyyy" NDATA zzzz>
+-- <!ENTITY xxxx PUBLIC "yyyy" >
+-- <!ENTITY xxxx "yyyy" >
-- not just one big nested table capture (lpeg overflow)
@@ -1332,10 +1306,8 @@ function xml.inheritedconvert(data,xmldata,cleanup) -- xmldata is parent
return xc
end
---[[ldx--
-<p>Packaging data in an xml like table is done with the following
-function. Maybe it will go away (when not used).</p>
---ldx]]--
+-- Packaging data in an XML-like table is done with the following function. Maybe it
+-- will go away (when not used).
function xml.is_valid(root)
return root and root.dt and root.dt[1] and type(root.dt[1]) == "table" and not root.dt[1].er
@@ -1354,11 +1326,8 @@ end
xml.errorhandler = report_xml
---[[ldx--
-<p>We cannot load an <l n='lpeg'/> from a filehandle so we need to load
-the whole file first. The function accepts a string representing
-a filename or a file handle.</p>
---ldx]]--
+-- We cannot load an LPEG from a filehandle so we need to load the whole file first.
+-- The function accepts a string representing a filename or a file handle.
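+--
+-- local x = xml.load("test.xml") -- the filename here is just an example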
function xml.load(filename,settings)
local data = ""
@@ -1382,10 +1351,8 @@ function xml.load(filename,settings)
end
end
---[[ldx--
-<p>When we inject new elements, we need to convert strings to
-valid trees, which is what the next function does.</p>
---ldx]]--
+-- When we inject new elements, we need to convert strings to valid trees, which is
+-- what the next function does.
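+--
+-- local t = xml.toxml("<some>snippet</some>") -- content made up for illustration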
local no_root = { no_root = true }
@@ -1398,11 +1365,9 @@ function xml.toxml(data)
end
end
---[[ldx--
-<p>For copying a tree we use a dedicated function instead of the
-generic table copier. Since we know what we're dealing with we
-can speed up things a bit. The second argument is not to be used!</p>
---ldx]]--
+-- For copying a tree we use a dedicated function instead of the generic table
+-- copier. Since we know what we're dealing with we can speed up things a bit. The
+-- second argument is not to be used!
-- local function copy(old)
-- if old then
@@ -1466,13 +1431,10 @@ end
xml.copy = copy
---[[ldx--
-<p>In <l n='context'/> serializing the tree or parts of the tree is a major
-actitivity which is why the following function is pretty optimized resulting
-in a few more lines of code than needed. The variant that uses the formatting
-function for all components is about 15% slower than the concatinating
-alternative.</p>
---ldx]]--
+-- In ConTeXt serializing the tree or parts of the tree is a major activity which
+-- is why the following function is pretty optimized resulting in a few more lines
+-- of code than needed. The variant that uses the formatting function for all
+-- components is about 15% slower than the concatenating alternative.
-- todo: add <?xml version='1.0' standalone='yes'?> when not present
@@ -1490,10 +1452,8 @@ function xml.checkbom(root) -- can be made faster
end
end
---[[ldx--
-<p>At the cost of some 25% runtime overhead you can first convert the tree to a string
-and then handle the lot.</p>
---ldx]]--
+-- At the cost of some 25% runtime overhead you can first convert the tree to a
+-- string and then handle the lot.
-- new experimental reorganized serialize
@@ -1711,21 +1671,18 @@ newhandlers {
}
}
---[[ldx--
-<p>How you deal with saving data depends on your preferences. For a 40 MB database
-file the timing on a 2.3 Core Duo are as follows (time in seconds):</p>
-
-<lines>
-1.3 : load data from file to string
-6.1 : convert string into tree
-5.3 : saving in file using xmlsave
-6.8 : converting to string using xml.tostring
-3.6 : saving converted string in file
-</lines>
-<p>Beware, these were timing with the old routine but measurements will not be that
-much different I guess.</p>
---ldx]]--
+-- How you deal with saving data depends on your preferences. For a 40 MB database
+-- file the timings on a 2.3 Core Duo are as follows (time in seconds):
+--
+-- 1.3 : load data from file to string
+-- 6.1 : convert string into tree
+-- 5.3 : saving in file using xmlsave
+-- 6.8 : converting to string using xml.tostring
+-- 3.6 : saving converted string in file
+--
+-- Beware, these were timings with the old routine but measurements will not be
+-- that much different I guess.
-- maybe this will move to lxml-xml
@@ -1827,10 +1784,8 @@ xml.newhandlers = newhandlers
xml.serialize = serialize
xml.tostring = xmltostring
---[[ldx--
-<p>The next function operated on the content only and needs a handle function
-that accepts a string.</p>
---ldx]]--
+-- The next function operates on the content only and needs a handle function that
+-- accepts a string.
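+--
+-- xml.string(e,print) -- for instance: print every content string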
local function xmlstring(e,handle)
if not handle or (e.special and e.tg ~= "@rt@") then
@@ -1849,9 +1804,7 @@ end
xml.string = xmlstring
---[[ldx--
-<p>A few helpers:</p>
---ldx]]--
+-- A few helpers:
--~ xmlsetproperty(root,"settings",settings)
@@ -1899,11 +1852,9 @@ function xml.name(root)
end
end
---[[ldx--
-<p>The next helper erases an element but keeps the table as it is,
-and since empty strings are not serialized (effectively) it does
-not harm. Copying the table would take more time. Usage:</p>
---ldx]]--
+-- The next helper erases an element but keeps the table as it is, and since empty
+-- strings are not serialized (effectively) it does no harm. Copying the table
+-- would take more time.
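+--
+-- Usage: xml.erase(dt,k)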
function xml.erase(dt,k)
if dt then
@@ -1915,13 +1866,9 @@ function xml.erase(dt,k)
end
end
---[[ldx--
-<p>The next helper assigns a tree (or string). Usage:</p>
-
-<typing>
-dt[k] = xml.assign(root) or xml.assign(dt,k,root)
-</typing>
---ldx]]--
+-- The next helper assigns a tree (or string). Usage:
+--
+-- dt[k] = xml.assign(root) or xml.assign(dt,k,root)
function xml.assign(dt,k,root)
if dt and k then
@@ -1932,15 +1879,10 @@ function xml.assign(dt,k,root)
end
end
--- the following helpers may move
-
---[[ldx--
-<p>The next helper assigns a tree (or string). Usage:</p>
-<typing>
-xml.tocdata(e)
-xml.tocdata(e,"error")
-</typing>
---ldx]]--
+-- The next helper converts an element (or string) to CDATA. Usage:
+--
+-- xml.tocdata(e)
+-- xml.tocdata(e,"error")
function xml.tocdata(e,wrapper) -- a few more in the aux module
local whatever = type(e) == "table" and xmltostring(e.dt) or e or ""
diff --git a/tex/context/base/mkiv/math-map.lua b/tex/context/base/mkiv/math-map.lua
index 5f93b43fc..153dde852 100644
--- a/tex/context/base/mkiv/math-map.lua
+++ b/tex/context/base/mkiv/math-map.lua
@@ -7,31 +7,13 @@ if not modules then modules = { } end modules ['math-map'] = {
license = "see context related readme files"
}
--- todo: make sparse .. if self
-
---[[ldx--
-<p>Remapping mathematics alphabets.</p>
---ldx]]--
-
--- oldstyle: not really mathematics but happened to be part of
--- the mathematics fonts in cmr
---
--- persian: we will also provide mappers for other
--- scripts
-
--- todo: alphabets namespace
--- maybe: script/scriptscript dynamic,
-
--- superscripped primes get unscripted !
-
--- to be looked into once the fonts are ready (will become font
--- goodie):
---
--- (U+2202,U+1D715) : upright
--- (U+2202,U+1D715) : italic
--- (U+2202,U+1D715) : upright
---
--- plus add them to the regular vectors below so that they honor \it etc
+-- persian: we will also provide mappers for other scripts
+-- todo : alphabets namespace
+-- maybe : script/scriptscript dynamic,
+-- check : (U+2202,U+1D715) : upright
+-- (U+2202,U+1D715) : italic
+-- (U+2202,U+1D715) : upright
+-- add them to the regular vectors below so that they honor \it etc
local type, next = type, next
local merged, sortedhash = table.merged, table.sortedhash
diff --git a/tex/context/base/mkiv/meta-fun.lua b/tex/context/base/mkiv/meta-fun.lua
index ddbbd9a52..aa388b0ca 100644
--- a/tex/context/base/mkiv/meta-fun.lua
+++ b/tex/context/base/mkiv/meta-fun.lua
@@ -13,15 +13,18 @@ local format, load, type = string.format, load, type
local context = context
local metapost = metapost
-metapost.metafun = metapost.metafun or { }
-local metafun = metapost.metafun
+local metafun = metapost.metafun or { }
+metapost.metafun = metafun
function metafun.topath(t,connector)
context("(")
if #t > 0 then
+ if not connector then
+ connector = ".."
+ end
for i=1,#t do
if i > 1 then
- context(connector or "..")
+ context(connector)
end
local ti = t[i]
if type(ti) == "string" then
@@ -39,12 +42,15 @@ end
function metafun.interpolate(f,b,e,s,c)
local done = false
context("(")
- for i=b,e,(e-b)/s do
- local d = load(format("return function(x) return %s end",f))
- if d then
- d = d()
+ local d = load(format("return function(x) return %s end",f))
+ if d then
+ d = d()
+ if not c then
+ c = "..."
+ end
+ for i=b,e,(e-b)/s do
if done then
- context(c or "...")
+ context(c)
else
done = true
end
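
A hedged usage sketch of the two helpers; output is written to the MetaPost
stream via context(), and the literals are only illustrations:

    metafun.topath({ "(0,0)", "(1,1)", "(2,0)" }, "--") -- emits ((0,0)--(1,1)--(2,0))
    metafun.interpolate("x*x", 0, 1, 4)                 -- five samples of x^2 on [0,1]
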
diff --git a/tex/context/base/mkiv/mlib-fio.lua b/tex/context/base/mkiv/mlib-fio.lua
index 51c88eb22..39a709505 100644
--- a/tex/context/base/mkiv/mlib-fio.lua
+++ b/tex/context/base/mkiv/mlib-fio.lua
@@ -54,8 +54,18 @@ local function validftype(ftype)
end
end
+local remapped = {
+ -- We don't yet have an interface for adding more here but when needed
+ -- there will be one.
+ ["hatching.mp"] = "mp-remapped-hatching.mp",
+ ["boxes.mp"] = "mp-remapped-boxes.mp",
+ ["hatching"] = "mp-remapped-hatching.mp",
+ ["boxes"] = "mp-remapped-boxes.mp",
+}
+
finders.file = function(specification,name,mode,ftype)
- return resolvers.findfile(name,validftype(ftype))
+ local usedname = remapped[name] or name
+ return resolvers.findfile(usedname,validftype(ftype))
end
local function i_finder(name,mode,ftype) -- fake message for mpost.map and metafun.mpvi
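
A minimal sketch of the remapping logic: names present in the table are
rewritten, anything else falls through unchanged:

    print(remapped["boxes.mp"] or "boxes.mp")         -- mp-remapped-boxes.mp
    print(remapped["metafun.mpiv"] or "metafun.mpiv") -- metafun.mpiv (no entry)
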
diff --git a/tex/context/base/mkiv/mlib-run.lua b/tex/context/base/mkiv/mlib-run.lua
index 602d6f36c..82426668f 100644
--- a/tex/context/base/mkiv/mlib-run.lua
+++ b/tex/context/base/mkiv/mlib-run.lua
@@ -6,28 +6,12 @@ if not modules then modules = { } end modules ['mlib-run'] = {
license = "see context related readme files",
}
--- cmyk -> done, native
--- spot -> done, but needs reworking (simpler)
--- multitone ->
--- shade -> partly done, todo: cm
--- figure -> done
--- hyperlink -> low priority, easy
-
--- new * run
--- or
--- new * execute^1 * finish
-
--- a*[b,c] == b + a * (c-b)
-
---[[ldx--
-<p>The directional helpers and pen analysis are more or less translated from the
-<l n='c'/> code. It really helps that Taco know that source so well. Taco and I spent
-quite some time on speeding up the <l n='lua'/> and <l n='c'/> code. There is not
-much to gain, especially if one keeps in mind that when integrated in <l n='tex'/>
-only a part of the time is spent in <l n='metapost'/>. Of course an integrated
-approach is way faster than an external <l n='metapost'/> and processing time
-nears zero.</p>
---ldx]]--
+-- The directional helpers and pen analysis are more or less translated from the C
+-- code. It really helps that Taco knows that source so well. Taco and I spent quite
+-- some time on speeding up the Lua and C code. There is not much to gain,
+-- especially if one keeps in mind that when integrated in TeX only a part of the
+-- time is spent in MetaPost. Of course an integrated approach is way faster than an
+-- external MetaPost and processing time nears zero.
local type, tostring, tonumber, next = type, tostring, tonumber, next
local find, striplines = string.find, utilities.strings.striplines
diff --git a/tex/context/base/mkiv/mult-mps.lua b/tex/context/base/mkiv/mult-mps.lua
index 008bcbb9f..cfa821517 100644
--- a/tex/context/base/mkiv/mult-mps.lua
+++ b/tex/context/base/mkiv/mult-mps.lua
@@ -127,7 +127,7 @@ return {
--
"red", "green", "blue", "cyan", "magenta", "yellow", "black", "white", "background",
--
- "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk",
+ "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk", "es", "ts",
--
"triplet", "quadruplet", "totransform", "bymatrix", "closedcurve", "closedlines",
--
diff --git a/tex/context/base/mkiv/node-ini.lua b/tex/context/base/mkiv/node-ini.lua
index ef7d4afed..ea726ff3d 100644
--- a/tex/context/base/mkiv/node-ini.lua
+++ b/tex/context/base/mkiv/node-ini.lua
@@ -6,50 +6,38 @@ if not modules then modules = { } end modules ['node-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Most of the code that had accumulated here is now separated in modules.</p>
---ldx]]--
-
--- I need to clean up this module as it's a bit of a mess now. The latest luatex
--- has most tables but we have a few more in luametatex. Also, some are different
--- between these engines. We started out with hardcoded tables, that then ended
--- up as comments and are now gone (as they differ per engine anyway).
+-- Most of the code that had accumulated here is now separated into modules.
local next, type, tostring = next, type, tostring
local gsub = string.gsub
local concat, remove = table.concat, table.remove
local sortedhash, sortedkeys, swapped = table.sortedhash, table.sortedkeys, table.swapped
---[[ldx--
-<p>Access to nodes is what gives <l n='luatex'/> its power. Here we implement a
-few helper functions. These functions are rather optimized.</p>
---ldx]]--
-
---[[ldx--
-<p>When manipulating node lists in <l n='context'/>, we will remove nodes and
-insert new ones. While node access was implemented, we did quite some experiments
-in order to find out if manipulating nodes in <l n='lua'/> was feasible from the
-perspective of performance.</p>
-
-<p>First of all, we noticed that the bottleneck is more with excessive callbacks
-(some gets called very often) and the conversion from and to <l n='tex'/>'s
-datastructures. However, at the <l n='lua'/> end, we found that inserting and
-deleting nodes in a table could become a bottleneck.</p>
-
-<p>This resulted in two special situations in passing nodes back to <l n='tex'/>:
-a table entry with value <type>false</type> is ignored, and when instead of a
-table <type>true</type> is returned, the original table is used.</p>
-
-<p>Insertion is handled (at least in <l n='context'/> as follows. When we need to
-insert a node at a certain position, we change the node at that position by a
-dummy node, tagged <type>inline</type> which itself has_attribute the original
-node and one or more new nodes. Before we pass back the list we collapse the
-list. Of course collapsing could be built into the <l n='tex'/> engine, but this
-is a not so natural extension.</p>
-
-<p>When we collapse (something that we only do when really needed), we also
-ignore the empty nodes. [This is obsolete!]</p>
---ldx]]--
+-- Access to nodes is what gives LuaTeX its power. Here we implement a few helper
+-- functions. These functions are rather optimized.
+--
+-- When manipulating node lists in ConTeXt, we will remove nodes and insert new
+-- ones. While node access was implemented, we did quite some experiments in order
+-- to find out if manipulating nodes in Lua was feasible from the perspective of
+-- performance.
+--
+-- First of all, we noticed that the bottleneck is more with excessive callbacks
+-- (some get called very often) and the conversion from and to TeX's
+-- datastructures. However, at the Lua end, we found that inserting and deleting
+-- nodes in a table could become a bottleneck.
+--
+-- This resulted in two special situations in passing nodes back to TeX: a table
+-- entry with value 'false' is ignored, and when instead of a table 'true' is
+-- returned, the original table is used.
+--
+-- Insertion is handled (at least in ConTeXt) as follows. When we need to insert
+-- a node at a certain position, we replace the node at that position with a
+-- dummy node, tagged 'inline', which itself carries the original node and one or
+-- more new nodes. Before we pass back the list we collapse the list. Of course
+-- collapsing could be built into the TeX engine, but this is not so natural an
+-- extension.
+
+-- When we collapse (something that we only do when really needed), we also ignore
+-- the empty nodes. [This is obsolete!]
-- local gf = node.direct.getfield
-- local n = table.setmetatableindex("number")
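
A hedged sketch of the true/false convention described above, as a hypothetical
node list handler:

    local function process(head)
        local touched = false
        -- ... inspect or manipulate nodes here ...
        if touched then
            return head -- pass the modified list back
        else
            return true -- true means: keep the original list as-is
        end
    end
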
diff --git a/tex/context/base/mkiv/node-res.lua b/tex/context/base/mkiv/node-res.lua
index 5c669f9da..f2c6e97e9 100644
--- a/tex/context/base/mkiv/node-res.lua
+++ b/tex/context/base/mkiv/node-res.lua
@@ -9,11 +9,6 @@ if not modules then modules = { } end modules ['node-res'] = {
local type, next = type, next
local gmatch, format = string.gmatch, string.format
---[[ldx--
-<p>The next function is not that much needed but in <l n='context'/> we use
-for debugging <l n='luatex'/> node management.</p>
---ldx]]--
-
local nodes, node = nodes, node
local report_nodes = logs.reporter("nodes","housekeeping")
diff --git a/tex/context/base/mkiv/node-tra.lua b/tex/context/base/mkiv/node-tra.lua
index 67435f1c7..20e354392 100644
--- a/tex/context/base/mkiv/node-tra.lua
+++ b/tex/context/base/mkiv/node-tra.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['node-tra'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is rather experimental. We need more control and some of this
-might become a runtime module instead. This module will be cleaned up!</p>
---ldx]]--
+-- Some of the code here might become a runtime module instead. This old module will
+-- be cleaned up anyway!
local next = next
local utfchar = utf.char
diff --git a/tex/context/base/mkiv/pack-obj.lua b/tex/context/base/mkiv/pack-obj.lua
index 445085776..dda828749 100644
--- a/tex/context/base/mkiv/pack-obj.lua
+++ b/tex/context/base/mkiv/pack-obj.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['pack-obj'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We save object references in the main utility table. jobobjects are
-reusable components.</p>
---ldx]]--
+-- We save object references in the main utility table; job objects are reusable
+-- components.
local context = context
local codeinjections = backends.codeinjections
diff --git a/tex/context/base/mkiv/pack-rul.lua b/tex/context/base/mkiv/pack-rul.lua
index 98117867c..20db028ec 100644
--- a/tex/context/base/mkiv/pack-rul.lua
+++ b/tex/context/base/mkiv/pack-rul.lua
@@ -7,10 +7,6 @@ if not modules then modules = { } end modules ['pack-rul'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>An explanation is given in the history document <t>mk</t>.</p>
---ldx]]--
-
-- we need to be careful with display math as it uses shifts
-- \framed[align={lohi,middle}]{$x$}
diff --git a/tex/context/base/mkiv/publ-dat.lua b/tex/context/base/mkiv/publ-dat.lua
index 64aaaf460..2e5f07f05 100644
--- a/tex/context/base/mkiv/publ-dat.lua
+++ b/tex/context/base/mkiv/publ-dat.lua
@@ -11,12 +11,6 @@ if not modules then modules = { } end modules ['publ-dat'] = {
-- todo: dataset = datasets[dataset] => current = datasets[dataset]
-- todo: maybe split this file
---[[ldx--
-<p>This is a prelude to integrated bibliography support. This file just loads
-bibtex files and converts them to xml so that the we access the content
-in a convenient way. Actually handling the data takes place elsewhere.</p>
---ldx]]--
-
if not characters then
dofile(resolvers.findfile("char-utf.lua"))
dofile(resolvers.findfile("char-tex.lua"))
diff --git a/tex/context/base/mkiv/publ-ini.lua b/tex/context/base/mkiv/publ-ini.lua
index dac0ab441..aa96dd8bc 100644
--- a/tex/context/base/mkiv/publ-ini.lua
+++ b/tex/context/base/mkiv/publ-ini.lua
@@ -296,7 +296,8 @@ do
local checksum = nil
local username = file.addsuffix(file.robustname(formatters["%s-btx-%s"](prefix,name)),"lua")
if userdata and next(userdata) then
- if job.passes.first then
+ if environment.currentrun == 1 then
+ -- if job.passes.first then
local newdata = serialize(userdata)
checksum = md5.HEX(newdata)
io.savedata(username,newdata)
diff --git a/tex/context/base/mkiv/publ-ini.mkiv b/tex/context/base/mkiv/publ-ini.mkiv
index 6e34d3ab5..05d93ef85 100644
--- a/tex/context/base/mkiv/publ-ini.mkiv
+++ b/tex/context/base/mkiv/publ-ini.mkiv
@@ -342,7 +342,7 @@
\newtoks\t_btx_cmd
\newbox \b_btx_cmd
-\t_btx_cmd{\global\setbox\b_btx_cmd\hpack{\clf_btxcmdstring}}
+\t_btx_cmd{\global\setbox\b_btx_cmd\hbox{\clf_btxcmdstring}} % no \hpack, otherwise prerolling doesn't work
\let\btxcmd\btxcommand
diff --git a/tex/context/base/mkiv/regi-ini.lua b/tex/context/base/mkiv/regi-ini.lua
index 2a3b2caaf..460d97d5e 100644
--- a/tex/context/base/mkiv/regi-ini.lua
+++ b/tex/context/base/mkiv/regi-ini.lua
@@ -6,11 +6,8 @@ if not modules then modules = { } end modules ['regi-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Regimes take care of converting the input characters into
-<l n='utf'/> sequences. The conversion tables are loaded at
-runtime.</p>
---ldx]]--
+-- Regimes take care of converting the input characters into UTF sequences. The
+-- conversion tables are loaded at runtime.
-- Todo: use regi-imp*.lua instead
@@ -30,9 +27,7 @@ local sequencers = utilities.sequencers
local textlineactions = resolvers.openers.helpers.textlineactions
local setmetatableindex = table.setmetatableindex
---[[ldx--
-<p>We will hook regime handling code into the input methods.</p>
---ldx]]--
+-- We will hook regime handling code into the input methods.
local trace_translating = false trackers.register("regimes.translating", function(v) trace_translating = v end)
diff --git a/tex/context/base/mkiv/sort-ini.lua b/tex/context/base/mkiv/sort-ini.lua
index 98f516c22..a375d7057 100644
--- a/tex/context/base/mkiv/sort-ini.lua
+++ b/tex/context/base/mkiv/sort-ini.lua
@@ -6,49 +6,45 @@ if not modules then modules = { } end modules ['sort-ini'] = {
license = "see context related readme files"
}
--- It took a while to get there, but with Fleetwood Mac's "Don't Stop"
--- playing in the background we sort of got it done.
-
---[[<p>The code here evolved from the rather old mkii approach. There
-we concatinate the key and (raw) entry into a new string. Numbers and
-special characters get some treatment so that they sort ok. In
-addition some normalization (lowercasing, accent stripping) takes
-place and again data is appended ror prepended. Eventually these
-strings are sorted using a regular string sorter. The relative order
-of character is dealt with by weighting them. It took a while to
-figure this all out but eventually it worked ok for most languages,
-given that the right datatables were provided.</p>
-
-<p>Here we do follow a similar approach but this time we don't append
-the manipulated keys and entries but create tables for each of them
-with entries being tables themselves having different properties. In
-these tables characters are represented by numbers and sorting takes
-place using these numbers. Strings are simplified using lowercasing
-as well as shape codes. Numbers are filtered and after getting an offset
-they end up at the right end of the spectrum (more clever parser will
-be added some day). There are definitely more solutions to the problem
-and it is a nice puzzle to solve.</p>
-
-<p>In the future more methods can be added, as there is practically no
-limit to what goes into the tables. For that we will provide hooks.</p>
-
-<p>Todo: decomposition with specific order of accents, this is
-relatively easy to do.</p>
-
-<p>Todo: investigate what standards and conventions there are and see
-how they map onto this mechanism. I've learned that users can come up
-with any demand so nothing here is frozen.</p>
-
-<p>Todo: I ran into the Unicode Collation document and noticed that
-there are some similarities (like the weights) but using that method
-would still demand extra code for language specifics. One option is
-to use the allkeys.txt file for the uc vectors but then we would also
-use the collapsed key (sq, code is now commented). In fact, we could
-just hook those into the replacer code that we reun beforehand.</p>
-
-<p>In the future index entries will become more clever, i.e. they will
-have language etc properties that then can be used.</p>
-]]--
+-- It took a while to get there, but with Fleetwood Mac's "Don't Stop" playing in
+-- the background we sort of got it done.
+--
+-- The code here evolved from the rather old mkii approach. There we concatenate
+-- the key and (raw) entry into a new string. Numbers and special characters get
+-- some treatment so that they sort ok. In addition some normalization
+-- (lowercasing, accent stripping) takes place and again data is appended or
+-- prepended. Eventually these strings are sorted using a regular string sorter.
+-- The relative order of characters is dealt with by weighting them. It took a
+-- while to figure this all out but eventually it worked ok for most languages,
+-- given that the right datatables were provided.
+--
+-- Here we do follow a similar approach but this time we don't append the
+-- manipulated keys and entries but create tables for each of them with entries
+-- being tables themselves having different properties. In these tables characters
+-- are represented by numbers and sorting takes place using these numbers. Strings
+-- are simplified using lowercasing as well as shape codes. Numbers are filtered and
+-- after getting an offset they end up at the right end of the spectrum (a more
+-- clever parser will be added some day). There are definitely more solutions to
+-- the problem and it is a nice puzzle to solve.
+--
+-- In the future more methods can be added, as there is practically no limit to what
+-- goes into the tables. For that we will provide hooks.
+--
+-- Todo: decomposition with specific order of accents, this is relatively easy to
+-- do.
+--
+-- Todo: investigate what standards and conventions there are and see how they map
+-- onto this mechanism. I've learned that users can come up with any demand so
+-- nothing here is frozen.
+--
+-- Todo: I ran into the Unicode Collation document and noticed that there are some
+-- similarities (like the weights) but using that method would still demand extra
+-- code for language specifics. One option is to use the allkeys.txt file for the uc
+-- vectors but then we would also use the collapsed key (sq, code is now commented).
+-- In fact, we could just hook those into the replacer code that we run beforehand.
+--
+-- In the future index entries will become more clever, i.e. they will have language
+-- etc properties that then can be used.
local gsub, find, rep, sub, sort, concat, tohash, format = string.gsub, string.find, string.rep, string.sub, table.sort, table.concat, table.tohash, string.format
local utfbyte, utfchar, utfcharacters = utf.byte, utf.char, utf.characters
diff --git a/tex/context/base/mkiv/status-files.pdf b/tex/context/base/mkiv/status-files.pdf
index de994239b..476b1642f 100644
--- a/tex/context/base/mkiv/status-files.pdf
+++ b/tex/context/base/mkiv/status-files.pdf
Binary files differ
diff --git a/tex/context/base/mkiv/status-lua.pdf b/tex/context/base/mkiv/status-lua.pdf
index e6773acf4..734e7705c 100644
--- a/tex/context/base/mkiv/status-lua.pdf
+++ b/tex/context/base/mkiv/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/mkiv/syst-con.lua b/tex/context/base/mkiv/syst-con.lua
index 6a11fa8d3..f0ea8546a 100644
--- a/tex/context/base/mkiv/syst-con.lua
+++ b/tex/context/base/mkiv/syst-con.lua
@@ -20,10 +20,9 @@ local implement = interfaces.implement
local formatters = string.formatters
---[[ldx--
-<p>For raw 8 bit characters, the offset is 0x110000 (bottom of plane 18) at
-the top of <l n='luatex'/>'s char range but outside the unicode range.</p>
---ldx]]--
+-- For raw 8-bit characters, the offset is 0x110000 (bottom of plane 18) at the
+-- top of LuaTeX's char range but outside the Unicode range. This is no longer
+-- the case in LuaMetaTeX.
function converters.hexstringtonumber(n) context(tonumber(n,16)) end
function converters.octstringtonumber(n) context(tonumber(n, 8)) end
diff --git a/tex/context/base/mkiv/syst-ini.mkiv b/tex/context/base/mkiv/syst-ini.mkiv
index ae1978eb6..5f226958b 100644
--- a/tex/context/base/mkiv/syst-ini.mkiv
+++ b/tex/context/base/mkiv/syst-ini.mkiv
@@ -253,6 +253,9 @@
\let\newfam\newfamily
+\let\newinteger \newcount % just in case
+\let\newdimension\newdimen % just in case
+
\firstvalidlanguage\plusone
% Watch out, for the moment we disable the check for already being defined
diff --git a/tex/context/base/mkiv/tabl-tbl.mkiv b/tex/context/base/mkiv/tabl-tbl.mkiv
index 2ed104adf..8b6afb956 100644
--- a/tex/context/base/mkiv/tabl-tbl.mkiv
+++ b/tex/context/base/mkiv/tabl-tbl.mkiv
@@ -1551,7 +1551,8 @@
\fi}
\def\tabl_tabulate_vrule_reset_indeed
- {\dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
+ {\gletcsname\??tabulatevrule0\endcsname\undefined
+ \dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
\global\c_tabl_tabulate_max_vrulecolumn\zerocount}
\def\tabl_tabulate_vrule_reset_step % undefined or relax
diff --git a/tex/context/base/mkiv/trac-lmx.lua b/tex/context/base/mkiv/trac-lmx.lua
index a531a76d6..56522e1e7 100644
--- a/tex/context/base/mkiv/trac-lmx.lua
+++ b/tex/context/base/mkiv/trac-lmx.lua
@@ -6,7 +6,8 @@ if not modules then modules = { } end modules ['trac-lmx'] = {
license = "see context related readme files"
}
--- this one will be adpated to the latest helpers
+-- This one will be adapted to the latest helpers. It might even become a
+-- module instead.
local type, tostring, rawget, loadstring, pcall = type, tostring, rawget, loadstring, pcall
local format, sub, gsub = string.format, string.sub, string.gsub
diff --git a/tex/context/base/mkiv/util-dim.lua b/tex/context/base/mkiv/util-dim.lua
index bb9eca966..6462f3e49 100644
--- a/tex/context/base/mkiv/util-dim.lua
+++ b/tex/context/base/mkiv/util-dim.lua
@@ -6,14 +6,10 @@ if not modules then modules = { } end modules ['util-dim'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Internally <l n='luatex'/> work with scaled point, which are
-represented by integers. However, in practice, at east at the
-<l n='tex'/> end we work with more generic units like points (pt). Going
-from scaled points (numbers) to one of those units can be
-done by using the conversion factors collected in the following
-table.</p>
---ldx]]--
+-- Internally LuaTeX works with scaled points, which are represented by integers.
+-- However, in practice, at least at the TeX end, we work with more generic units
+-- like points (pt). Going from scaled points (numbers) to one of those units can
+-- be done by using the conversion factors collected in the following table.
local format, match, gsub, type, setmetatable = string.format, string.match, string.gsub, type, setmetatable
local P, S, R, Cc, C, lpegmatch = lpeg.P, lpeg.S, lpeg.R, lpeg.Cc, lpeg.C, lpeg.match
@@ -45,7 +41,9 @@ local dimenfactors = allocate {
["dd"] = ( 1157/ 1238)/65536,
["cc"] = ( 1157/14856)/65536,
-- ["nd"] = (20320/21681)/65536,
- -- ["nc"] = ( 5080/65043)/65536
+ -- ["nc"] = ( 5080/65043)/65536,
+ ["es"] = ( 9176/ 129)/65536,
+ ["ts"] = ( 4588/ 645)/65536,
}
-- print(table.serialize(dimenfactors))
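
A small sketch with the new factors; each factor maps scaled points onto the
unit in question, so the printed numbers are lengths in es and ts:

    local sp = 10 * 65536           -- 10pt in scaled points
    print(sp * dimenfactors.es)     -- the same length in Edith units
    print(sp * dimenfactors.ts)     -- and in Tove units
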
@@ -86,10 +84,8 @@ local dimenfactors = allocate {
-- ["sp"]=1,
-- }
---[[ldx--
-<p>A conversion function that takes a number, unit (string) and optional
-format (string) is implemented using this table.</p>
---ldx]]--
+-- A conversion function that takes a number, unit (string) and optional format
+-- (string) is implemented using this table.
local f_none = formatters["%s%s"]
local f_true = formatters["%0.5F%s"]
@@ -110,9 +106,7 @@ local function numbertodimen(n,unit,fmt) -- will be redefined later !
end
end
---[[ldx--
-<p>We collect a bunch of converters in the <type>number</type> namespace.</p>
---ldx]]--
+-- We collect a bunch of converters in the 'number' namespace.
number.maxdimen = 1073741823
number.todimen = numbertodimen
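
A usage sketch; the third argument is assumed to select the format, as in the
f_none/f_true formatters above:

    print(number.todimen(65536,"pt"))      -- 1pt, default format
    print(number.todimen(65536,"pt",true)) -- five-decimal variant
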
@@ -122,7 +116,7 @@ function number.topoints (n,fmt) return numbertodimen(n,"pt",fmt) end
function number.toinches (n,fmt) return numbertodimen(n,"in",fmt) end
function number.tocentimeters (n,fmt) return numbertodimen(n,"cm",fmt) end
function number.tomillimeters (n,fmt) return numbertodimen(n,"mm",fmt) end
-function number.toscaledpoints(n,fmt) return numbertodimen(n,"sp",fmt) end
+-------- number.toscaledpoints(n,fmt) return numbertodimen(n,"sp",fmt) end
function number.toscaledpoints(n) return n .. "sp" end
function number.tobasepoints (n,fmt) return numbertodimen(n,"bp",fmt) end
function number.topicas       (n,fmt) return numbertodimen(n,"pc",fmt) end
@@ -130,14 +124,13 @@ function number.todidots (n,fmt) return numbertodimen(n,"dd",fmt) end
function number.tociceros (n,fmt) return numbertodimen(n,"cc",fmt) end
-------- number.tonewdidots (n,fmt) return numbertodimen(n,"nd",fmt) end
-------- number.tonewciceros (n,fmt) return numbertodimen(n,"nc",fmt) end
+function number.toediths (n,fmt) return numbertodimen(n,"es",fmt) end
+function number.totoves (n,fmt) return numbertodimen(n,"ts",fmt) end
---[[ldx--
-<p>More interesting it to implement a (sort of) dimen datatype, one
-that permits calculations too. First we define a function that
-converts a string to scaledpoints. We use <l n='lpeg'/>. We capture
-a number and optionally a unit. When no unit is given a constant
-capture takes place.</p>
---ldx]]--
+-- More interesting is to implement a (sort of) dimen datatype, one that permits
+-- calculations too. First we define a function that converts a string to
+-- scaledpoints. We use LPEG. We capture a number and optionally a unit. When no
+-- unit is given a constant capture takes place.
local amount = (S("+-")^0 * R("09")^0 * P(".")^0 * R("09")^0) + Cc("0")
local unit = R("az")^1 + P("%")
@@ -152,21 +145,16 @@ function number.splitdimen(str)
return lpegmatch(splitter,str)
end
---[[ldx--
-<p>We use a metatable to intercept errors. When no key is found in
-the table with factors, the metatable will be consulted for an
-alternative index function.</p>
---ldx]]--
+-- We use a metatable to intercept errors. When no key is found in the table with
+-- factors, the metatable will be consulted for an alternative index function.
setmetatableindex(dimenfactors, function(t,s)
-- error("wrong dimension: " .. (s or "?")) -- better a message
return false
end)
---[[ldx--
-<p>We redefine the following function later on, so we comment it
-here (which saves us bytecodes.</p>
---ldx]]--
+-- We redefine the following function later on, so we comment it here (which
+-- saves us bytecodes).
-- function string.todimen(str)
-- if type(str) == "number" then
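
A usage sketch of splitdimen, which is assumed to hand back the numeric part
and the unit:

    local n, u = number.splitdimen("10.5pt") -- n = 10.5, u = "pt"
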
@@ -182,44 +170,38 @@ here (which saves us bytecodes.</p>
local stringtodimen -- assigned later (commenting saves bytecode)
local amount = S("+-")^0 * R("09")^0 * S(".,")^0 * R("09")^0
-local unit = P("pt") + P("cm") + P("mm") + P("sp") + P("bp") + P("in") +
- P("pc") + P("dd") + P("cc") + P("nd") + P("nc")
+local unit = P("pt") + P("cm") + P("mm") + P("sp") + P("bp")
+ + P("es") + P("ts") + P("pc") + P("dd") + P("cc")
+ + P("in")
+ -- + P("nd") + P("nc")
local validdimen = amount * unit
lpeg.patterns.validdimen = validdimen
---[[ldx--
-<p>This converter accepts calls like:</p>
-
-<typing>
-string.todimen("10")
-string.todimen(".10")
-string.todimen("10.0")
-string.todimen("10.0pt")
-string.todimen("10pt")
-string.todimen("10.0pt")
-</typing>
-
-<p>With this in place, we can now implement a proper datatype for dimensions, one
-that permits us to do this:</p>
-
-<typing>
-s = dimen "10pt" + dimen "20pt" + dimen "200pt"
- - dimen "100sp" / 10 + "20pt" + "0pt"
-</typing>
-
-<p>We create a local metatable for this new type:</p>
---ldx]]--
+-- This converter accepts calls like:
+--
+-- string.todimen("10")
+-- string.todimen(".10")
+-- string.todimen("10.0")
+-- string.todimen("10.0pt")
+-- string.todimen("10pt")
+-- string.todimen("10.0pt")
+--
+-- With this in place, we can now implement a proper datatype for dimensions, one
+-- that permits us to do this:
+--
+-- s = dimen "10pt" + dimen "20pt" + dimen "200pt"
+-- - dimen "100sp" / 10 + "20pt" + "0pt"
+--
+-- We create a local metatable for this new type:
local dimensions = { }
---[[ldx--
-<p>The main (and globally) visible representation of a dimen is defined next: it is
-a one-element table. The unit that is returned from the match is normally a number
-(one of the previously defined factors) but we also accept functions. Later we will
-see why. This function is redefined later.</p>
---ldx]]--
+-- The main (and globally) visible representation of a dimen is defined next: it is
+-- a one-element table. The unit that is returned from the match is normally a
+-- number (one of the previously defined factors) but we also accept functions.
+-- Later we will see why. This function is redefined later.
-- function dimen(a)
-- if a then
@@ -241,11 +223,9 @@ see why. This function is redefined later.</p>
-- end
-- end
---[[ldx--
-<p>This function return a small hash with a metatable attached. It is
-through this metatable that we can do the calculations. We could have
-shared some of the code but for reasons of speed we don't.</p>
---ldx]]--
+-- This function returns a small hash with a metatable attached. It is through this
+-- metatable that we can do the calculations. We could have shared some of the code
+-- but for reasons of speed we don't.
function dimensions.__add(a, b)
local ta, tb = type(a), type(b)
@@ -281,20 +261,16 @@ function dimensions.__unm(a)
return setmetatable({ - a }, dimensions)
end
---[[ldx--
-<p>It makes no sense to implement the power and modulo function but
-the next two do make sense because they permits is code like:</p>
-
-<typing>
-local a, b = dimen "10pt", dimen "11pt"
-...
-if a > b then
- ...
-end
-</typing>
---ldx]]--
-
--- makes no sense: dimensions.__pow and dimensions.__mod
+-- It makes no sense to implement the power and modulo function but the next two
+-- do make sense because they permit code like:
+--
+-- local a, b = dimen "10pt", dimen "11pt"
+-- ...
+-- if a > b then
+-- ...
+-- end
+--
+-- This also makes no sense: dimensions.__pow and dimensions.__mod.
function dimensions.__lt(a, b)
return a[1] < b[1]
@@ -304,24 +280,17 @@ function dimensions.__eq(a, b)
return a[1] == b[1]
end
---[[ldx--
-<p>We also need to provide a function for conversion to string (so that
-we can print dimensions). We print them as points, just like <l n='tex'/>.</p>
---ldx]]--
+-- We also need to provide a function for conversion to string (so that we can print
+-- dimensions). We print them as points, just like TeX.
function dimensions.__tostring(a)
return a[1]/65536 .. "pt" -- instead of todimen(a[1])
end
---[[ldx--
-<p>Since it does not take much code, we also provide a way to access
-a few accessors</p>
-
-<typing>
-print(dimen().pt)
-print(dimen().sp)
-</typing>
---ldx]]--
+-- Since it does not take much code, we also provide a few accessors:
+--
+-- print(dimen().pt)
+-- print(dimen().sp)
function dimensions.__index(tab,key)
local d = dimenfactors[key]
@@ -332,41 +301,34 @@ function dimensions.__index(tab,key)
return 1/d
end
---[[ldx--
-<p>In the converter from string to dimension we support functions as
-factors. This is because in <l n='tex'/> we have a few more units:
-<type>ex</type> and <type>em</type>. These are not constant factors but
-depend on the current font. They are not defined by default, but need
-an explicit function call. This is because at the moment that this code
-is loaded, the relevant tables that hold the functions needed may not
-yet be available.</p>
---ldx]]--
-
- dimenfactors["ex"] = 4 * 1/65536 -- 4pt
- dimenfactors["em"] = 10 * 1/65536 -- 10pt
--- dimenfactors["%"] = 4 * 1/65536 -- 400pt/100
-
---[[ldx--
-<p>The previous code is rather efficient (also thanks to <l n='lpeg'/>) but we
-can speed it up by caching converted dimensions. On my machine (2008) the following
-loop takes about 25.5 seconds.</p>
-
-<typing>
-for i=1,1000000 do
- local s = dimen "10pt" + dimen "20pt" + dimen "200pt"
- - dimen "100sp" / 10 + "20pt" + "0pt"
-end
-</typing>
-
-<p>When we cache converted strings this becomes 16.3 seconds. In order not
-to waste too much memory on it, we tag the values of the cache as being
-week which mean that the garbage collector will collect them in a next
-sweep. This means that in most cases the speed up is mostly affecting the
-current couple of calculations and as such the speed penalty is small.</p>
-
-<p>We redefine two previous defined functions that can benefit from
-this:</p>
---ldx]]--
+-- In the converter from string to dimension we support functions as factors. This
+-- is because in TeX we have a few more units: 'ex' and 'em'. These are not constant
+-- factors but depend on the current font. They are not defined by default, but need
+-- an explicit function call. This is because at the moment that this code is
+-- loaded, the relevant tables that hold the functions needed may not yet be
+-- available.
+
+ dimenfactors["ex"] = 4 /65536 -- 4pt
+ dimenfactors["em"] = 10 /65536 -- 10pt
+-- dimenfactors["%"] = 4 /65536 -- 400pt/100
+ dimenfactors["eu"] = (9176/129)/65536 -- 1es
+
+-- The previous code is rather efficient (also thanks to LPEG) but we can speed it
+-- up by caching converted dimensions. On my machine (2008) the following loop takes
+-- about 25.5 seconds.
+--
+-- for i=1,1000000 do
+-- local s = dimen "10pt" + dimen "20pt" + dimen "200pt"
+-- - dimen "100sp" / 10 + "20pt" + "0pt"
+-- end
+--
+-- When we cache converted strings this becomes 16.3 seconds. In order not to
+-- waste too much memory on it, we tag the values of the cache as being weak,
+-- which means that the garbage collector will collect them in a next sweep. This
+-- means that in most cases the speedup mostly affects the current couple of
+-- calculations and as such the speed penalty is small.
+--
+-- We redefine two previously defined functions that can benefit from this:
local known = { } setmetatable(known, { __mode = "v" })
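
A minimal sketch of the weak-value cache idea; dimen stands in for the real
converter and the names are only illustrations:

    local known = setmetatable({ }, { __mode = "v" })
    local function cached(str)
        local d = known[str]
        if not d then
            d = dimen(str)  -- convert once
            known[str] = d  -- weak value: can be collected in a next sweep
        end
        return d
    end
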
@@ -436,14 +398,10 @@ function number.toscaled(d)
return format("%0.5f",d/0x10000) -- 2^16
end
---[[ldx--
-<p>In a similar fashion we can define a glue datatype. In that case we
-probably use a hash instead of a one-element table.</p>
---ldx]]--
-
---[[ldx--
-<p>Goodie:s</p>
---ldx]]--
+-- In a similar fashion we can define a glue datatype. In that case we probably use
+-- a hash instead of a one-element table.
+--
+-- A goodie:
function number.percent(n,d) -- will be cleaned up once luatex 0.30 is out
d = d or texget("hsize")
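
A usage sketch, assuming percent returns n percent of d (with d defaulting to
the current \hsize):

    print(number.percent(50, 100 * 65536)) -- half of 100pt, in scaled points
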
diff --git a/tex/context/base/mkiv/util-fmt.lua b/tex/context/base/mkiv/util-fmt.lua
index fe80c6420..4da4ef985 100644
--- a/tex/context/base/mkiv/util-fmt.lua
+++ b/tex/context/base/mkiv/util-fmt.lua
@@ -11,7 +11,7 @@ utilities.formatters = utilities.formatters or { }
local formatters = utilities.formatters
local concat, format = table.concat, string.format
-local tostring, type = tostring, type
+local tostring, type, unpack = tostring, type, unpack
local strip = string.strip
local lpegmatch = lpeg.match
@@ -21,12 +21,15 @@ function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result > 0 then
- between = between or " "
- local widths, numbers = { }, { }
- local first = result[1]
- local n = #first
+ local widths = { }
+ local numbers = { }
+ local templates = { }
+ local first = result[1]
+ local n = #first
+ between = between or " "
+ --
for i=1,n do
widths[i] = 0
end
@@ -35,13 +38,6 @@ function formatters.formatcolumns(result,between)
for j=1,n do
local rj = r[j]
local tj = type(rj)
--- if tj == "number" then
--- numbers[j] = true
--- end
--- if tj ~= "string" then
--- rj = tostring(rj)
--- r[j] = rj
--- end
if tj == "number" then
numbers[j] = true
rj = tostring(rj)
@@ -55,29 +51,59 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h = header[i]
+ for j=1,n do
+ local hj = tostring(h[j])
+ h[j] = hj
+ local w = #hj
+ if w > widths[j] then
+ widths[j] = w
+ end
+ end
+ end
+ end
for i=1,n do
local w = widths[i]
if numbers[i] then
if w > 80 then
- widths[i] = "%s" .. between
- else
- widths[i] = "%0" .. w .. "i" .. between
+ templates[i] = "%s" .. between
+ else
+ templates[i] = "% " .. w .. "i" .. between
end
else
if w > 80 then
- widths[i] = "%s" .. between
- elseif w > 0 then
- widths[i] = "%-" .. w .. "s" .. between
+ templates[i] = "%s" .. between
+ elseif w > 0 then
+ templates[i] = "%-" .. w .. "s" .. between
else
- widths[i] = "%s"
+ templates[i] = "%s"
end
end
end
- local template = strip(concat(widths))
+ local template = strip(concat(templates))
for i=1,#result do
local str = format(template,unpack(result[i]))
result[i] = strip(str)
end
+ if header then
+ for i=1,n do
+ local w = widths[i]
+ if w > 80 then
+ templates[i] = "%s" .. between
+ elseif w > 0 then
+ templates[i] = "%-" .. w .. "s" .. between
+ else
+ templates[i] = "%s"
+ end
+ end
+ local template = strip(concat(templates))
+ for i=1,#header do
+ local str = format(template,unpack(header[i]))
+ header[i] = strip(str)
+ end
+ end
end
- return result
+ return result, header
end
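
A hedged usage sketch of the new header argument; rows and head are plain
tables with the same column count:

    local rows = { { "alpha", 1 }, { "beta", 100 } }
    local head = { { "name", "n" } }
    rows, head = formatters.formatcolumns(rows," ",head)
    -- rows and head now use the same column widths
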
diff --git a/tex/context/base/mkiv/util-seq.lua b/tex/context/base/mkiv/util-seq.lua
index 35839f230..49952dd98 100644
--- a/tex/context/base/mkiv/util-seq.lua
+++ b/tex/context/base/mkiv/util-seq.lua
@@ -6,15 +6,13 @@ if not modules then modules = { } end modules ['util-seq'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Here we implement a mechanism for chaining the special functions
-that we use in <l n="context"> to deal with mode list processing. We
-assume that namespaces for the functions are used, but for speed we
-use locals to refer to them when compiling the chain.</p>
---ldx]]--
-
+-- Here we implement a mechanism for chaining the special functions that we use in
+-- ConTeXt to deal with mode list processing. We assume that namespaces for the
+-- functions are used, but for speed we use locals to refer to them when compiling
+-- the chain.
+--
-- todo: delayed: i.e. we register them in the right order already but delay usage
-
+--
-- todo: protect groups (as in tasks)
local gsub, gmatch = string.gsub, string.gmatch