author     Marius <mariausol@gmail.com>  2011-06-11 18:00:15 +0300
committer  Marius <mariausol@gmail.com>  2011-06-11 18:00:15 +0300
commit     510cb140d2e8baed13b8b27daa02f3ad2f5df3e1 (patch)
tree       180c914c5c27bcd130cd13098b426b03abd1a508
parent     247a7c0fccc1f980a837daf19e0ef2be6102a18f (diff)
download   context-510cb140d2e8baed13b8b27daa02f3ad2f5df3e1.tar.gz
beta 2011.06.11 16:45
-rw-r--r--  scripts/context/lua/mtxrun.lua | 1160
-rw-r--r--  scripts/context/stubs/mswin/mtxrun.lua | 1160
-rw-r--r--  scripts/context/stubs/unix/mtxrun | 1160
-rw-r--r--  tex/context/base/anch-pos.lua | 2
-rw-r--r--  tex/context/base/back-exp.lua | 265
-rw-r--r--  tex/context/base/back-exp.mkiv | 9
-rw-r--r--  tex/context/base/back-ini.lua | 4
-rw-r--r--  tex/context/base/bibl-bib.lua | 6
-rw-r--r--  tex/context/base/char-ini.lua | 14
-rw-r--r--  tex/context/base/cont-new.mkii | 2
-rw-r--r--  tex/context/base/cont-new.mkiv | 2
-rw-r--r--  tex/context/base/context.mkii | 2
-rw-r--r--  tex/context/base/context.mkiv | 2
-rw-r--r--  tex/context/base/core-fnt.mkiv | 16
-rw-r--r--  tex/context/base/core-mis.mkiv | 56
-rw-r--r--  tex/context/base/core-sys.lua | 5
-rw-r--r--  tex/context/base/data-exp.lua | 8
-rw-r--r--  tex/context/base/data-ini.lua | 4
-rw-r--r--  tex/context/base/data-lst.lua | 15
-rw-r--r--  tex/context/base/data-tmp.lua | 9
-rw-r--r--  tex/context/base/font-afm.lua | 7
-rw-r--r--  tex/context/base/font-col.lua | 3
-rw-r--r--  tex/context/base/font-con.lua | 8
-rw-r--r--  tex/context/base/font-ctx.lua | 15
-rw-r--r--  tex/context/base/font-ini.mkiv | 2
-rw-r--r--  tex/context/base/font-otd.lua | 3
-rw-r--r--  tex/context/base/font-otf.lua | 8
-rw-r--r--  tex/context/base/font-syn.lua | 7
-rw-r--r--  tex/context/base/grph-inc.lua | 6
-rw-r--r--  tex/context/base/java-ini.lua | 2
-rw-r--r--  tex/context/base/l-aux.lua | 13
-rw-r--r--  tex/context/base/l-boolean.lua | 4
-rw-r--r--  tex/context/base/l-lpeg.lua | 83
-rw-r--r--  tex/context/base/l-table.lua | 10
-rw-r--r--  tex/context/base/l-utils.lua | 12
-rw-r--r--  tex/context/base/lang-ini.lua | 9
-rw-r--r--  tex/context/base/lpdf-ano.lua | 2
-rw-r--r--  tex/context/base/lpdf-epa.lua | 13
-rw-r--r--  tex/context/base/lpdf-mis.lua | 4
-rw-r--r--  tex/context/base/lpdf-wid.lua | 5
-rw-r--r--  tex/context/base/luat-bas.mkiv | 2
-rw-r--r--  tex/context/base/luat-cbk.lua | 9
-rw-r--r--  tex/context/base/luat-cod.lua | 4
-rw-r--r--  tex/context/base/luat-sto.lua | 12
-rw-r--r--  tex/context/base/lxml-ctx.lua | 6
-rw-r--r--  tex/context/base/lxml-sor.lua | 5
-rw-r--r--  tex/context/base/lxml-tex.lua | 2
-rw-r--r--  tex/context/base/m-dimensions.lua | 398
-rw-r--r--  tex/context/base/m-dimensions.mkiv | 194
-rw-r--r--  tex/context/base/m-pstricks.lua | 2
-rw-r--r--  tex/context/base/m-units.mkiv | 2
-rw-r--r--  tex/context/base/math-map.lua | 638
-rw-r--r--  tex/context/base/math-tag.lua | 126
-rw-r--r--  tex/context/base/meta-ini.lua | 6
-rw-r--r--  tex/context/base/mlib-pps.lua | 14
-rw-r--r--  tex/context/base/mlib-run.lua | 3
-rw-r--r--  tex/context/base/node-acc.lua | 64
-rw-r--r--  tex/context/base/node-fnt.lua | 6
-rw-r--r--  tex/context/base/node-ini.lua | 13
-rw-r--r--  tex/context/base/node-ref.lua | 4
-rw-r--r--  tex/context/base/node-ser.lua | 11
-rw-r--r--  tex/context/base/node-tsk.lua | 4
-rw-r--r--  tex/context/base/page-str.lua | 6
-rw-r--r--  tex/context/base/spac-ali.lua | 3
-rw-r--r--  tex/context/base/spac-ver.lua | 3
-rw-r--r--  tex/context/base/spac-ver.mkiv | 10
-rw-r--r--  tex/context/base/status-files.pdf | bin 23470 -> 23463 bytes
-rw-r--r--  tex/context/base/status-lua.pdf | bin 155084 -> 155139 bytes
-rw-r--r--  tex/context/base/strc-reg.lua | 2
-rw-r--r--  tex/context/base/strc-tag.lua | 12
-rw-r--r--  tex/context/base/strc-tag.mkiv | 8
-rw-r--r--  tex/context/base/syst-lua.lua | 6
-rw-r--r--  tex/context/base/trac-fil.lua | 144
-rw-r--r--  tex/context/base/trac-inf.lua | 4
-rw-r--r--  tex/context/base/typo-mar.lua | 2
-rw-r--r--  tex/context/base/util-deb.lua | 3
-rw-r--r--  tex/context/base/util-dim.lua | 31
-rw-r--r--  tex/context/base/util-tab.lua | 38
-rw-r--r--  tex/context/base/x-calcmath.lua | 3
-rw-r--r--  tex/context/base/x-ldx.lua | 93
-rw-r--r--  tex/context/base/x-mathml.lua | 2
-rw-r--r--  tex/context/base/x-mathml.mkiv | 4
-rw-r--r--  tex/generic/context/luatex-fonts-merged.lua | 1369
-rw-r--r--  tex/generic/context/luatex-fonts.lua | 2
-rw-r--r--  tex/generic/context/luatex-plain.tex | 25
85 files changed, 4460 insertions, 2952 deletions
diff --git a/scripts/context/lua/mtxrun.lua b/scripts/context/lua/mtxrun.lua
index d0cf3d46d..6a8b2e99b 100644
--- a/scripts/context/lua/mtxrun.lua
+++ b/scripts/context/lua/mtxrun.lua
@@ -160,509 +160,6 @@ end -- of closure
do -- create closure to overcome 200 locals limit
-if not modules then modules = { } end modules ['l-lpeg'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local lpeg = require("lpeg")
-
-local type = type
-
--- Beware, we predefine a bunch of patterns here and one reason for doing so
--- is that we get consistent behaviour in some of the visualizers.
-
-lpeg.patterns = lpeg.patterns or { } -- so that we can share
-local patterns = lpeg.patterns
-
-local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
-local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
-local lpegtype = lpeg.type
-
-local utfcharacters = string.utfcharacters
-local utfgmatch = unicode and unicode.utf8.gmatch
-
-local anything = P(1)
-local endofstring = P(-1)
-local alwaysmatched = P(true)
-
-patterns.anything = anything
-patterns.endofstring = endofstring
-patterns.beginofstring = alwaysmatched
-patterns.alwaysmatched = alwaysmatched
-
-local digit, sign = R('09'), S('+-')
-local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
-local newline = crlf + cr + lf
-local escaped = P("\\") * anything
-local squote = P("'")
-local dquote = P('"')
-local space = P(" ")
-
-local utfbom_32_be = P('\000\000\254\255')
-local utfbom_32_le = P('\255\254\000\000')
-local utfbom_16_be = P('\255\254')
-local utfbom_16_le = P('\254\255')
-local utfbom_8 = P('\239\187\191')
-local utfbom = utfbom_32_be + utfbom_32_le
- + utfbom_16_be + utfbom_16_le
- + utfbom_8
-local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
- + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
- + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
-
-local utf8next = R("\128\191")
-
-patterns.utf8one = R("\000\127")
-patterns.utf8two = R("\194\223") * utf8next
-patterns.utf8three = R("\224\239") * utf8next * utf8next
-patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
-patterns.utfbom = utfbom
-patterns.utftype = utftype
-
-local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
-local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
-
-patterns.utf8 = utf8char
-patterns.utf8char = utf8char
-patterns.validutf8 = validutf8char
-patterns.validutf8char = validutf8char
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.cardinal = sign^0 * digit^1
-patterns.integer = sign^0 * digit^1
-patterns.float = sign^0 * digit^0 * P('.') * digit^1
-patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
-patterns.number = patterns.float + patterns.integer
-patterns.cnumber = patterns.cfloat + patterns.integer
-patterns.oct = P("0") * R("07")^1
-patterns.octal = patterns.oct
-patterns.HEX = P("0x") * R("09","AF")^1
-patterns.hex = P("0x") * R("09","af")^1
-patterns.hexadecimal = P("0x") * R("09","AF","af")^1
-patterns.lowercase = R("az")
-patterns.uppercase = R("AZ")
-patterns.letter = patterns.lowercase + patterns.uppercase
-patterns.space = space
-patterns.tab = P("\t")
-patterns.spaceortab = patterns.space + patterns.tab
-patterns.eol = S("\n\r")
-patterns.spacer = S(" \t\f\v") -- + string.char(0xc2, 0xa0) if we want utf (cf mail roberto)
-patterns.newline = newline
-patterns.emptyline = newline^1
-patterns.nonspacer = 1 - patterns.spacer
-patterns.whitespace = patterns.eol + patterns.spacer
-patterns.nonwhitespace = 1 - patterns.whitespace
-patterns.equal = P("=")
-patterns.comma = P(",")
-patterns.commaspacer = P(",") * patterns.spacer^0
-patterns.period = P(".")
-patterns.colon = P(":")
-patterns.semicolon = P(";")
-patterns.underscore = P("_")
-patterns.escaped = escaped
-patterns.squote = squote
-patterns.dquote = dquote
-patterns.nosquote = (escaped + (1-squote))^0
-patterns.nodquote = (escaped + (1-dquote))^0
-patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
-patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
-patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
-patterns.unspacer = ((patterns.spacer^1)/"")^0
-
-patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
-patterns.beginline = #(1-newline)
-
-local unquoted = Cs(patterns.unquoted * endofstring) -- not C
-
-function string.unquoted(str)
- return match(unquoted,str) or str
-end
-
-
-function lpeg.anywhere(pattern) --slightly adapted from website
- return P { P(pattern) + 1 * V(1) } -- why so complex?
-end
-
-function lpeg.splitter(pattern, action)
- return (((1-P(pattern))^1)/action+1)^0
-end
-
-local splitters_s, splitters_m = { }, { }
-
-local function splitat(separator,single)
- local splitter = (single and splitters_s[separator]) or splitters_m[separator]
- if not splitter then
- separator = P(separator)
- local other = C((1 - separator)^0)
- if single then
- local any = anything
- splitter = other * (separator * C(any^0) + "") -- ?
- splitters_s[separator] = splitter
- else
- splitter = other * (separator * other)^0
- splitters_m[separator] = splitter
- end
- end
- return splitter
-end
-
-lpeg.splitat = splitat
-
-
-local cache = { }
-
-function lpeg.split(separator,str)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.split(str,separator)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-local spacing = patterns.spacer^0 * newline -- sort of strip
-local empty = spacing * Cc("")
-local nonempty = Cs((1-spacing)^1) * spacing^-1
-local content = (empty + nonempty)^1
-
-patterns.textline = content
-
-
-local linesplitter = Ct(splitat(newline))
-
-patterns.linesplitter = linesplitter
-
-function string.splitlines(str)
- return match(linesplitter,str)
-end
-
-local utflinesplitter = utfbom^-1 * Ct(splitat(newline))
-
-patterns.utflinesplitter = utflinesplitter
-
-function string.utfsplitlines(str)
- return match(utflinesplitter,str)
-end
-
-
-local cache = { }
-
-function lpeg.checkedsplit(separator,str)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.checkedsplit(str,separator)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-
-local f1 = string.byte
-
-local function f2(s) local c1, c2 = f1(s,1,2) return c1 * 64 + c2 - 12416 end
-local function f3(s) local c1, c2, c3 = f1(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
-local function f4(s) local c1, c2, c3, c4 = f1(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
-
-local utf8byte = patterns.utf8one/f1 + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
-
-patterns.utf8byte = utf8byte
-
-
-
-local cache = { }
-
-function lpeg.stripper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs(((S(str)^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs(((str^1)/"" + 1)^0)
- end
-end
-
-local cache = { }
-
-function lpeg.keeper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs((((1-S(str))^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs((((1-str)^1)/"" + 1)^0)
- end
-end
-
-function lpeg.frontstripper(str) -- or pattern (yet undocumented)
- return (P(str) + P(true)) * Cs(P(1)^0)
-end
-
-function lpeg.endstripper(str) -- or pattern (yet undocumented)
- return Cs((1 - P(str) * P(-1))^0)
-end
-
--- Just for fun I looked at the used bytecode and
--- p = (p and p + pp) or pp gets one more (testset).
-
-function lpeg.replacer(one,two)
- if type(one) == "table" then
- local no = #one
- if no > 0 then
- local p
- for i=1,no do
- local o = one[i]
- local pp = P(o[1]) / o[2]
- if p then
- p = p + pp
- else
- p = pp
- end
- end
- return Cs((p + 1)^0)
- end
- else
- two = two or ""
- return Cs((P(one)/two + 1)^0)
- end
-end
-
-local splitters_f, splitters_s = { }, { }
-
-function lpeg.firstofsplit(separator) -- always return value
- local splitter = splitters_f[separator]
- if not splitter then
- separator = P(separator)
- splitter = C((1 - separator)^0)
- splitters_f[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.secondofsplit(separator) -- nil if not split
- local splitter = splitters_s[separator]
- if not splitter then
- separator = P(separator)
- splitter = (1 - separator)^0 * separator * C(anything^0)
- splitters_s[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.balancer(left,right)
- left, right = P(left), P(right)
- return P { left * ((1 - left - right) + V(1))^0 * right }
-end
-
-
-
-local nany = utf8char/""
-
-function lpeg.counter(pattern)
- pattern = Cs((P(pattern)/" " + nany)^0)
- return function(str)
- return #match(pattern,str)
- end
-end
-
-if utfgmatch then
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local n = 0
- for _ in utfgmatch(str,what) do
- n = n + 1
- end
- return n
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-else
-
- local cache = { }
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local p = cache[what]
- if not p then
- p = Cs((P(what)/" " + nany)^0)
- cache[p] = p
- end
- return #match(p,str)
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-end
-
-local patterns_escapes = { -- also defines in l-string
- ["%"] = "%%",
- ["."] = "%.",
- ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
- ["["] = "%[", ["]"] = "%]",
- ["("] = "%)", [")"] = "%)",
- -- ["{"] = "%{", ["}"] = "%}"
- -- ["^"] = "%^", ["$"] = "%$",
-}
-
-local simple_escapes = { -- also defines in l-string
- ["-"] = "%-",
- ["."] = "%.",
- ["?"] = ".",
- ["*"] = ".*",
-}
-
-local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
-local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
-
-function string.escapedpattern(str,simple)
- return match(simple and s or p,str)
-end
-
--- utf extensies
-
-lpeg.UP = lpeg.P
-
-if utfcharacters then
-
- function lpeg.US(str)
- local p
- for uc in utfcharacters(str) do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-
-elseif utfgmatch then
-
- function lpeg.US(str)
- local p
- for uc in utfgmatch(str,".") do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-else
-
- function lpeg.US(str)
- local p
- local f = function(uc)
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- match((utf8char/f)^0,str)
- return p
- end
-
-end
-
-local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
-
-local utfchar = unicode and unicode.utf8 and unicode.utf8.char
-
-function lpeg.UR(str,more)
- local first, last
- if type(str) == "number" then
- first = str
- last = more or first
- else
- first, last = match(range,str)
- if not last then
- return P(str)
- end
- end
- if first == last then
- return P(str)
- elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterium
- local p
- for i=first,last do
- if p then
- p = p + P(utfchar(i))
- else
- p = P(utfchar(i))
- end
- end
- return p -- nil when invalid range
- else
- local f = function(b)
- return b >= first and b <= last
- end
- return utf8byte / f -- nil when invalid range
- end
-end
-
-
-
-function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
- if type(list) ~= "table" then
- list = { list, ... }
- end
- -- sort(list) -- longest match first
- local p = P(list[1])
- for l=2,#list do
- p = p + P(list[l])
- end
- return p
-end
-
-function lpeg.is_lpeg(p)
- return p and lpegtype(p) == "pattern"
-end
-
-
-
-end -- of closure
-
-do -- create closure to overcome 200 locals limit
-
if not modules then modules = { } end modules ['l-table'] = {
version = 1.001,
comment = "companion to luat-lib.mkiv",
@@ -1575,6 +1072,576 @@ function table.has_one_entry(t)
return t and not next(t,next(t))
end
+-- new
+
+function table.loweredkeys(t) -- maybe utf
+ local l = { }
+ for k, v in next, t do
+ l[lower(k)] = v
+ end
+ return l
+end
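-- A minimal usage sketch (illustrative only, not part of the patch; the keys
-- and values are made up): loweredkeys copies a table with its string keys
-- lowercased, leaving the values untouched.
local t = table.loweredkeys { Foo = 1, BAR = 2 }
print(t.foo, t.bar) --> 1  2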
+
+
+end -- of closure
+
+do -- create closure to overcome 200 locals limit
+
+if not modules then modules = { } end modules ['l-lpeg'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local lpeg = require("lpeg")
+
+local type = type
+local byte, char = string.byte, string.char
+
+-- Beware, we predefine a bunch of patterns here and one reason for doing so
+-- is that we get consistent behaviour in some of the visualizers.
+
+lpeg.patterns = lpeg.patterns or { } -- so that we can share
+local patterns = lpeg.patterns
+
+local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
+local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
+local lpegtype = lpeg.type
+
+local utfcharacters = string.utfcharacters
+local utfgmatch = unicode and unicode.utf8.gmatch
+
+local anything = P(1)
+local endofstring = P(-1)
+local alwaysmatched = P(true)
+
+patterns.anything = anything
+patterns.endofstring = endofstring
+patterns.beginofstring = alwaysmatched
+patterns.alwaysmatched = alwaysmatched
+
+local digit, sign = R('09'), S('+-')
+local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
+local newline = crlf + cr + lf
+local escaped = P("\\") * anything
+local squote = P("'")
+local dquote = P('"')
+local space = P(" ")
+
+local utfbom_32_be = P('\000\000\254\255')
+local utfbom_32_le = P('\255\254\000\000')
+local utfbom_16_be = P('\255\254')
+local utfbom_16_le = P('\254\255')
+local utfbom_8 = P('\239\187\191')
+local utfbom = utfbom_32_be + utfbom_32_le
+ + utfbom_16_be + utfbom_16_le
+ + utfbom_8
+local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
+ + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
+ + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
+
+local utf8next = R("\128\191")
+
+patterns.utf8one = R("\000\127")
+patterns.utf8two = R("\194\223") * utf8next
+patterns.utf8three = R("\224\239") * utf8next * utf8next
+patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
+patterns.utfbom = utfbom
+patterns.utftype = utftype
+
+local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
+local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
+
+patterns.utf8 = utf8char
+patterns.utf8char = utf8char
+patterns.validutf8 = validutf8char
+patterns.validutf8char = validutf8char
+
+patterns.digit = digit
+patterns.sign = sign
+patterns.cardinal = sign^0 * digit^1
+patterns.integer = sign^0 * digit^1
+patterns.float = sign^0 * digit^0 * P('.') * digit^1
+patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
+patterns.number = patterns.float + patterns.integer
+patterns.cnumber = patterns.cfloat + patterns.integer
+patterns.oct = P("0") * R("07")^1
+patterns.octal = patterns.oct
+patterns.HEX = P("0x") * R("09","AF")^1
+patterns.hex = P("0x") * R("09","af")^1
+patterns.hexadecimal = P("0x") * R("09","AF","af")^1
+patterns.lowercase = R("az")
+patterns.uppercase = R("AZ")
+patterns.letter = patterns.lowercase + patterns.uppercase
+patterns.space = space
+patterns.tab = P("\t")
+patterns.spaceortab = patterns.space + patterns.tab
+patterns.eol = S("\n\r")
+patterns.spacer = S(" \t\f\v") -- + char(0xc2, 0xa0) if we want utf (cf mail roberto)
+patterns.newline = newline
+patterns.emptyline = newline^1
+patterns.nonspacer = 1 - patterns.spacer
+patterns.whitespace = patterns.eol + patterns.spacer
+patterns.nonwhitespace = 1 - patterns.whitespace
+patterns.equal = P("=")
+patterns.comma = P(",")
+patterns.commaspacer = P(",") * patterns.spacer^0
+patterns.period = P(".")
+patterns.colon = P(":")
+patterns.semicolon = P(";")
+patterns.underscore = P("_")
+patterns.escaped = escaped
+patterns.squote = squote
+patterns.dquote = dquote
+patterns.nosquote = (escaped + (1-squote))^0
+patterns.nodquote = (escaped + (1-dquote))^0
+patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
+patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
+patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
+patterns.unspacer = ((patterns.spacer^1)/"")^0
+
+patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
+patterns.beginline = #(1-newline)
+
+local unquoted = Cs(patterns.unquoted * endofstring) -- not C
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+-- more efficient:
+
+local unquoted = (
+ squote * Cs(1 - P(-2)) * squote
+ + dquote * Cs(1 - P(-2)) * dquote
+)
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+patterns.unquoted = unquoted
+
+
+function lpeg.anywhere(pattern) --slightly adapted from website
+ return P { P(pattern) + 1 * V(1) } -- why so complex?
+end
+
+function lpeg.splitter(pattern, action)
+ return (((1-P(pattern))^1)/action+1)^0
+end
+
+function lpeg.tsplitter(pattern, action)
+ return Ct((((1-P(pattern))^1)/action+1)^0)
+end
+
+-- problem: the separator can be an lpeg pattern and that does not hash too well,
+-- but it's quite okay as the key is then not garbage collected
+
+local splitters_s, splitters_m, splitters_t = { }, { }, { }
+
+local function splitat(separator,single)
+ local splitter = (single and splitters_s[separator]) or splitters_m[separator]
+ if not splitter then
+ separator = P(separator)
+ local other = C((1 - separator)^0)
+ if single then
+ local any = anything
+ splitter = other * (separator * C(any^0) + "") -- ?
+ splitters_s[separator] = splitter
+ else
+ splitter = other * (separator * other)^0
+ splitters_m[separator] = splitter
+ end
+ end
+ return splitter
+end
+
+local function tsplitat(separator)
+ local splitter = splitters_t[separator]
+ if not splitter then
+ splitter = Ct(splitat(separator))
+ splitters_t[separator] = splitter
+ end
+ return splitter
+end
+
+lpeg.splitat = splitat
+lpeg.tsplitat = tsplitat
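-- A small sketch (illustrative only): splitat returns the pieces as separate
-- captures, tsplitat collects them in a table; both cache the compiled
-- splitter per separator, which is why string separators are preferred.
local words  = lpeg.splitat(",")
local twords = lpeg.tsplitat(",")
print(lpeg.match(words, "a,b,c"))    --> a   b   c
print(#lpeg.match(twords, "a,b,c"))  --> 3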
+
+
+local cache = { }
+
+function lpeg.split(separator,str)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.split(str,separator)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+local spacing = patterns.spacer^0 * newline -- sort of strip
+local empty = spacing * Cc("")
+local nonempty = Cs((1-spacing)^1) * spacing^-1
+local content = (empty + nonempty)^1
+
+patterns.textline = content
+
+
+local linesplitter = tsplitat(newline)
+
+patterns.linesplitter = linesplitter
+
+function string.splitlines(str)
+ return match(linesplitter,str)
+end
+
+local utflinesplitter = utfbom^-1 * tsplitat(newline)
+
+patterns.utflinesplitter = utflinesplitter
+
+function string.utfsplitlines(str)
+ return match(utflinesplitter,str)
+end
+
+
+local cache = { }
+
+function lpeg.checkedsplit(separator,str)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.checkedsplit(str,separator)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+
+local function f2(s) local c1, c2 = byte(s,1,2) return c1 * 64 + c2 - 12416 end
+local function f3(s) local c1, c2, c3 = byte(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
+local function f4(s) local c1, c2, c3, c4 = byte(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
+
+local utf8byte = patterns.utf8one/byte + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
+
+patterns.utf8byte = utf8byte
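-- A worked example of the offsets above (illustrative only, assumes this
-- snippet is read as utf-8): for a two byte sequence the constant 12416 is
-- just 0xC0*64 + 0x80, so the lead and continuation markers cancel out.
-- "é" is encoded as 0xC3 0xA9, hence 0xC3*64 + 0xA9 - 12416 = 233 = U+00E9.
print(lpeg.match(lpeg.patterns.utf8byte, "é")) --> 233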
+
+
+
+local cache = { }
+
+function lpeg.stripper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs(((S(str)^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs(((str^1)/"" + 1)^0)
+ end
+end
+
+local cache = { }
+
+function lpeg.keeper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs((((1-S(str))^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs((((1-str)^1)/"" + 1)^0)
+ end
+end
+
+function lpeg.frontstripper(str) -- or pattern (yet undocumented)
+ return (P(str) + P(true)) * Cs(P(1)^0)
+end
+
+function lpeg.endstripper(str) -- or pattern (yet undocumented)
+ return Cs((1 - P(str) * P(-1))^0)
+end
+
+-- Just for fun I looked at the used bytecode and
+-- p = (p and p + pp) or pp gets one more (testset).
+
+function lpeg.replacer(one,two)
+ if type(one) == "table" then
+ local no = #one
+ if no > 0 then
+ local p
+ for i=1,no do
+ local o = one[i]
+ local pp = P(o[1]) / o[2]
+ if p then
+ p = p + pp
+ else
+ p = pp
+ end
+ end
+ return Cs((p + 1)^0)
+ end
+ else
+ two = two or ""
+ return Cs((P(one)/two + 1)^0)
+ end
+end
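-- A usage sketch (illustrative only): the single pair form is what the
-- backslashswapper further down in this patch is built from, the table form
-- handles several replacements in one pass.
local slashes = lpeg.replacer("\\","/")
print(lpeg.match(slashes, [[c:\tex\texmf]]))  --> c:/tex/texmf
local entities = lpeg.replacer { { "<", "&lt;" }, { ">", "&gt;" } }
print(lpeg.match(entities, "a<b>c"))          --> a&lt;b&gt;c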
+
+local splitters_f, splitters_s = { }, { }
+
+function lpeg.firstofsplit(separator) -- always return value
+ local splitter = splitters_f[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = C((1 - separator)^0)
+ splitters_f[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.secondofsplit(separator) -- nil if not split
+ local splitter = splitters_s[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = (1 - separator)^0 * separator * C(anything^0)
+ splitters_s[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.balancer(left,right)
+ left, right = P(left), P(right)
+ return P { left * ((1 - left - right) + V(1))^0 * right }
+end
+
+
+
+local nany = utf8char/""
+
+function lpeg.counter(pattern)
+ pattern = Cs((P(pattern)/" " + nany)^0)
+ return function(str)
+ return #match(pattern,str)
+ end
+end
+
+if utfgmatch then
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local n = 0
+ for _ in utfgmatch(str,what) do
+ n = n + 1
+ end
+ return n
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+else
+
+ local cache = { }
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local p = cache[what]
+ if not p then
+ p = Cs((P(what)/" " + nany)^0)
+ cache[p] = p
+ end
+ return #match(p,str)
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+end
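-- A short sketch (illustrative only): count accepts either a string or an
-- lpeg pattern and tallies its occurrences in a utf aware way.
print(lpeg.count("one, two, three", ","))             --> 2
print(lpeg.count("2011-06-11", lpeg.patterns.digit))  --> 8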
+
+local patterns_escapes = { -- also defined in l-string
+ ["%"] = "%%",
+ ["."] = "%.",
+ ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
+ ["["] = "%[", ["]"] = "%]",
+ ["("] = "%)", [")"] = "%)",
+ -- ["{"] = "%{", ["}"] = "%}"
+ -- ["^"] = "%^", ["$"] = "%$",
+}
+
+local simple_escapes = { -- also defined in l-string
+ ["-"] = "%-",
+ ["."] = "%.",
+ ["?"] = ".",
+ ["*"] = ".*",
+}
+
+local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
+local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
+
+function string.escapedpattern(str,simple)
+ return match(simple and s or p,str)
+end
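-- A quick sketch (illustrative only): escapedpattern prepares a string for
-- use in Lua's string matching; the simple variant additionally maps the
-- usual wildcards onto their Lua pattern equivalents.
print(string.escapedpattern("1.2-3"))        --> 1%.2%-3
print(string.escapedpattern("*.lua", true))  --> .*%.lua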
+
+-- utf extensions
+
+lpeg.UP = lpeg.P
+
+if utfcharacters then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfcharacters(str) do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+
+elseif utfgmatch then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfgmatch(str,".") do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+else
+
+ function lpeg.US(str)
+ local p
+ local f = function(uc)
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ match((utf8char/f)^0,str)
+ return p
+ end
+
+end
+
+local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
+
+local utfchar = unicode and unicode.utf8 and unicode.utf8.char
+
+function lpeg.UR(str,more)
+ local first, last
+ if type(str) == "number" then
+ first = str
+ last = more or first
+ else
+ first, last = match(range,str)
+ if not last then
+ return P(str)
+ end
+ end
+ if first == last then
+ return P(str)
+ elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterium
+ local p
+ for i=first,last do
+ if p then
+ p = p + P(utfchar(i))
+ else
+ p = P(utfchar(i))
+ end
+ end
+ return p -- nil when invalid range
+ else
+ local f = function(b)
+ return b >= first and b <= last
+ end
+ return utf8byte / f -- nil when invalid range
+ end
+end
+
+
+
+function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
+ if type(list) ~= "table" then
+ list = { list, ... }
+ end
+ -- sort(list) -- longest match first
+ local p = P(list[1])
+ for l=2,#list do
+ p = p + P(list[l])
+ end
+ return p
+end
+
+function lpeg.is_lpeg(p)
+ return p and lpegtype(p) == "pattern"
+end
+
+-- For the moment here, but it might move to utilities:
+
+local sort, fastcopy, sortedpairs = table.sort, table.fastcopy, table.sortedpairs -- dependency!
+
+function lpeg.append(list,pp)
+ local p = pp
+ if #list > 0 then
+ list = fastcopy(list)
+ sort(list)
+ for l=1,#list do
+ if p then
+ p = P(list[l]) + p
+ else
+ p = P(list[l])
+ end
+ end
+ else
+ for k, v in sortedpairs(list) do
+ if p then
+ p = P(k)/v + p
+ else
+ p = P(k)/v
+ end
+ end
+ end
+ return p
+end
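-- A minimal sketch (illustrative only, the month names are made up): given a
-- hash, append builds an alternation with the replacement values attached,
-- convenient inside a substitution capture.
local p = lpeg.append { january = "01", june = "06", july = "07" }
print(lpeg.match(lpeg.Cs((p + 1)^0), "june 2011, july 2011")) --> 06 2011, 07 2011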
+
+
end -- of closure
@@ -3399,10 +3466,6 @@ local type, tonumber = type, tonumber
boolean = boolean or { }
local boolean = boolean
--- function boolean.tonumber(b)
--- return b and 1 or 0 -- test and test and return or return
--- end
-
function boolean.tonumber(b)
if b then return 1 else return 0 end -- test and return or return
end
@@ -3809,6 +3872,7 @@ local tables = utilities.tables
local format, gmatch, rep = string.format, string.gmatch, string.rep
local concat, insert, remove = table.concat, table.insert, table.remove
local setmetatable, getmetatable, tonumber, tostring = setmetatable, getmetatable, tonumber, tostring
+local type, next, rawset = type, next, rawset
function tables.definetable(target) -- defines undefined tables
local composed, t, n = nil, { }, 0
@@ -3902,6 +3966,43 @@ function table.toxml(t,name,nobanner,indent,spaces)
return concat(result,"\n")
end
+-- also experimental
+
+-- encapsulate(table,utilities.tables)
+-- encapsulate(table,utilities.tables,true)
+-- encapsulate(table,true)
+
+function tables.encapsulate(core,capsule,protect)
+ if type(capsule) ~= "table" then
+ protect = true
+ capsule = { }
+ end
+ for key, value in next, core do
+ if capsule[key] then
+ print(format("\ninvalid inheritance '%s' in '%s': %s",key,tostring(core)))
+ os.exit()
+ else
+ capsule[key] = value
+ end
+ end
+ if protect then
+ for key, value in next, core do
+ core[key] = nil
+ end
+ setmetatable(core, {
+ __index = capsule,
+ __newindex = function(t,key,value)
+ if capsule[key] then
+ print(format("\ninvalid overload '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ rawset(t,key,value)
+ end
+ end
+ } )
+ end
+end
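-- A usage sketch (illustrative only, the helper names are made up):
-- encapsulate moves the functions of core into capsule and, with protect
-- set, turns core into a read-through proxy that refuses overloads.
local core, capsule = { helper = function() end }, { }
utilities.tables.encapsulate(core, capsule, true)
print(core.helper == capsule.helper) --> true
core.another = function() end        -- a new key, accepted via rawset
-- core.helper = function() end      -- would report an invalid overload and exit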
+
end -- of closure
@@ -4675,6 +4776,7 @@ end
local is_node = node and node.is_node
+local is_lpeg = lpeg and lpeg.type
function inspect(i) -- global function
local ti = type(i)
@@ -4682,6 +4784,8 @@ function inspect(i) -- global function
table.print(i,"table")
elseif is_node and is_node(i) then
table.print(nodes.astable(i),tostring(i))
+ elseif is_lpeg and is_lpeg(i) then
+ lpeg.print(i)
else
print(tostring(i))
end
@@ -4705,7 +4809,7 @@ if not modules then modules = { } end modules ['trac-inf'] = {
-- get warnings about assignments. This is more efficient than using rawset
-- and rawget.
-local format = string.format
+local format, lower = string.format, string.lower
local clock = os.gettimeofday or os.clock -- should go in environment
local write_nl = texio.write_nl
@@ -4807,7 +4911,7 @@ function statistics.show(reporter)
-- this code will move
local register = statistics.register
register("luatex banner", function()
- return string.lower(status.banner)
+ return lower(status.banner)
end)
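-- A minimal sketch of the idiom applied throughout this hunk (illustrative
-- only): caching a library function in a local upvalue avoids a table
-- lookup on every call in frequently run code.
local lower = string.lower    -- resolved once, used many times
print(lower("LuaTeX Banner")) --> luatex banner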
register("control sequences", function()
return format("%s of %s", status.cs_count, status.hash_size+status.hash_extra)
@@ -9773,7 +9877,7 @@ if not modules then modules = { } end modules ['data-ini'] = {
license = "see context related readme files",
}
-local gsub, find, gmatch = string.gsub, string.find, string.gmatch
+local gsub, find, gmatch, char = string.gsub, string.find, string.gmatch, string.char
local concat = table.concat
local next, type = next, type
@@ -9835,7 +9939,7 @@ do
local homedir = osgetenv(ostype == "windows" and 'USERPROFILE' or 'HOME') or ''
if not homedir or homedir == "" then
- homedir = string.char(127) -- we need a value, later we wil trigger on it
+ homedir = char(127) -- we need a value, later we will trigger on it
end
homedir = file.collapsepath(homedir)
@@ -10008,7 +10112,7 @@ if not modules then modules = { } end modules ['data-exp'] = {
license = "see context related readme files",
}
-local format, find, gmatch, lower = string.format, string.find, string.gmatch, string.lower
+local format, find, gmatch, lower, char = string.format, string.find, string.gmatch, string.lower, string.char
local concat, sort = table.concat, table.sort
local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local Ct, Cs, Cc, P, C, S = lpeg.Ct, lpeg.Cs, lpeg.Cc, lpeg.P, lpeg.C, lpeg.S
@@ -10142,7 +10246,7 @@ local homedir
function resolvers.cleanpath(str)
if not homedir then
homedir = lpegmatch(cleanup,environment.homedir or "")
- if homedir == string.char(127) or homedir == "" or not lfs.isdir(homedir) then
+ if homedir == char(127) or homedir == "" or not lfs.isdir(homedir) then
if trace_expansions then
report_expansions("no home dir set, ignoring dependent paths")
end
@@ -10191,8 +10295,8 @@ end
local cache = { }
----- splitter = Ct(lpeg.splitat(S(ostype == "windows" and ";" or ":;"))) -- maybe add ,
-local splitter = Ct(lpeg.splitat(";")) -- as we move towards urls, prefixes and use tables we no longer do :
+----- splitter = lpeg.tsplitat(S(ostype == "windows" and ";" or ":;")) -- maybe add ,
+local splitter = lpeg.tsplitat(";") -- as we move towards urls, prefixes and use tables we no longer do :
local backslashswapper = lpeg.replacer("\\","/")
@@ -10640,6 +10744,7 @@ luatools with a recache feature.</p>
--ldx]]--
local format, lower, gsub, concat = string.format, string.lower, string.gsub, table.concat
+local serialize, serializetofile = table.serialize, table.tofile
local mkdirs, isdir = dir.mkdirs, lfs.isdir
local trace_locating = false trackers.register("resolvers.locating", function(v) trace_locating = v end)
@@ -10793,7 +10898,7 @@ function caches.usedpaths()
end
function caches.configfiles()
- return table.concat(resolvers.instance.specification,";")
+ return concat(resolvers.instance.specification,";")
end
function caches.hashed(tree)
@@ -10917,9 +11022,9 @@ function caches.savedata(filepath,filename,data,raw)
end
data.cache_uuid = os.uuid()
if caches.direct then
- file.savedata(tmaname,table.serialize(data,true,saveoptions))
+ file.savedata(tmaname,serialize(data,true,saveoptions))
else
- table.tofile(tmaname,data,true,saveoptions)
+ serializetofile(tmaname,data,true,saveoptions)
end
utilities.lua.compile(tmaname,tmcname)
end
@@ -10986,7 +11091,7 @@ function caches.savecontent(cachename,dataname,content)
content = content,
uuid = os.uuid(),
}
- local ok = io.savedata(luaname,table.serialize(data,true))
+ local ok = io.savedata(luaname,serialize(data,true))
if ok then
if trace_locating then
report_resolvers("category '%s', cachename '%s' saved in '%s'",dataname,cachename,luaname)
@@ -13941,6 +14046,7 @@ if not modules then modules = { } end modules ['data-lst'] = {
-- used in mtxrun, can be loaded later .. todo
local find, concat, upper, format = string.find, table.concat, string.upper, string.format
+local fastcopy, sortedpairs = table.fastcopy, table.sortedpairs
resolvers.listers = resolvers.listers or { }
@@ -13971,10 +14077,10 @@ function resolvers.listers.variables(pattern)
end
end
end
- local env = table.fastcopy(environment)
- local var = table.fastcopy(variables)
- local exp = table.fastcopy(expansions)
- for key, value in table.sortedpairs(configured) do
+ local env = fastcopy(environment)
+ local var = fastcopy(variables)
+ local exp = fastcopy(expansions)
+ for key, value in sortedpairs(configured) do
if key ~= "" and (pattern == "" or find(upper(key),pattern)) then
report_lists(key)
report_lists(" env: %s",tabstr(rawget(environment,key)) or "unset")
@@ -13983,9 +14089,9 @@ function resolvers.listers.variables(pattern)
report_lists(" res: %s",resolvers.resolve(expansions[key]) or "unset")
end
end
- instance.environment = table.fastcopy(env)
- instance.variables = table.fastcopy(var)
- instance.expansions = table.fastcopy(exp)
+ instance.environment = fastcopy(env)
+ instance.variables = fastcopy(var)
+ instance.expansions = fastcopy(exp)
end
function resolvers.listers.configurations(report)
@@ -14272,8 +14378,8 @@ own = { } -- not local, might change
own.libs = { -- order can be made better
'l-string.lua',
- 'l-lpeg.lua',
'l-table.lua',
+ 'l-lpeg.lua',
'l-io.lua',
'l-number.lua',
'l-set.lua',
diff --git a/scripts/context/stubs/mswin/mtxrun.lua b/scripts/context/stubs/mswin/mtxrun.lua
index d0cf3d46d..6a8b2e99b 100644
--- a/scripts/context/stubs/mswin/mtxrun.lua
+++ b/scripts/context/stubs/mswin/mtxrun.lua
@@ -160,509 +160,6 @@ end -- of closure
do -- create closure to overcome 200 locals limit
-if not modules then modules = { } end modules ['l-lpeg'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local lpeg = require("lpeg")
-
-local type = type
-
--- Beware, we predefine a bunch of patterns here and one reason for doing so
--- is that we get consistent behaviour in some of the visualizers.
-
-lpeg.patterns = lpeg.patterns or { } -- so that we can share
-local patterns = lpeg.patterns
-
-local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
-local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
-local lpegtype = lpeg.type
-
-local utfcharacters = string.utfcharacters
-local utfgmatch = unicode and unicode.utf8.gmatch
-
-local anything = P(1)
-local endofstring = P(-1)
-local alwaysmatched = P(true)
-
-patterns.anything = anything
-patterns.endofstring = endofstring
-patterns.beginofstring = alwaysmatched
-patterns.alwaysmatched = alwaysmatched
-
-local digit, sign = R('09'), S('+-')
-local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
-local newline = crlf + cr + lf
-local escaped = P("\\") * anything
-local squote = P("'")
-local dquote = P('"')
-local space = P(" ")
-
-local utfbom_32_be = P('\000\000\254\255')
-local utfbom_32_le = P('\255\254\000\000')
-local utfbom_16_be = P('\255\254')
-local utfbom_16_le = P('\254\255')
-local utfbom_8 = P('\239\187\191')
-local utfbom = utfbom_32_be + utfbom_32_le
- + utfbom_16_be + utfbom_16_le
- + utfbom_8
-local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
- + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
- + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
-
-local utf8next = R("\128\191")
-
-patterns.utf8one = R("\000\127")
-patterns.utf8two = R("\194\223") * utf8next
-patterns.utf8three = R("\224\239") * utf8next * utf8next
-patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
-patterns.utfbom = utfbom
-patterns.utftype = utftype
-
-local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
-local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
-
-patterns.utf8 = utf8char
-patterns.utf8char = utf8char
-patterns.validutf8 = validutf8char
-patterns.validutf8char = validutf8char
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.cardinal = sign^0 * digit^1
-patterns.integer = sign^0 * digit^1
-patterns.float = sign^0 * digit^0 * P('.') * digit^1
-patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
-patterns.number = patterns.float + patterns.integer
-patterns.cnumber = patterns.cfloat + patterns.integer
-patterns.oct = P("0") * R("07")^1
-patterns.octal = patterns.oct
-patterns.HEX = P("0x") * R("09","AF")^1
-patterns.hex = P("0x") * R("09","af")^1
-patterns.hexadecimal = P("0x") * R("09","AF","af")^1
-patterns.lowercase = R("az")
-patterns.uppercase = R("AZ")
-patterns.letter = patterns.lowercase + patterns.uppercase
-patterns.space = space
-patterns.tab = P("\t")
-patterns.spaceortab = patterns.space + patterns.tab
-patterns.eol = S("\n\r")
-patterns.spacer = S(" \t\f\v") -- + string.char(0xc2, 0xa0) if we want utf (cf mail roberto)
-patterns.newline = newline
-patterns.emptyline = newline^1
-patterns.nonspacer = 1 - patterns.spacer
-patterns.whitespace = patterns.eol + patterns.spacer
-patterns.nonwhitespace = 1 - patterns.whitespace
-patterns.equal = P("=")
-patterns.comma = P(",")
-patterns.commaspacer = P(",") * patterns.spacer^0
-patterns.period = P(".")
-patterns.colon = P(":")
-patterns.semicolon = P(";")
-patterns.underscore = P("_")
-patterns.escaped = escaped
-patterns.squote = squote
-patterns.dquote = dquote
-patterns.nosquote = (escaped + (1-squote))^0
-patterns.nodquote = (escaped + (1-dquote))^0
-patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
-patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
-patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
-patterns.unspacer = ((patterns.spacer^1)/"")^0
-
-patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
-patterns.beginline = #(1-newline)
-
-local unquoted = Cs(patterns.unquoted * endofstring) -- not C
-
-function string.unquoted(str)
- return match(unquoted,str) or str
-end
-
-
-function lpeg.anywhere(pattern) --slightly adapted from website
- return P { P(pattern) + 1 * V(1) } -- why so complex?
-end
-
-function lpeg.splitter(pattern, action)
- return (((1-P(pattern))^1)/action+1)^0
-end
-
-local splitters_s, splitters_m = { }, { }
-
-local function splitat(separator,single)
- local splitter = (single and splitters_s[separator]) or splitters_m[separator]
- if not splitter then
- separator = P(separator)
- local other = C((1 - separator)^0)
- if single then
- local any = anything
- splitter = other * (separator * C(any^0) + "") -- ?
- splitters_s[separator] = splitter
- else
- splitter = other * (separator * other)^0
- splitters_m[separator] = splitter
- end
- end
- return splitter
-end
-
-lpeg.splitat = splitat
-
-
-local cache = { }
-
-function lpeg.split(separator,str)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.split(str,separator)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-local spacing = patterns.spacer^0 * newline -- sort of strip
-local empty = spacing * Cc("")
-local nonempty = Cs((1-spacing)^1) * spacing^-1
-local content = (empty + nonempty)^1
-
-patterns.textline = content
-
-
-local linesplitter = Ct(splitat(newline))
-
-patterns.linesplitter = linesplitter
-
-function string.splitlines(str)
- return match(linesplitter,str)
-end
-
-local utflinesplitter = utfbom^-1 * Ct(splitat(newline))
-
-patterns.utflinesplitter = utflinesplitter
-
-function string.utfsplitlines(str)
- return match(utflinesplitter,str)
-end
-
-
-local cache = { }
-
-function lpeg.checkedsplit(separator,str)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.checkedsplit(str,separator)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-
-local f1 = string.byte
-
-local function f2(s) local c1, c2 = f1(s,1,2) return c1 * 64 + c2 - 12416 end
-local function f3(s) local c1, c2, c3 = f1(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
-local function f4(s) local c1, c2, c3, c4 = f1(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
-
-local utf8byte = patterns.utf8one/f1 + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
-
-patterns.utf8byte = utf8byte
-
-
-
-local cache = { }
-
-function lpeg.stripper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs(((S(str)^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs(((str^1)/"" + 1)^0)
- end
-end
-
-local cache = { }
-
-function lpeg.keeper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs((((1-S(str))^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs((((1-str)^1)/"" + 1)^0)
- end
-end
-
-function lpeg.frontstripper(str) -- or pattern (yet undocumented)
- return (P(str) + P(true)) * Cs(P(1)^0)
-end
-
-function lpeg.endstripper(str) -- or pattern (yet undocumented)
- return Cs((1 - P(str) * P(-1))^0)
-end
-
--- Just for fun I looked at the used bytecode and
--- p = (p and p + pp) or pp gets one more (testset).
-
-function lpeg.replacer(one,two)
- if type(one) == "table" then
- local no = #one
- if no > 0 then
- local p
- for i=1,no do
- local o = one[i]
- local pp = P(o[1]) / o[2]
- if p then
- p = p + pp
- else
- p = pp
- end
- end
- return Cs((p + 1)^0)
- end
- else
- two = two or ""
- return Cs((P(one)/two + 1)^0)
- end
-end
-
-local splitters_f, splitters_s = { }, { }
-
-function lpeg.firstofsplit(separator) -- always return value
- local splitter = splitters_f[separator]
- if not splitter then
- separator = P(separator)
- splitter = C((1 - separator)^0)
- splitters_f[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.secondofsplit(separator) -- nil if not split
- local splitter = splitters_s[separator]
- if not splitter then
- separator = P(separator)
- splitter = (1 - separator)^0 * separator * C(anything^0)
- splitters_s[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.balancer(left,right)
- left, right = P(left), P(right)
- return P { left * ((1 - left - right) + V(1))^0 * right }
-end
-
-
-
-local nany = utf8char/""
-
-function lpeg.counter(pattern)
- pattern = Cs((P(pattern)/" " + nany)^0)
- return function(str)
- return #match(pattern,str)
- end
-end
-
-if utfgmatch then
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local n = 0
- for _ in utfgmatch(str,what) do
- n = n + 1
- end
- return n
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-else
-
- local cache = { }
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local p = cache[what]
- if not p then
- p = Cs((P(what)/" " + nany)^0)
- cache[p] = p
- end
- return #match(p,str)
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-end
-
-local patterns_escapes = { -- also defines in l-string
- ["%"] = "%%",
- ["."] = "%.",
- ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
- ["["] = "%[", ["]"] = "%]",
- ["("] = "%)", [")"] = "%)",
- -- ["{"] = "%{", ["}"] = "%}"
- -- ["^"] = "%^", ["$"] = "%$",
-}
-
-local simple_escapes = { -- also defines in l-string
- ["-"] = "%-",
- ["."] = "%.",
- ["?"] = ".",
- ["*"] = ".*",
-}
-
-local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
-local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
-
-function string.escapedpattern(str,simple)
- return match(simple and s or p,str)
-end
-
--- utf extensies
-
-lpeg.UP = lpeg.P
-
-if utfcharacters then
-
- function lpeg.US(str)
- local p
- for uc in utfcharacters(str) do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-
-elseif utfgmatch then
-
- function lpeg.US(str)
- local p
- for uc in utfgmatch(str,".") do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-else
-
- function lpeg.US(str)
- local p
- local f = function(uc)
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- match((utf8char/f)^0,str)
- return p
- end
-
-end
-
-local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
-
-local utfchar = unicode and unicode.utf8 and unicode.utf8.char
-
-function lpeg.UR(str,more)
- local first, last
- if type(str) == "number" then
- first = str
- last = more or first
- else
- first, last = match(range,str)
- if not last then
- return P(str)
- end
- end
- if first == last then
- return P(str)
- elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterium
- local p
- for i=first,last do
- if p then
- p = p + P(utfchar(i))
- else
- p = P(utfchar(i))
- end
- end
- return p -- nil when invalid range
- else
- local f = function(b)
- return b >= first and b <= last
- end
- return utf8byte / f -- nil when invalid range
- end
-end
-
-
-
-function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
- if type(list) ~= "table" then
- list = { list, ... }
- end
- -- sort(list) -- longest match first
- local p = P(list[1])
- for l=2,#list do
- p = p + P(list[l])
- end
- return p
-end
-
-function lpeg.is_lpeg(p)
- return p and lpegtype(p) == "pattern"
-end
-
-
-
-end -- of closure
-
-do -- create closure to overcome 200 locals limit
-
if not modules then modules = { } end modules ['l-table'] = {
version = 1.001,
comment = "companion to luat-lib.mkiv",
@@ -1575,6 +1072,576 @@ function table.has_one_entry(t)
return t and not next(t,next(t))
end
+-- new
+
+function table.loweredkeys(t) -- maybe utf
+ local l = { }
+ for k, v in next, t do
+ l[lower(k)] = v
+ end
+ return l
+end
+
+
+end -- of closure
+
+do -- create closure to overcome 200 locals limit
+
+if not modules then modules = { } end modules ['l-lpeg'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local lpeg = require("lpeg")
+
+local type = type
+local byte, char = string.byte, string.char
+
+-- Beware, we predefine a bunch of patterns here and one reason for doing so
+-- is that we get consistent behaviour in some of the visualizers.
+
+lpeg.patterns = lpeg.patterns or { } -- so that we can share
+local patterns = lpeg.patterns
+
+local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
+local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
+local lpegtype = lpeg.type
+
+local utfcharacters = string.utfcharacters
+local utfgmatch = unicode and unicode.utf8.gmatch
+
+local anything = P(1)
+local endofstring = P(-1)
+local alwaysmatched = P(true)
+
+patterns.anything = anything
+patterns.endofstring = endofstring
+patterns.beginofstring = alwaysmatched
+patterns.alwaysmatched = alwaysmatched
+
+local digit, sign = R('09'), S('+-')
+local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
+local newline = crlf + cr + lf
+local escaped = P("\\") * anything
+local squote = P("'")
+local dquote = P('"')
+local space = P(" ")
+
+local utfbom_32_be = P('\000\000\254\255')
+local utfbom_32_le = P('\255\254\000\000')
+local utfbom_16_be = P('\255\254')
+local utfbom_16_le = P('\254\255')
+local utfbom_8 = P('\239\187\191')
+local utfbom = utfbom_32_be + utfbom_32_le
+ + utfbom_16_be + utfbom_16_le
+ + utfbom_8
+local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
+ + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
+ + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
+
+local utf8next = R("\128\191")
+
+patterns.utf8one = R("\000\127")
+patterns.utf8two = R("\194\223") * utf8next
+patterns.utf8three = R("\224\239") * utf8next * utf8next
+patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
+patterns.utfbom = utfbom
+patterns.utftype = utftype
+
+local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
+local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
+
+patterns.utf8 = utf8char
+patterns.utf8char = utf8char
+patterns.validutf8 = validutf8char
+patterns.validutf8char = validutf8char
+
+patterns.digit = digit
+patterns.sign = sign
+patterns.cardinal = sign^0 * digit^1
+patterns.integer = sign^0 * digit^1
+patterns.float = sign^0 * digit^0 * P('.') * digit^1
+patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
+patterns.number = patterns.float + patterns.integer
+patterns.cnumber = patterns.cfloat + patterns.integer
+patterns.oct = P("0") * R("07")^1
+patterns.octal = patterns.oct
+patterns.HEX = P("0x") * R("09","AF")^1
+patterns.hex = P("0x") * R("09","af")^1
+patterns.hexadecimal = P("0x") * R("09","AF","af")^1
+patterns.lowercase = R("az")
+patterns.uppercase = R("AZ")
+patterns.letter = patterns.lowercase + patterns.uppercase
+patterns.space = space
+patterns.tab = P("\t")
+patterns.spaceortab = patterns.space + patterns.tab
+patterns.eol = S("\n\r")
+patterns.spacer = S(" \t\f\v") -- + char(0xc2, 0xa0) if we want utf (cf mail roberto)
+patterns.newline = newline
+patterns.emptyline = newline^1
+patterns.nonspacer = 1 - patterns.spacer
+patterns.whitespace = patterns.eol + patterns.spacer
+patterns.nonwhitespace = 1 - patterns.whitespace
+patterns.equal = P("=")
+patterns.comma = P(",")
+patterns.commaspacer = P(",") * patterns.spacer^0
+patterns.period = P(".")
+patterns.colon = P(":")
+patterns.semicolon = P(";")
+patterns.underscore = P("_")
+patterns.escaped = escaped
+patterns.squote = squote
+patterns.dquote = dquote
+patterns.nosquote = (escaped + (1-squote))^0
+patterns.nodquote = (escaped + (1-dquote))^0
+patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
+patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
+patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
+patterns.unspacer = ((patterns.spacer^1)/"")^0
+
+patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
+patterns.beginline = #(1-newline)
+
+local unquoted = Cs(patterns.unquoted * endofstring) -- not C
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+-- more efficient:
+
+local unquoted = (
+ squote * Cs(1 - P(-2)) * squote
+ + dquote * Cs(1 - P(-2)) * dquote
+)
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+patterns.unquoted = unquoted
+
+
+function lpeg.anywhere(pattern) --slightly adapted from website
+ return P { P(pattern) + 1 * V(1) } -- why so complex?
+end
+
+function lpeg.splitter(pattern, action)
+ return (((1-P(pattern))^1)/action+1)^0
+end
+
+function lpeg.tsplitter(pattern, action)
+ return Ct((((1-P(pattern))^1)/action+1)^0)
+end
+
+-- problem: the separator can be an lpeg pattern and that does not hash too well,
+-- but it's quite okay as the key is then not garbage collected
+
+local splitters_s, splitters_m, splitters_t = { }, { }, { }
+
+local function splitat(separator,single)
+ local splitter = (single and splitters_s[separator]) or splitters_m[separator]
+ if not splitter then
+ separator = P(separator)
+ local other = C((1 - separator)^0)
+ if single then
+ local any = anything
+ splitter = other * (separator * C(any^0) + "") -- ?
+ splitters_s[separator] = splitter
+ else
+ splitter = other * (separator * other)^0
+ splitters_m[separator] = splitter
+ end
+ end
+ return splitter
+end
+
+local function tsplitat(separator)
+ local splitter = splitters_t[separator]
+ if not splitter then
+ splitter = Ct(splitat(separator))
+ splitters_t[separator] = splitter
+ end
+ return splitter
+end
+
+lpeg.splitat = splitat
+lpeg.tsplitat = tsplitat
+
+
+local cache = { }
+
+function lpeg.split(separator,str)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.split(str,separator)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+local spacing = patterns.spacer^0 * newline -- sort of strip
+local empty = spacing * Cc("")
+local nonempty = Cs((1-spacing)^1) * spacing^-1
+local content = (empty + nonempty)^1
+
+patterns.textline = content
+
+
+local linesplitter = tsplitat(newline)
+
+patterns.linesplitter = linesplitter
+
+function string.splitlines(str)
+ return match(linesplitter,str)
+end
+
+local utflinesplitter = utfbom^-1 * tsplitat(newline)
+
+patterns.utflinesplitter = utflinesplitter
+
+function string.utfsplitlines(str)
+ return match(utflinesplitter,str)
+end
+
+
+local cache = { }
+
+function lpeg.checkedsplit(separator,str)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.checkedsplit(str,separator)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
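+
+-- usage sketch (illustrative): split keeps empty fields, checkedsplit drops them
+--
+--~ inspect(lpeg.split(",","a,,b"))        -- { "a", "", "b" }
+--~ inspect(lpeg.checkedsplit(",","a,,b")) -- { "a", "b" }
+--~ inspect(string.splitlines("one\ntwo")) -- { "one", "two" }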
+
+
+local function f2(s) local c1, c2 = byte(s,1,2) return c1 * 64 + c2 - 12416 end
+local function f3(s) local c1, c2, c3 = byte(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
+local function f4(s) local c1, c2, c3, c4 = byte(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
+
+local utf8byte = patterns.utf8one/byte + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
+
+patterns.utf8byte = utf8byte
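+
+-- the magic constants above just fold the fixed utf-8 offsets into one subtraction: for
+-- a two byte sequence (c1-0xC0)*0x40 + (c2-0x80) = c1*64 + c2 - 12416, and the three and
+-- four byte constants 925824 and 63447168 come about the same way; illustrative check:
+--
+--~ print(match(utf8byte,"é")) -- 233 (U+00E9)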
+
+
+
+local cache = { }
+
+function lpeg.stripper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs(((S(str)^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs(((str^1)/"" + 1)^0)
+ end
+end
+
+local cache = { }
+
+function lpeg.keeper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs((((1-S(str))^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs((((1-str)^1)/"" + 1)^0)
+ end
+end
+
+function lpeg.frontstripper(str) -- or pattern (yet undocumented)
+ return (P(str) + P(true)) * Cs(P(1)^0)
+end
+
+function lpeg.endstripper(str) -- or pattern (yet undocumented)
+ return Cs((1 - P(str) * P(-1))^0)
+end
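+
+-- usage sketch (illustrative): strippers remove characters, keepers keep them, and the
+-- front and end variants only touch the edges
+--
+--~ print(match(lpeg.stripper("-"),"a-b-c"))            -- abc
+--~ print(match(lpeg.keeper("0123456789"),"a1b22c"))    -- 122
+--~ print(match(lpeg.frontstripper("0x"),"0xCAFE"))     -- CAFE
+--~ print(match(lpeg.endstripper(".lua"),"l-lpeg.lua")) -- l-lpeg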
+
+-- Just for fun I looked at the used bytecode and
+-- p = (p and p + pp) or pp gets one more (testset).
+
+function lpeg.replacer(one,two)
+ if type(one) == "table" then
+ local no = #one
+ if no > 0 then
+ local p
+ for i=1,no do
+ local o = one[i]
+ local pp = P(o[1]) / o[2]
+ if p then
+ p = p + pp
+ else
+ p = pp
+ end
+ end
+ return Cs((p + 1)^0)
+ end
+ else
+ two = two or ""
+ return Cs((P(one)/two + 1)^0)
+ end
+end
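+
+-- usage sketch (illustrative): one replacement or a table of them
+--
+--~ print(match(lpeg.replacer("\\","/"),[[some\nasty\path]]))              -- some/nasty/path
+--~ print(match(lpeg.replacer { { "<", "&lt;" }, { ">", "&gt;" } },"<x>")) -- &lt;x&gt;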
+
+local splitters_f, splitters_s = { }, { }
+
+function lpeg.firstofsplit(separator) -- always return value
+ local splitter = splitters_f[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = C((1 - separator)^0)
+ splitters_f[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.secondofsplit(separator) -- nil if not split
+ local splitter = splitters_s[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = (1 - separator)^0 * separator * C(anything^0)
+ splitters_s[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.balancer(left,right)
+ left, right = P(left), P(right)
+ return P { left * ((1 - left - right) + V(1))^0 * right }
+end
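+
+-- usage sketch (illustrative): splitting once at the first separator and matching a
+-- balanced pair
+--
+--~ print(match(lpeg.firstofsplit("."),"l-lpeg.lua"))   -- l-lpeg
+--~ print(match(lpeg.secondofsplit("."),"l-lpeg.lua"))  -- lua
+--~ print(match(lpeg.secondofsplit("."),"noextension")) -- nil
+--~ print(match(lpeg.balancer("{","}"),"{a{b}c} tail")) -- 8 (just past the balanced group)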
+
+
+
+local nany = utf8char/""
+
+function lpeg.counter(pattern)
+ pattern = Cs((P(pattern)/" " + nany)^0)
+ return function(str)
+ return #match(pattern,str)
+ end
+end
+
+if utfgmatch then
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local n = 0
+ for _ in utfgmatch(str,what) do
+ n = n + 1
+ end
+ return n
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+else
+
+ local cache = { }
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local p = cache[what]
+ if not p then
+ p = Cs((P(what)/" " + nany)^0)
+        cache[what] = p
+ end
+ return #match(p,str)
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+end
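+
+-- usage sketch (illustrative): counting substrings or pattern hits
+--
+--~ print(lpeg.count("banana","an"))    -- 2
+--~ print(lpeg.count("banana",P("an"))) -- 2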
+
+local patterns_escapes = { -- also defined in l-string
+ ["%"] = "%%",
+ ["."] = "%.",
+ ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
+ ["["] = "%[", ["]"] = "%]",
+    ["("] = "%(", [")"] = "%)",
+ -- ["{"] = "%{", ["}"] = "%}"
+ -- ["^"] = "%^", ["$"] = "%$",
+}
+
+local simple_escapes = { -- also defined in l-string
+ ["-"] = "%-",
+ ["."] = "%.",
+ ["?"] = ".",
+ ["*"] = ".*",
+}
+
+local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
+local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
+
+function string.escapedpattern(str,simple)
+ return match(simple and s or p,str)
+end
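+
+-- usage sketch (illustrative): escaping magic characters for string.find and friends;
+-- the simple variant also maps ? and * onto their glob-like equivalents
+--
+--~ print(string.escapedpattern("l-lpeg.lua")) -- l%-lpeg%.lua
+--~ print(string.escapedpattern("*.lua",true)) -- .*%.lua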
+
+-- utf extensions
+
+lpeg.UP = lpeg.P
+
+if utfcharacters then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfcharacters(str) do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+
+elseif utfgmatch then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfgmatch(str,".") do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+else
+
+ function lpeg.US(str)
+ local p
+ local f = function(uc)
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ match((utf8char/f)^0,str)
+ return p
+ end
+
+end
+
+local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
+
+local utfchar = unicode and unicode.utf8 and unicode.utf8.char
+
+function lpeg.UR(str,more)
+ local first, last
+ if type(str) == "number" then
+ first = str
+ last = more or first
+ else
+ first, last = match(range,str)
+ if not last then
+ return P(str)
+ end
+ end
+ if first == last then
+ return P(str)
+    elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterion
+ local p
+ for i=first,last do
+ if p then
+ p = p + P(utfchar(i))
+ else
+ p = P(utfchar(i))
+ end
+ end
+ return p -- nil when invalid range
+ else
+ local f = function(b)
+ return b >= first and b <= last
+ end
+ return utf8byte / f -- nil when invalid range
+ end
+end
+
+
+
+function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
+ if type(list) ~= "table" then
+ list = { list, ... }
+ end
+ -- sort(list) -- longest match first
+ local p = P(list[1])
+ for l=2,#list do
+ p = p + P(list[l])
+ end
+ return p
+end
+
+function lpeg.is_lpeg(p)
+ return p and lpegtype(p) == "pattern"
+end
+
+-- For the moment here, but it might move to utilities:
+
+local sort, fastcopy, sortedpairs = table.sort, table.fastcopy, table.sortedpairs -- dependency!
+
+function lpeg.append(list,pp)
+ local p = pp
+ if #list > 0 then
+ list = fastcopy(list)
+ sort(list)
+ for l=1,#list do
+ if p then
+ p = P(list[l]) + p
+ else
+ p = P(list[l])
+ end
+ end
+ else
+ for k, v in sortedpairs(list) do
+ if p then
+ p = P(k)/v + p
+ else
+ p = P(k)/v
+ end
+ end
+ end
+ return p
+end
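+
+-- usage sketch (illustrative): append builds one alternation out of a list (sorted
+-- first) or out of a key/value table of replacements
+--
+--~ local reserved = lpeg.append { "if", "then", "else" }
+--~ local entities = Cs((lpeg.append { ["<"] = "&lt;", [">"] = "&gt;" } + 1)^0)
+--~ print(match(entities,"<tag>")) -- &lt;tag&gt;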
+
+
end -- of closure
@@ -3399,10 +3466,6 @@ local type, tonumber = type, tonumber
boolean = boolean or { }
local boolean = boolean
--- function boolean.tonumber(b)
--- return b and 1 or 0 -- test and test and return or return
--- end
-
function boolean.tonumber(b)
if b then return 1 else return 0 end -- test and return or return
end
@@ -3809,6 +3872,7 @@ local tables = utilities.tables
local format, gmatch, rep = string.format, string.gmatch, string.rep
local concat, insert, remove = table.concat, table.insert, table.remove
local setmetatable, getmetatable, tonumber, tostring = setmetatable, getmetatable, tonumber, tostring
+local type, next, rawset = type, next, rawset
function tables.definetable(target) -- defines undefined tables
local composed, t, n = nil, { }, 0
@@ -3902,6 +3966,43 @@ function table.toxml(t,name,nobanner,indent,spaces)
return concat(result,"\n")
end
+-- also experimental
+
+-- encapsulate(table,utilities.tables)
+-- encapsulate(table,utilities.tables,true)
+-- encapsulate(table,true)
+
+function tables.encapsulate(core,capsule,protect)
+ if type(capsule) ~= "table" then
+ protect = true
+ capsule = { }
+ end
+ for key, value in next, core do
+ if capsule[key] then
+            print(format("\ninvalid inheritance '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ capsule[key] = value
+ end
+ end
+ if protect then
+ for key, value in next, core do
+ core[key] = nil
+ end
+ setmetatable(core, {
+ __index = capsule,
+ __newindex = function(t,key,value)
+ if capsule[key] then
+ print(format("\ninvalid overload '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ rawset(t,key,value)
+ end
+ end
+ } )
+ end
+end
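+
+-- usage sketch with hypothetical tables (illustrative): the members of core move into
+-- capsule and, when protected, core becomes a proxy that refuses overloads
+--
+--~ local core, capsule = { reverse = string.reverse }, { }
+--~ tables.encapsulate(core,capsule,true)
+--~ print(core.reverse("context")) -- txetnoc (resolved via capsule)
+--~ core.reverse = print           -- reported as an invalid overload and aborts the run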
+
end -- of closure
@@ -4675,6 +4776,7 @@ end
local is_node = node and node.is_node
+local is_lpeg = lpeg and lpeg.type
function inspect(i) -- global function
local ti = type(i)
@@ -4682,6 +4784,8 @@ function inspect(i) -- global function
table.print(i,"table")
elseif is_node and is_node(i) then
table.print(nodes.astable(i),tostring(i))
+ elseif is_lpeg and is_lpeg(i) then
+ lpeg.print(i)
else
print(tostring(i))
end
@@ -4705,7 +4809,7 @@ if not modules then modules = { } end modules ['trac-inf'] = {
-- get warnings about assignments. This is more efficient than using rawset
-- and rawget.
-local format = string.format
+local format, lower = string.format, string.lower
local clock = os.gettimeofday or os.clock -- should go in environment
local write_nl = texio.write_nl
@@ -4807,7 +4911,7 @@ function statistics.show(reporter)
-- this code will move
local register = statistics.register
register("luatex banner", function()
- return string.lower(status.banner)
+ return lower(status.banner)
end)
register("control sequences", function()
return format("%s of %s", status.cs_count, status.hash_size+status.hash_extra)
@@ -9773,7 +9877,7 @@ if not modules then modules = { } end modules ['data-ini'] = {
license = "see context related readme files",
}
-local gsub, find, gmatch = string.gsub, string.find, string.gmatch
+local gsub, find, gmatch, char = string.gsub, string.find, string.gmatch, string.char
local concat = table.concat
local next, type = next, type
@@ -9835,7 +9939,7 @@ do
local homedir = osgetenv(ostype == "windows" and 'USERPROFILE' or 'HOME') or ''
if not homedir or homedir == "" then
- homedir = string.char(127) -- we need a value, later we wil trigger on it
+        homedir = char(127) -- we need a value, later we will trigger on it
end
homedir = file.collapsepath(homedir)
@@ -10008,7 +10112,7 @@ if not modules then modules = { } end modules ['data-exp'] = {
license = "see context related readme files",
}
-local format, find, gmatch, lower = string.format, string.find, string.gmatch, string.lower
+local format, find, gmatch, lower, char = string.format, string.find, string.gmatch, string.lower, string.char
local concat, sort = table.concat, table.sort
local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local Ct, Cs, Cc, P, C, S = lpeg.Ct, lpeg.Cs, lpeg.Cc, lpeg.P, lpeg.C, lpeg.S
@@ -10142,7 +10246,7 @@ local homedir
function resolvers.cleanpath(str)
if not homedir then
homedir = lpegmatch(cleanup,environment.homedir or "")
- if homedir == string.char(127) or homedir == "" or not lfs.isdir(homedir) then
+ if homedir == char(127) or homedir == "" or not lfs.isdir(homedir) then
if trace_expansions then
report_expansions("no home dir set, ignoring dependent paths")
end
@@ -10191,8 +10295,8 @@ end
local cache = { }
----- splitter = Ct(lpeg.splitat(S(ostype == "windows" and ";" or ":;"))) -- maybe add ,
-local splitter = Ct(lpeg.splitat(";")) -- as we move towards urls, prefixes and use tables we no longer do :
+----- splitter = lpeg.tsplitat(S(ostype == "windows" and ";" or ":;")) -- maybe add ,
+local splitter = lpeg.tsplitat(";") -- as we move towards urls, prefixes and use tables we no longer do :
local backslashswapper = lpeg.replacer("\\","/")
@@ -10640,6 +10744,7 @@ luatools with a recache feature.</p>
--ldx]]--
local format, lower, gsub, concat = string.format, string.lower, string.gsub, table.concat
+local serialize, serializetofile = table.serialize, table.tofile
local mkdirs, isdir = dir.mkdirs, lfs.isdir
local trace_locating = false trackers.register("resolvers.locating", function(v) trace_locating = v end)
@@ -10793,7 +10898,7 @@ function caches.usedpaths()
end
function caches.configfiles()
- return table.concat(resolvers.instance.specification,";")
+ return concat(resolvers.instance.specification,";")
end
function caches.hashed(tree)
@@ -10917,9 +11022,9 @@ function caches.savedata(filepath,filename,data,raw)
end
data.cache_uuid = os.uuid()
if caches.direct then
- file.savedata(tmaname,table.serialize(data,true,saveoptions))
+ file.savedata(tmaname,serialize(data,true,saveoptions))
else
- table.tofile(tmaname,data,true,saveoptions)
+ serializetofile(tmaname,data,true,saveoptions)
end
utilities.lua.compile(tmaname,tmcname)
end
@@ -10986,7 +11091,7 @@ function caches.savecontent(cachename,dataname,content)
content = content,
uuid = os.uuid(),
}
- local ok = io.savedata(luaname,table.serialize(data,true))
+ local ok = io.savedata(luaname,serialize(data,true))
if ok then
if trace_locating then
report_resolvers("category '%s', cachename '%s' saved in '%s'",dataname,cachename,luaname)
@@ -13941,6 +14046,7 @@ if not modules then modules = { } end modules ['data-lst'] = {
-- used in mtxrun, can be loaded later .. todo
local find, concat, upper, format = string.find, table.concat, string.upper, string.format
+local fastcopy, sortedpairs = table.fastcopy, table.sortedpairs
resolvers.listers = resolvers.listers or { }
@@ -13971,10 +14077,10 @@ function resolvers.listers.variables(pattern)
end
end
end
- local env = table.fastcopy(environment)
- local var = table.fastcopy(variables)
- local exp = table.fastcopy(expansions)
- for key, value in table.sortedpairs(configured) do
+ local env = fastcopy(environment)
+ local var = fastcopy(variables)
+ local exp = fastcopy(expansions)
+ for key, value in sortedpairs(configured) do
if key ~= "" and (pattern == "" or find(upper(key),pattern)) then
report_lists(key)
report_lists(" env: %s",tabstr(rawget(environment,key)) or "unset")
@@ -13983,9 +14089,9 @@ function resolvers.listers.variables(pattern)
report_lists(" res: %s",resolvers.resolve(expansions[key]) or "unset")
end
end
- instance.environment = table.fastcopy(env)
- instance.variables = table.fastcopy(var)
- instance.expansions = table.fastcopy(exp)
+ instance.environment = fastcopy(env)
+ instance.variables = fastcopy(var)
+ instance.expansions = fastcopy(exp)
end
function resolvers.listers.configurations(report)
@@ -14272,8 +14378,8 @@ own = { } -- not local, might change
own.libs = { -- order can be made better
'l-string.lua',
- 'l-lpeg.lua',
'l-table.lua',
+ 'l-lpeg.lua',
'l-io.lua',
'l-number.lua',
'l-set.lua',
diff --git a/scripts/context/stubs/unix/mtxrun b/scripts/context/stubs/unix/mtxrun
index d0cf3d46d..6a8b2e99b 100644
--- a/scripts/context/stubs/unix/mtxrun
+++ b/scripts/context/stubs/unix/mtxrun
@@ -160,509 +160,6 @@ end -- of closure
do -- create closure to overcome 200 locals limit
-if not modules then modules = { } end modules ['l-lpeg'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local lpeg = require("lpeg")
-
-local type = type
-
--- Beware, we predefine a bunch of patterns here and one reason for doing so
--- is that we get consistent behaviour in some of the visualizers.
-
-lpeg.patterns = lpeg.patterns or { } -- so that we can share
-local patterns = lpeg.patterns
-
-local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
-local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
-local lpegtype = lpeg.type
-
-local utfcharacters = string.utfcharacters
-local utfgmatch = unicode and unicode.utf8.gmatch
-
-local anything = P(1)
-local endofstring = P(-1)
-local alwaysmatched = P(true)
-
-patterns.anything = anything
-patterns.endofstring = endofstring
-patterns.beginofstring = alwaysmatched
-patterns.alwaysmatched = alwaysmatched
-
-local digit, sign = R('09'), S('+-')
-local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
-local newline = crlf + cr + lf
-local escaped = P("\\") * anything
-local squote = P("'")
-local dquote = P('"')
-local space = P(" ")
-
-local utfbom_32_be = P('\000\000\254\255')
-local utfbom_32_le = P('\255\254\000\000')
-local utfbom_16_be = P('\255\254')
-local utfbom_16_le = P('\254\255')
-local utfbom_8 = P('\239\187\191')
-local utfbom = utfbom_32_be + utfbom_32_le
- + utfbom_16_be + utfbom_16_le
- + utfbom_8
-local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
- + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
- + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
-
-local utf8next = R("\128\191")
-
-patterns.utf8one = R("\000\127")
-patterns.utf8two = R("\194\223") * utf8next
-patterns.utf8three = R("\224\239") * utf8next * utf8next
-patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
-patterns.utfbom = utfbom
-patterns.utftype = utftype
-
-local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
-local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
-
-patterns.utf8 = utf8char
-patterns.utf8char = utf8char
-patterns.validutf8 = validutf8char
-patterns.validutf8char = validutf8char
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.cardinal = sign^0 * digit^1
-patterns.integer = sign^0 * digit^1
-patterns.float = sign^0 * digit^0 * P('.') * digit^1
-patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
-patterns.number = patterns.float + patterns.integer
-patterns.cnumber = patterns.cfloat + patterns.integer
-patterns.oct = P("0") * R("07")^1
-patterns.octal = patterns.oct
-patterns.HEX = P("0x") * R("09","AF")^1
-patterns.hex = P("0x") * R("09","af")^1
-patterns.hexadecimal = P("0x") * R("09","AF","af")^1
-patterns.lowercase = R("az")
-patterns.uppercase = R("AZ")
-patterns.letter = patterns.lowercase + patterns.uppercase
-patterns.space = space
-patterns.tab = P("\t")
-patterns.spaceortab = patterns.space + patterns.tab
-patterns.eol = S("\n\r")
-patterns.spacer = S(" \t\f\v") -- + string.char(0xc2, 0xa0) if we want utf (cf mail roberto)
-patterns.newline = newline
-patterns.emptyline = newline^1
-patterns.nonspacer = 1 - patterns.spacer
-patterns.whitespace = patterns.eol + patterns.spacer
-patterns.nonwhitespace = 1 - patterns.whitespace
-patterns.equal = P("=")
-patterns.comma = P(",")
-patterns.commaspacer = P(",") * patterns.spacer^0
-patterns.period = P(".")
-patterns.colon = P(":")
-patterns.semicolon = P(";")
-patterns.underscore = P("_")
-patterns.escaped = escaped
-patterns.squote = squote
-patterns.dquote = dquote
-patterns.nosquote = (escaped + (1-squote))^0
-patterns.nodquote = (escaped + (1-dquote))^0
-patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
-patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
-patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
-patterns.unspacer = ((patterns.spacer^1)/"")^0
-
-patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
-patterns.beginline = #(1-newline)
-
-local unquoted = Cs(patterns.unquoted * endofstring) -- not C
-
-function string.unquoted(str)
- return match(unquoted,str) or str
-end
-
-
-function lpeg.anywhere(pattern) --slightly adapted from website
- return P { P(pattern) + 1 * V(1) } -- why so complex?
-end
-
-function lpeg.splitter(pattern, action)
- return (((1-P(pattern))^1)/action+1)^0
-end
-
-local splitters_s, splitters_m = { }, { }
-
-local function splitat(separator,single)
- local splitter = (single and splitters_s[separator]) or splitters_m[separator]
- if not splitter then
- separator = P(separator)
- local other = C((1 - separator)^0)
- if single then
- local any = anything
- splitter = other * (separator * C(any^0) + "") -- ?
- splitters_s[separator] = splitter
- else
- splitter = other * (separator * other)^0
- splitters_m[separator] = splitter
- end
- end
- return splitter
-end
-
-lpeg.splitat = splitat
-
-
-local cache = { }
-
-function lpeg.split(separator,str)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.split(str,separator)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-local spacing = patterns.spacer^0 * newline -- sort of strip
-local empty = spacing * Cc("")
-local nonempty = Cs((1-spacing)^1) * spacing^-1
-local content = (empty + nonempty)^1
-
-patterns.textline = content
-
-
-local linesplitter = Ct(splitat(newline))
-
-patterns.linesplitter = linesplitter
-
-function string.splitlines(str)
- return match(linesplitter,str)
-end
-
-local utflinesplitter = utfbom^-1 * Ct(splitat(newline))
-
-patterns.utflinesplitter = utflinesplitter
-
-function string.utfsplitlines(str)
- return match(utflinesplitter,str)
-end
-
-
-local cache = { }
-
-function lpeg.checkedsplit(separator,str)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.checkedsplit(str,separator)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-
-local f1 = string.byte
-
-local function f2(s) local c1, c2 = f1(s,1,2) return c1 * 64 + c2 - 12416 end
-local function f3(s) local c1, c2, c3 = f1(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
-local function f4(s) local c1, c2, c3, c4 = f1(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
-
-local utf8byte = patterns.utf8one/f1 + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
-
-patterns.utf8byte = utf8byte
-
-
-
-local cache = { }
-
-function lpeg.stripper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs(((S(str)^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs(((str^1)/"" + 1)^0)
- end
-end
-
-local cache = { }
-
-function lpeg.keeper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs((((1-S(str))^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs((((1-str)^1)/"" + 1)^0)
- end
-end
-
-function lpeg.frontstripper(str) -- or pattern (yet undocumented)
- return (P(str) + P(true)) * Cs(P(1)^0)
-end
-
-function lpeg.endstripper(str) -- or pattern (yet undocumented)
- return Cs((1 - P(str) * P(-1))^0)
-end
-
--- Just for fun I looked at the used bytecode and
--- p = (p and p + pp) or pp gets one more (testset).
-
-function lpeg.replacer(one,two)
- if type(one) == "table" then
- local no = #one
- if no > 0 then
- local p
- for i=1,no do
- local o = one[i]
- local pp = P(o[1]) / o[2]
- if p then
- p = p + pp
- else
- p = pp
- end
- end
- return Cs((p + 1)^0)
- end
- else
- two = two or ""
- return Cs((P(one)/two + 1)^0)
- end
-end
-
-local splitters_f, splitters_s = { }, { }
-
-function lpeg.firstofsplit(separator) -- always return value
- local splitter = splitters_f[separator]
- if not splitter then
- separator = P(separator)
- splitter = C((1 - separator)^0)
- splitters_f[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.secondofsplit(separator) -- nil if not split
- local splitter = splitters_s[separator]
- if not splitter then
- separator = P(separator)
- splitter = (1 - separator)^0 * separator * C(anything^0)
- splitters_s[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.balancer(left,right)
- left, right = P(left), P(right)
- return P { left * ((1 - left - right) + V(1))^0 * right }
-end
-
-
-
-local nany = utf8char/""
-
-function lpeg.counter(pattern)
- pattern = Cs((P(pattern)/" " + nany)^0)
- return function(str)
- return #match(pattern,str)
- end
-end
-
-if utfgmatch then
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local n = 0
- for _ in utfgmatch(str,what) do
- n = n + 1
- end
- return n
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-else
-
- local cache = { }
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local p = cache[what]
- if not p then
- p = Cs((P(what)/" " + nany)^0)
- cache[p] = p
- end
- return #match(p,str)
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-end
-
-local patterns_escapes = { -- also defines in l-string
- ["%"] = "%%",
- ["."] = "%.",
- ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
- ["["] = "%[", ["]"] = "%]",
- ["("] = "%)", [")"] = "%)",
- -- ["{"] = "%{", ["}"] = "%}"
- -- ["^"] = "%^", ["$"] = "%$",
-}
-
-local simple_escapes = { -- also defines in l-string
- ["-"] = "%-",
- ["."] = "%.",
- ["?"] = ".",
- ["*"] = ".*",
-}
-
-local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
-local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
-
-function string.escapedpattern(str,simple)
- return match(simple and s or p,str)
-end
-
--- utf extensies
-
-lpeg.UP = lpeg.P
-
-if utfcharacters then
-
- function lpeg.US(str)
- local p
- for uc in utfcharacters(str) do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-
-elseif utfgmatch then
-
- function lpeg.US(str)
- local p
- for uc in utfgmatch(str,".") do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-else
-
- function lpeg.US(str)
- local p
- local f = function(uc)
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- match((utf8char/f)^0,str)
- return p
- end
-
-end
-
-local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
-
-local utfchar = unicode and unicode.utf8 and unicode.utf8.char
-
-function lpeg.UR(str,more)
- local first, last
- if type(str) == "number" then
- first = str
- last = more or first
- else
- first, last = match(range,str)
- if not last then
- return P(str)
- end
- end
- if first == last then
- return P(str)
- elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterium
- local p
- for i=first,last do
- if p then
- p = p + P(utfchar(i))
- else
- p = P(utfchar(i))
- end
- end
- return p -- nil when invalid range
- else
- local f = function(b)
- return b >= first and b <= last
- end
- return utf8byte / f -- nil when invalid range
- end
-end
-
-
-
-function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
- if type(list) ~= "table" then
- list = { list, ... }
- end
- -- sort(list) -- longest match first
- local p = P(list[1])
- for l=2,#list do
- p = p + P(list[l])
- end
- return p
-end
-
-function lpeg.is_lpeg(p)
- return p and lpegtype(p) == "pattern"
-end
-
-
-
-end -- of closure
-
-do -- create closure to overcome 200 locals limit
-
if not modules then modules = { } end modules ['l-table'] = {
version = 1.001,
comment = "companion to luat-lib.mkiv",
@@ -1575,6 +1072,576 @@ function table.has_one_entry(t)
return t and not next(t,next(t))
end
+-- new
+
+function table.loweredkeys(t) -- maybe utf
+ local l = { }
+ for k, v in next, t do
+ l[lower(k)] = v
+ end
+ return l
+end
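+
+-- usage sketch (illustrative):
+--
+--~ local t = table.loweredkeys { Width = 10, Height = 20 }
+--~ print(t.width, t.height) -- 10   20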
+
+
+end -- of closure
+
+do -- create closure to overcome 200 locals limit
+
+if not modules then modules = { } end modules ['l-lpeg'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local lpeg = require("lpeg")
+
+local type = type
+local byte, char = string.byte, string.char
+
+-- Beware, we predefine a bunch of patterns here and one reason for doing so
+-- is that we get consistent behaviour in some of the visualizers.
+
+lpeg.patterns = lpeg.patterns or { } -- so that we can share
+local patterns = lpeg.patterns
+
+local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
+local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
+local lpegtype = lpeg.type
+
+local utfcharacters = string.utfcharacters
+local utfgmatch = unicode and unicode.utf8.gmatch
+
+local anything = P(1)
+local endofstring = P(-1)
+local alwaysmatched = P(true)
+
+patterns.anything = anything
+patterns.endofstring = endofstring
+patterns.beginofstring = alwaysmatched
+patterns.alwaysmatched = alwaysmatched
+
+local digit, sign = R('09'), S('+-')
+local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
+local newline = crlf + cr + lf
+local escaped = P("\\") * anything
+local squote = P("'")
+local dquote = P('"')
+local space = P(" ")
+
+local utfbom_32_be = P('\000\000\254\255')
+local utfbom_32_le = P('\255\254\000\000')
+local utfbom_16_be = P('\255\254')
+local utfbom_16_le = P('\254\255')
+local utfbom_8 = P('\239\187\191')
+local utfbom = utfbom_32_be + utfbom_32_le
+ + utfbom_16_be + utfbom_16_le
+ + utfbom_8
+local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
+ + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
+ + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
+
+local utf8next = R("\128\191")
+
+patterns.utf8one = R("\000\127")
+patterns.utf8two = R("\194\223") * utf8next
+patterns.utf8three = R("\224\239") * utf8next * utf8next
+patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
+patterns.utfbom = utfbom
+patterns.utftype = utftype
+
+local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
+local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
+
+patterns.utf8 = utf8char
+patterns.utf8char = utf8char
+patterns.validutf8 = validutf8char
+patterns.validutf8char = validutf8char
+
+patterns.digit = digit
+patterns.sign = sign
+patterns.cardinal = sign^0 * digit^1
+patterns.integer = sign^0 * digit^1
+patterns.float = sign^0 * digit^0 * P('.') * digit^1
+patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
+patterns.number = patterns.float + patterns.integer
+patterns.cnumber = patterns.cfloat + patterns.integer
+patterns.oct = P("0") * R("07")^1
+patterns.octal = patterns.oct
+patterns.HEX = P("0x") * R("09","AF")^1
+patterns.hex = P("0x") * R("09","af")^1
+patterns.hexadecimal = P("0x") * R("09","AF","af")^1
+patterns.lowercase = R("az")
+patterns.uppercase = R("AZ")
+patterns.letter = patterns.lowercase + patterns.uppercase
+patterns.space = space
+patterns.tab = P("\t")
+patterns.spaceortab = patterns.space + patterns.tab
+patterns.eol = S("\n\r")
+patterns.spacer = S(" \t\f\v") -- + char(0xc2, 0xa0) if we want utf (cf mail roberto)
+patterns.newline = newline
+patterns.emptyline = newline^1
+patterns.nonspacer = 1 - patterns.spacer
+patterns.whitespace = patterns.eol + patterns.spacer
+patterns.nonwhitespace = 1 - patterns.whitespace
+patterns.equal = P("=")
+patterns.comma = P(",")
+patterns.commaspacer = P(",") * patterns.spacer^0
+patterns.period = P(".")
+patterns.colon = P(":")
+patterns.semicolon = P(";")
+patterns.underscore = P("_")
+patterns.escaped = escaped
+patterns.squote = squote
+patterns.dquote = dquote
+patterns.nosquote = (escaped + (1-squote))^0
+patterns.nodquote = (escaped + (1-dquote))^0
+patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
+patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
+patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
+patterns.unspacer = ((patterns.spacer^1)/"")^0
+
+patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
+patterns.beginline = #(1-newline)
+
+local unquoted = Cs(patterns.unquoted * endofstring) -- not C
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+-- more efficient:
+
+local unquoted = (
+    squote * Cs((1 - P(-2))^0) * squote
+  + dquote * Cs((1 - P(-2))^0) * dquote
+)
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+patterns.unquoted = unquoted
+
+
+function lpeg.anywhere(pattern) --slightly adapted from website
+ return P { P(pattern) + 1 * V(1) } -- why so complex?
+end
+
+function lpeg.splitter(pattern, action)
+ return (((1-P(pattern))^1)/action+1)^0
+end
+
+function lpeg.tsplitter(pattern, action)
+ return Ct((((1-P(pattern))^1)/action+1)^0)
+end
+
+-- problem: the separator can be an lpeg pattern and such a pattern does not hash
+-- too well, but that is quite okay as the key is then not garbage collected
+
+local splitters_s, splitters_m, splitters_t = { }, { }, { }
+
+local function splitat(separator,single)
+ local splitter = (single and splitters_s[separator]) or splitters_m[separator]
+ if not splitter then
+ separator = P(separator)
+ local other = C((1 - separator)^0)
+ if single then
+ local any = anything
+ splitter = other * (separator * C(any^0) + "") -- ?
+ splitters_s[separator] = splitter
+ else
+ splitter = other * (separator * other)^0
+ splitters_m[separator] = splitter
+ end
+ end
+ return splitter
+end
+
+local function tsplitat(separator)
+ local splitter = splitters_t[separator]
+ if not splitter then
+ splitter = Ct(splitat(separator))
+ splitters_t[separator] = splitter
+ end
+ return splitter
+end
+
+lpeg.splitat = splitat
+lpeg.tsplitat = tsplitat
+
+
+local cache = { }
+
+function lpeg.split(separator,str)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.split(str,separator)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+local spacing = patterns.spacer^0 * newline -- sort of strip
+local empty = spacing * Cc("")
+local nonempty = Cs((1-spacing)^1) * spacing^-1
+local content = (empty + nonempty)^1
+
+patterns.textline = content
+
+
+local linesplitter = tsplitat(newline)
+
+patterns.linesplitter = linesplitter
+
+function string.splitlines(str)
+ return match(linesplitter,str)
+end
+
+local utflinesplitter = utfbom^-1 * tsplitat(newline)
+
+patterns.utflinesplitter = utflinesplitter
+
+function string.utfsplitlines(str)
+ return match(utflinesplitter,str)
+end
+
+
+local cache = { }
+
+function lpeg.checkedsplit(separator,str)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.checkedsplit(str,separator)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+
+local function f2(s) local c1, c2 = byte(s,1,2) return c1 * 64 + c2 - 12416 end
+local function f3(s) local c1, c2, c3 = byte(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
+local function f4(s) local c1, c2, c3, c4 = byte(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
+
+local utf8byte = patterns.utf8one/byte + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
+
+patterns.utf8byte = utf8byte
+
+
+
+local cache = { }
+
+function lpeg.stripper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs(((S(str)^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs(((str^1)/"" + 1)^0)
+ end
+end
+
+local cache = { }
+
+function lpeg.keeper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs((((1-S(str))^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs((((1-str)^1)/"" + 1)^0)
+ end
+end
+
+function lpeg.frontstripper(str) -- or pattern (yet undocumented)
+ return (P(str) + P(true)) * Cs(P(1)^0)
+end
+
+function lpeg.endstripper(str) -- or pattern (yet undocumented)
+ return Cs((1 - P(str) * P(-1))^0)
+end
+
+-- Just for fun I looked at the used bytecode and
+-- p = (p and p + pp) or pp gets one more (testset).
+
+function lpeg.replacer(one,two)
+ if type(one) == "table" then
+ local no = #one
+ if no > 0 then
+ local p
+ for i=1,no do
+ local o = one[i]
+ local pp = P(o[1]) / o[2]
+ if p then
+ p = p + pp
+ else
+ p = pp
+ end
+ end
+ return Cs((p + 1)^0)
+ end
+ else
+ two = two or ""
+ return Cs((P(one)/two + 1)^0)
+ end
+end
+
+local splitters_f, splitters_s = { }, { }
+
+function lpeg.firstofsplit(separator) -- always return value
+ local splitter = splitters_f[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = C((1 - separator)^0)
+ splitters_f[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.secondofsplit(separator) -- nil if not split
+ local splitter = splitters_s[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = (1 - separator)^0 * separator * C(anything^0)
+ splitters_s[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.balancer(left,right)
+ left, right = P(left), P(right)
+ return P { left * ((1 - left - right) + V(1))^0 * right }
+end
+
+
+
+local nany = utf8char/""
+
+function lpeg.counter(pattern)
+ pattern = Cs((P(pattern)/" " + nany)^0)
+ return function(str)
+ return #match(pattern,str)
+ end
+end
+
+if utfgmatch then
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local n = 0
+ for _ in utfgmatch(str,what) do
+ n = n + 1
+ end
+ return n
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+else
+
+ local cache = { }
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local p = cache[what]
+ if not p then
+ p = Cs((P(what)/" " + nany)^0)
+        cache[what] = p
+ end
+ return #match(p,str)
+ else -- 4 times slower but still faster than / function
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+end
+
+local patterns_escapes = { -- also defined in l-string
+ ["%"] = "%%",
+ ["."] = "%.",
+ ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
+ ["["] = "%[", ["]"] = "%]",
+    ["("] = "%(", [")"] = "%)",
+ -- ["{"] = "%{", ["}"] = "%}"
+ -- ["^"] = "%^", ["$"] = "%$",
+}
+
+local simple_escapes = { -- also defined in l-string
+ ["-"] = "%-",
+ ["."] = "%.",
+ ["?"] = ".",
+ ["*"] = ".*",
+}
+
+local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
+local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
+
+function string.escapedpattern(str,simple)
+ return match(simple and s or p,str)
+end
+
+-- utf extensions
+
+lpeg.UP = lpeg.P
+
+if utfcharacters then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfcharacters(str) do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+
+elseif utfgmatch then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfgmatch(str,".") do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+else
+
+ function lpeg.US(str)
+ local p
+ local f = function(uc)
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ match((utf8char/f)^0,str)
+ return p
+ end
+
+end
+
+local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
+
+local utfchar = unicode and unicode.utf8 and unicode.utf8.char
+
+function lpeg.UR(str,more)
+ local first, last
+ if type(str) == "number" then
+ first = str
+ last = more or first
+ else
+ first, last = match(range,str)
+ if not last then
+ return P(str)
+ end
+ end
+ if first == last then
+ return P(str)
+    elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterion
+ local p
+ for i=first,last do
+ if p then
+ p = p + P(utfchar(i))
+ else
+ p = P(utfchar(i))
+ end
+ end
+ return p -- nil when invalid range
+ else
+ local f = function(b)
+ return b >= first and b <= last
+ end
+ return utf8byte / f -- nil when invalid range
+ end
+end
+
+
+
+function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
+ if type(list) ~= "table" then
+ list = { list, ... }
+ end
+ -- sort(list) -- longest match first
+ local p = P(list[1])
+ for l=2,#list do
+ p = p + P(list[l])
+ end
+ return p
+end
+
+function lpeg.is_lpeg(p)
+ return p and lpegtype(p) == "pattern"
+end
+
+-- For the moment here, but it might move to utilities:
+
+local sort, fastcopy, sortedpairs = table.sort, table.fastcopy, table.sortedpairs -- dependency!
+
+function lpeg.append(list,pp)
+ local p = pp
+ if #list > 0 then
+ list = fastcopy(list)
+ sort(list)
+ for l=1,#list do
+ if p then
+ p = P(list[l]) + p
+ else
+ p = P(list[l])
+ end
+ end
+ else
+ for k, v in sortedpairs(list) do
+ if p then
+ p = P(k)/v + p
+ else
+ p = P(k)/v
+ end
+ end
+ end
+ return p
+end
+
+
end -- of closure
@@ -3399,10 +3466,6 @@ local type, tonumber = type, tonumber
boolean = boolean or { }
local boolean = boolean
--- function boolean.tonumber(b)
--- return b and 1 or 0 -- test and test and return or return
--- end
-
function boolean.tonumber(b)
if b then return 1 else return 0 end -- test and return or return
end
@@ -3809,6 +3872,7 @@ local tables = utilities.tables
local format, gmatch, rep = string.format, string.gmatch, string.rep
local concat, insert, remove = table.concat, table.insert, table.remove
local setmetatable, getmetatable, tonumber, tostring = setmetatable, getmetatable, tonumber, tostring
+local type, next, rawset = type, next, rawset
function tables.definetable(target) -- defines undefined tables
local composed, t, n = nil, { }, 0
@@ -3902,6 +3966,43 @@ function table.toxml(t,name,nobanner,indent,spaces)
return concat(result,"\n")
end
+-- also experimental
+
+-- encapsulate(table,utilities.tables)
+-- encapsulate(table,utilities.tables,true)
+-- encapsulate(table,true)
+
+function tables.encapsulate(core,capsule,protect)
+ if type(capsule) ~= "table" then
+ protect = true
+ capsule = { }
+ end
+ for key, value in next, core do
+ if capsule[key] then
+            print(format("\ninvalid inheritance '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ capsule[key] = value
+ end
+ end
+ if protect then
+ for key, value in next, core do
+ core[key] = nil
+ end
+ setmetatable(core, {
+ __index = capsule,
+ __newindex = function(t,key,value)
+ if capsule[key] then
+ print(format("\ninvalid overload '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ rawset(t,key,value)
+ end
+ end
+ } )
+ end
+end
+
end -- of closure
@@ -4675,6 +4776,7 @@ end
local is_node = node and node.is_node
+local is_lpeg = lpeg and lpeg.type
function inspect(i) -- global function
local ti = type(i)
@@ -4682,6 +4784,8 @@ function inspect(i) -- global function
table.print(i,"table")
elseif is_node and is_node(i) then
table.print(nodes.astable(i),tostring(i))
+ elseif is_lpeg and is_lpeg(i) then
+ lpeg.print(i)
else
print(tostring(i))
end
@@ -4705,7 +4809,7 @@ if not modules then modules = { } end modules ['trac-inf'] = {
-- get warnings about assignments. This is more efficient than using rawset
-- and rawget.
-local format = string.format
+local format, lower = string.format, string.lower
local clock = os.gettimeofday or os.clock -- should go in environment
local write_nl = texio.write_nl
@@ -4807,7 +4911,7 @@ function statistics.show(reporter)
-- this code will move
local register = statistics.register
register("luatex banner", function()
- return string.lower(status.banner)
+ return lower(status.banner)
end)
register("control sequences", function()
return format("%s of %s", status.cs_count, status.hash_size+status.hash_extra)
@@ -9773,7 +9877,7 @@ if not modules then modules = { } end modules ['data-ini'] = {
license = "see context related readme files",
}
-local gsub, find, gmatch = string.gsub, string.find, string.gmatch
+local gsub, find, gmatch, char = string.gsub, string.find, string.gmatch, string.char
local concat = table.concat
local next, type = next, type
@@ -9835,7 +9939,7 @@ do
local homedir = osgetenv(ostype == "windows" and 'USERPROFILE' or 'HOME') or ''
if not homedir or homedir == "" then
- homedir = string.char(127) -- we need a value, later we wil trigger on it
+        homedir = char(127) -- we need a value, later we will trigger on it
end
homedir = file.collapsepath(homedir)
@@ -10008,7 +10112,7 @@ if not modules then modules = { } end modules ['data-exp'] = {
license = "see context related readme files",
}
-local format, find, gmatch, lower = string.format, string.find, string.gmatch, string.lower
+local format, find, gmatch, lower, char = string.format, string.find, string.gmatch, string.lower, string.char
local concat, sort = table.concat, table.sort
local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local Ct, Cs, Cc, P, C, S = lpeg.Ct, lpeg.Cs, lpeg.Cc, lpeg.P, lpeg.C, lpeg.S
@@ -10142,7 +10246,7 @@ local homedir
function resolvers.cleanpath(str)
if not homedir then
homedir = lpegmatch(cleanup,environment.homedir or "")
- if homedir == string.char(127) or homedir == "" or not lfs.isdir(homedir) then
+ if homedir == char(127) or homedir == "" or not lfs.isdir(homedir) then
if trace_expansions then
report_expansions("no home dir set, ignoring dependent paths")
end
@@ -10191,8 +10295,8 @@ end
local cache = { }
----- splitter = Ct(lpeg.splitat(S(ostype == "windows" and ";" or ":;"))) -- maybe add ,
-local splitter = Ct(lpeg.splitat(";")) -- as we move towards urls, prefixes and use tables we no longer do :
+----- splitter = lpeg.tsplitat(S(ostype == "windows" and ";" or ":;")) -- maybe add ,
+local splitter = lpeg.tsplitat(";") -- as we move towards urls, prefixes and use tables we no longer do :
local backslashswapper = lpeg.replacer("\\","/")
@@ -10640,6 +10744,7 @@ luatools with a recache feature.</p>
--ldx]]--
local format, lower, gsub, concat = string.format, string.lower, string.gsub, table.concat
+local serialize, serializetofile = table.serialize, table.tofile
local mkdirs, isdir = dir.mkdirs, lfs.isdir
local trace_locating = false trackers.register("resolvers.locating", function(v) trace_locating = v end)
@@ -10793,7 +10898,7 @@ function caches.usedpaths()
end
function caches.configfiles()
- return table.concat(resolvers.instance.specification,";")
+ return concat(resolvers.instance.specification,";")
end
function caches.hashed(tree)
@@ -10917,9 +11022,9 @@ function caches.savedata(filepath,filename,data,raw)
end
data.cache_uuid = os.uuid()
if caches.direct then
- file.savedata(tmaname,table.serialize(data,true,saveoptions))
+ file.savedata(tmaname,serialize(data,true,saveoptions))
else
- table.tofile(tmaname,data,true,saveoptions)
+ serializetofile(tmaname,data,true,saveoptions)
end
utilities.lua.compile(tmaname,tmcname)
end
@@ -10986,7 +11091,7 @@ function caches.savecontent(cachename,dataname,content)
content = content,
uuid = os.uuid(),
}
- local ok = io.savedata(luaname,table.serialize(data,true))
+ local ok = io.savedata(luaname,serialize(data,true))
if ok then
if trace_locating then
report_resolvers("category '%s', cachename '%s' saved in '%s'",dataname,cachename,luaname)
@@ -13941,6 +14046,7 @@ if not modules then modules = { } end modules ['data-lst'] = {
-- used in mtxrun, can be loaded later .. todo
local find, concat, upper, format = string.find, table.concat, string.upper, string.format
+local fastcopy, sortedpairs = table.fastcopy, table.sortedpairs
resolvers.listers = resolvers.listers or { }
@@ -13971,10 +14077,10 @@ function resolvers.listers.variables(pattern)
end
end
end
- local env = table.fastcopy(environment)
- local var = table.fastcopy(variables)
- local exp = table.fastcopy(expansions)
- for key, value in table.sortedpairs(configured) do
+ local env = fastcopy(environment)
+ local var = fastcopy(variables)
+ local exp = fastcopy(expansions)
+ for key, value in sortedpairs(configured) do
if key ~= "" and (pattern == "" or find(upper(key),pattern)) then
report_lists(key)
report_lists(" env: %s",tabstr(rawget(environment,key)) or "unset")
@@ -13983,9 +14089,9 @@ function resolvers.listers.variables(pattern)
report_lists(" res: %s",resolvers.resolve(expansions[key]) or "unset")
end
end
- instance.environment = table.fastcopy(env)
- instance.variables = table.fastcopy(var)
- instance.expansions = table.fastcopy(exp)
+ instance.environment = fastcopy(env)
+ instance.variables = fastcopy(var)
+ instance.expansions = fastcopy(exp)
end
function resolvers.listers.configurations(report)
@@ -14272,8 +14378,8 @@ own = { } -- not local, might change
own.libs = { -- order can be made better
'l-string.lua',
- 'l-lpeg.lua',
'l-table.lua',
+ 'l-lpeg.lua',
'l-io.lua',
'l-number.lua',
'l-set.lua',
diff --git a/tex/context/base/anch-pos.lua b/tex/context/base/anch-pos.lua
index 789e1aefe..f94ed2e9a 100644
--- a/tex/context/base/anch-pos.lua
+++ b/tex/context/base/anch-pos.lua
@@ -346,7 +346,7 @@ function commands.MPpos(id)
end
end
-local splitter = lpeg.Ct(lpeg.splitat(","))
+local splitter = lpeg.tsplitat(",")
function commands.MPplus(id,n,default)
local jpi = collected[id] or tobesaved[id]
diff --git a/tex/context/base/back-exp.lua b/tex/context/base/back-exp.lua
index 7e27bd259..01bf5dea4 100644
--- a/tex/context/base/back-exp.lua
+++ b/tex/context/base/back-exp.lua
@@ -6,6 +6,9 @@ if not modules then modules = { } end modules ['back-exp'] = {
license = "see context related readme files"
}
+-- depth can go away (autodepth now)
+
+
-- language -> only mainlanguage, local languages should happen through start/stoplanguage
-- tocs/registers -> maybe add a stripper (i.e. just don't flush entries in final tree)
@@ -48,10 +51,12 @@ end
nodes.locate = locate
local next, type = next, type
-local format, match, concat, rep, sub, gsub, gmatch = string.format, string.match, table.concat, string.rep, string.sub, string.gsub, string.gmatch
+local format, match, concat, rep, sub, gsub, gmatch, find = string.format, string.match, table.concat, string.rep, string.sub, string.gsub, string.gmatch, string.find
local lpegmatch = lpeg.match
local utfchar, utfbyte, utfsub, utfgsub = utf.char, utf.byte, utf.sub, utf.gsub
local insert, remove = table.insert, table.remove
+local topoints = number.topoints
+local utfvalues = string.utfvalues
local trace_export = false trackers.register ("structures.export", function(v) trace_export = v end)
local less_state = false directives.register("structures.export.lessstate", function(v) less_state = v end)
@@ -69,6 +74,7 @@ local settings_to_array = utilities.parsers.settings_to_array
local setmetatableindex = table.setmetatableindex
local tasks = nodes.tasks
local fontchar = fonts.hashes.characters
+local fontquads = fonts.hashes.quads
local languagenames = languages.numbers
local nodecodes = nodes.nodecodes
@@ -85,6 +91,7 @@ local disc_code = nodecodes.disc
local insert_code = nodecodes.insert
local whatsit_code = nodecodes.whatsit
local refximage_code = whatsitcodes.pdfrefximage
+local localpar_code = whatsitcodes.localpar
local userskip_code = skipcodes.userskip
local rightskip_code = skipcodes.rightskip
@@ -163,6 +170,7 @@ local tree = { data = { }, depth = 0, fulltag == "root" } -- root
local treeroot = tree
local treehash = { }
local extras = { }
+local checks = { }
local nofbreaks = 0
local used = { }
local exporting = false
@@ -218,6 +226,8 @@ local namespaces = {
mtr = "m",
mtd = "m",
mfenced = "m",
+ maction = "m",
+ mspace = "m",
}
setmetatableindex(namespaced, function(t,k)
@@ -253,7 +263,14 @@ end
local spaces = { } -- watch how we also moved the -1 in depth-1 to the creator
-setmetatableindex(spaces, function(t,k) local s = rep(" ",k-1) t[k] = s return s end)
+setmetatableindex(spaces, function(t,k)
+ if not k then
+ k = 1
+ end
+ local s = rep(" ",k-1)
+ t[k] = s
+ return s
+end)
function structurestags.setattributehash(fulltag,key,value)
if type(fulltag) == "number" then
@@ -713,8 +730,11 @@ local function checkmath(root) -- we can provide utf.toentities as an option
elseif #root.data == 1 then
local tg = d.tg
if automathrows and roottg == "mrow" then
+ -- maybe just always ! check spec first
if tg == "mrow" or tg == "mfenced" or tg == "mfrac" or tg == "mroot" then
root.skip = "comment"
+ elseif tg == "mo" then
+ root.skip = "comment"
end
elseif roottg == "mo" then
if tg == "mo" then
@@ -744,6 +764,19 @@ local function checkmath(root) -- we can provide utf.toentities as an option
elseif tg == "break" then
di.skip = "comment"
i = i + 1
+ elseif tg == "mrow" and detail then
+ di.detail = nil
+ checkmath(di)
+ di = {
+ element = "maction",
+ nature = "display",
+ depth = di.depth,
+ attributes = { actiontype = detail },
+ data = { di },
+ n = 0,
+ }
+ data[i] = di
+ i = i + 1
elseif detail then
-- no checkmath(di) here
local category = tonumber(detail) or 0
@@ -761,7 +794,7 @@ local function checkmath(root) -- we can provide utf.toentities as an option
category = category - 1000
end
if tg == "mi" then -- function
- if root.tg == "mrow" then
+ if roottg == "mrow" then
root.skip = "comment"
root.element = "function"
end
@@ -868,16 +901,39 @@ function stripmath(di)
end
end
-function extras.math(result,element,detail,n,fulltag,di)
- if di then
- local hash = attributehash[di.fulltag]
- di.attributes = {
- display = (hash and hash.mode) == "display" and "block" or "inline"
- }
- if automathstrip then
- stripmath(di)
+function checks.math(di)
+ local hash = attributehash[di.fulltag]
+ local mode = (hash and hash.mode) == "display" and "block" or "inline"
+ di.attributes = {
+ display = mode
+ }
+ -- can be option if needed:
+ if mode == "inline" then
+ di.nature = "mixed" -- "inline"
+ else
+ di.nature = "display"
+ end
+ if automathstrip then
+ stripmath(di)
+ end
+ checkmath(di)
+end
+
+local a, z, A, Z = 0x61, 0x7A, 0x41, 0x5A
+
+function extras.mi(result,element,detail,n,fulltag,di)
+ local str = di.data[1]
+    if str and sub(str,1,1) ~= "&" then -- hack but good enough (maybe gsub on the first one)
+ for v in utfvalues(str) do
+ if (v >= a and v <= z) or (v >= A and v <= Z) then
+ local a = di.attributes
+ if a then
+ a.mathvariant = "normal"
+ else
+ di.attributes = { mathvariant = "normal" }
+ end
+ end
end
- checkmath(di)
end
end
@@ -1006,6 +1062,24 @@ local function emptytag(result,element,nature,depth)
linedone = false
end
+local function btag(result,element,nature,depth)
+ if linedone then
+ result[#result+1] = format("%s<%s>\n",spaces[depth],namespaced[element])
+ else
+ result[#result+1] = format("\n%s<%s>\n",spaces[depth],namespaced[element])
+ end
+ linedone = false
+end
+
+local function etag(result,element,nature,depth)
+ if linedone then
+ result[#result+1] = format("%s</%s>\n",spaces[depth],namespaced[element])
+ else
+ result[#result+1] = format("\n%s</%s>\n",spaces[depth],namespaced[element])
+ end
+ linedone = false
+end
+
local function begintag(result,element,nature,depth,di,skip)
-- if needed we can use a local result with xresult
--~ local result = { }
@@ -1149,14 +1223,16 @@ local function endtag(result,element,nature,depth,skip)
end
end
-local function flushtree(result,data,nature)
+local function flushtree(result,data,nature,depth)
+ depth = depth + 1
local nofdata = #data
for i=1,nofdata do
local di = data[i]
if not di then -- or di == ""
- -- collapsed
+ -- whatever
elseif type(di) == "string" then
-di = utfgsub(di,".",entities)
+ -- already has breaks
+ di = utfgsub(di,".",entities) -- new
if i == nofdata and sub(di,-1) == "\n" then
if nature == "inline" or nature == "mixed" then
result[#result+1] = sub(di,1,-2)
@@ -1168,28 +1244,56 @@ di = utfgsub(di,".",entities)
result[#result+1] = di
end
linedone = false
- elseif not di.collapsed then
+ elseif not di.collapsed then -- ignore collapsed data (is appended, reconstructed par)
local element = di.element
if element == "break" or element == "pagebreak" then
- emptytag(result,element,nature,di.depth)
+ emptytag(result,element,nature,depth)
elseif element == "" or di.skip == "ignore" then
-- skip
else
if di.before then
- flushtree(result,di.before,nature)
+ flushtree(result,di.before,nature,depth)
end
- local nature, depth, skip = di.nature, di.depth, di.skip
- begintag(result,element,nature,depth,di,skip)
- flushtree(result,di.data,nature)
- endtag(result,element,nature,depth,skip)
+ local natu = di.nature
+ local skip = di.skip
+ if di.breaknode then
+ emptytag(result,"break","display",depth)
+ end
+ begintag(result,element,natu,depth,di,skip)
+ flushtree(result,di.data,natu,depth)
+ endtag(result,element,natu,depth,skip)
+ -- if pdone then
+ -- etag(result,"p","display",depth)
+ -- end
if di.after then
- flushtree(result,di.after,nature)
+ flushtree(result,di.after,nature,depth)
end
end
end
end
end
+local function breaktree(tree)
+--~ local data = tree.data
+--~ local parnumber = tree.parnumber
+--~ local nofdata = #data
+--~ for i=1,nofdata do
+--~ local di = data[i]
+--~ if di and type(di) == "table" and not di.collapsed then
+--~ local element = di.element
+--~ if element == "break" or element == "pagebreak" or element == "" or di.skip == "ignore" then
+--~ -- do nothing
+--~ else
+--~ local pn = di.parnumber
+--~ if parnumber and pn and di.nature == "inline" and parnumber ~= pn then
+--~ di.breaknode = true
+--~ end
+--~ breaktree(di)
+--~ end
+--~ end
+--~ end
+end
+
-- finalizers
local function checkinserts(data)
@@ -1217,7 +1321,9 @@ local function checkinserts(data)
end
end
---~ local function collapsetree() -- maybe better traverse tree (par stuff)
+-- tabulaterow reconstruction .. might better be done by a checker (TO BE CHECKED)
+
+--~ local function xcollapsetree() -- unwanted space injection
--~ for tag, trees in next, treehash do
--~ local d = trees[1].data
--~ if d then
@@ -1262,7 +1368,7 @@ end
--~ if not currentpar then
--~ if not spacedone and not breakdone then
--~ nd = nd + 1
---~ d[nd] = " " --
+--~ d[nd] = " " -- brr adds space in unwanted places (like math)
--~ spacedone = true
--~ end
--~ previouspar = nil
@@ -1304,7 +1410,7 @@ end
--~ end
--~ end
-local function collapsetree() -- maybe better traverse tree (par stuff)
+local function collapsetree()
for tag, trees in next, treehash do
local d = trees[1].data
if d then
@@ -1313,8 +1419,10 @@ local function collapsetree() -- maybe better traverse tree (par stuff)
for i=2,#trees do
local currenttree = trees[i]
local currentdata = currenttree.data
+ local currentpar = currenttree.parnumber
local previouspar = trees[i-1].parnumber
currenttree.collapsed = true
+ -- is the next ok?
if previouspar == 0 or type(currentdata[1]) ~= "string" then
previouspar = nil -- no need anyway so no further testing needed
end
@@ -1323,7 +1431,6 @@ local function collapsetree() -- maybe better traverse tree (par stuff)
if not cd or cd == "" then
-- skip
elseif type(cd) == "string" then
- local currentpar = d.parnumber
if not currentpar then
-- add space ?
elseif not previouspar then
@@ -1361,6 +1468,22 @@ local function indextree(tree)
end
end
+local function checktree(tree)
+ local data = tree.data
+ if data then
+ for i=1,#data do
+ local d = data[i]
+ if type(d) == "table" then
+ local check = checks[d.tg]
+ if check then
+ check(d)
+ end
+ checktree(d)
+ end
+ end
+ end
+end
+
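+-- A hypothetical illustration (not part of this patch): checktree dispatches on the
+-- element tag (d.tg), so a tag specific fixer would be registered by adding a
+-- function to the checks table, for instance:
+--
+--~ checks["tabulaterow"] = function(d)
+--~     -- inspect or repair d.data here
+--~ end
+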
-- collector code
local function push(fulltag,depth)
@@ -1395,7 +1518,11 @@ local function push(fulltag,depth)
nesting[currentdepth] = fulltag
treestack[currentdepth] = tree
if trace_export then
- report_export("%s<%s trigger='%s' index='%s'>",spaces[currentdepth-1],fulltag,currentattribute,#treedata)
+ if detail and detail ~= "" then
+ report_export("%s<%s trigger='%s' paragraph='%s' index='%s' detail='%s'>",spaces[currentdepth-1],fulltag,currentattribute or 0,currentparagraph or 0,#treedata,detail)
+ else
+ report_export("%s<%s trigger='%s' paragraph='%s' index='%s'>",spaces[currentdepth-1],fulltag,currentattribute or 0,currentparagraph or 0,#treedata)
+ end
end
tree = t
if tg == "break" then
@@ -1557,6 +1684,7 @@ local function finishexport()
end
local function collectresults(head,list)
+ local p
for n in traverse_nodes(head) do
local id = n.id -- 14: image, 8: literal (mp)
if id == glyph_code then
@@ -1621,25 +1749,6 @@ local function collectresults(head,list)
if trace_export then
report_export("%s<!-- skipping last glyph -->",spaces[currentdepth])
end
- -- skip
---~ elseif c == 0x26 then
---~ nofcurrentcontent = nofcurrentcontent + 1
---~ currentcontent[nofcurrentcontent] = "&amp;"
---~ if trace_export then
---~ report_export("%s<!-- turning last glyph into entity &amp;-->",spaces[currentdepth])
---~ end
---~ elseif c == 0x3E then
---~ nofcurrentcontent = nofcurrentcontent + 1
---~ currentcontent[nofcurrentcontent] = "&gt;"
---~ if trace_export then
---~ report_export("%s<!-- turning last glyph into entity &gt;-->",spaces[currentdepth])
---~ end
---~ elseif c == 0x3C then
---~ nofcurrentcontent = nofcurrentcontent + 1
---~ currentcontent[nofcurrentcontent] = "&lt;"
---~ if trace_export then
---~ report_export("%s<!-- turning last glyph into entity &lt;-->",spaces[currentdepth])
---~ end
elseif c == 0x20 then
local a = has_attribute(n,a_characters)
nofcurrentcontent = nofcurrentcontent + 1
@@ -1733,9 +1842,6 @@ local function collectresults(head,list)
end
end
elseif n.spec.width > threshold then
---~ if has_attribute(n,a_textblock) then
---~ -- todo
---~ else
if last and not somespace[currentcontent[nofcurrentcontent]] then
local a = has_attribute(n,a_tagged)
if a == last then
@@ -1779,6 +1885,7 @@ local function collectresults(head,list)
if s == hyphen then
currentcontent[nofcurrentcontent] = utfsub(r,1,-2)
elseif s ~= "\n" then
+-- test without this
if trace_export then
report_export("%s<!-- injecting newline 1 -->",spaces[currentdepth])
end
@@ -1789,36 +1896,44 @@ local function collectresults(head,list)
end
end
elseif id == kern_code then
- if n.kern > threshold then
- if last and not somespace[currentcontent[nofcurrentcontent]] then
- local a = has_attribute(n,a_tagged)
- if a == last then
- if not somespace[currentcontent[nofcurrentcontent]] then
+ local kern = n.kern
+ if kern > 0 then
+ local limit = threshold
+ if p and p.id == glyph_code then
+ limit = fontquads[p.font] / 4
+ end
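+ -- a kern wider than a quarter of the preceding glyph's em width (its quad)
+ -- is taken to be real spacing and exported as a space below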
+ if kern > limit then
+ if last and not somespace[currentcontent[nofcurrentcontent]] then
+ local a = has_attribute(n,a_tagged)
+ if a == last then
+ if not somespace[currentcontent[nofcurrentcontent]] then
+ if trace_export then
+ report_export("%s<!-- injecting spacing 8 (%s) -->",spaces[currentdepth],topoints(kern,true))
+ end
+ nofcurrentcontent = nofcurrentcontent + 1
+ currentcontent[nofcurrentcontent] = " "
+ end
+ elseif a then
+ -- e.g. LOGO<space>LOGO
if trace_export then
- report_export("%s<!-- injecting spacing 8 -->",spaces[currentdepth])
+ report_export("%s<!-- processing kern, threshold %s, tag %s => %s -->",spaces[currentdepth],topoints(limit,true),last,a)
+ end
+ last = a
+ pushcontent()
+ if trace_export then
+ report_export("%s<!-- injecting spacing 9 (%s) -->",spaces[currentdepth],topoints(kern,true))
end
nofcurrentcontent = nofcurrentcontent + 1
currentcontent[nofcurrentcontent] = " "
+ currentnesting = taglist[last]
+ pushentry(currentnesting)
+ currentattribute = last
end
- elseif a then
- -- e.g LOGO<space>LOGO
- if trace_export then
- report_export("%s<!-- processing kern > threshold (tag %s => %s)",spaces[currentdepth],last,a)
- end
- last = a
- pushcontent()
- if trace_export then
- report_export("%s<!-- injecting spacing 9 -->",spaces[currentdepth])
- end
- nofcurrentcontent = nofcurrentcontent + 1
- currentcontent[nofcurrentcontent] = " "
- currentnesting = taglist[last]
- pushentry(currentnesting)
- currentattribute = last
end
end
end
end
+ p = n
end
end
@@ -1901,6 +2016,8 @@ local function stopexport(v)
finishexport()
collapsetree(tree)
indextree(tree)
+ checktree(tree)
+ breaktree(tree)
checkinserts(tree.data)
hashlistdata()
if type(v) ~= "string" or v == variables.yes or v == "" then
@@ -1935,9 +2052,10 @@ local function stopexport(v)
end
-- collect tree
local result = { }
- flushtree(result,tree.data)
+ flushtree(result,tree.data,"display",0)
result = concat(result)
- result = gsub(result,"\n *\n","\n")
+result = gsub(result,"\n *\n","\n")
+result = gsub(result,"\n +([^< ])","\n%1")
results[#results+1] = result
results = concat(results)
-- if needed we can do a cleanup of the tree (no need to load for xhtml then)
@@ -2018,6 +2136,7 @@ local function startexport(v)
--
enableaction("shipouts","nodes.handlers.accessibility")
enableaction("math", "noads.handlers.tags")
+--~ appendaction("finalizers","lists","builders.paragraphs.tag")
--~ enableaction("finalizers","builders.paragraphs.tag")
luatex.registerstopactions(function() stopexport(v) end)
exporting = true
diff --git a/tex/context/base/back-exp.mkiv b/tex/context/base/back-exp.mkiv
index d6fb74a97..c15f4c96d 100644
--- a/tex/context/base/back-exp.mkiv
+++ b/tex/context/base/back-exp.mkiv
@@ -123,9 +123,16 @@
{\enabledirectives[backend.export.css={\backendparameter\c!css}]}%
\to \everysetupbackend
+%D The zero char signal is needed in order to make sure that paragraphs that contain
+%D only elements are seen as new ones. This is a kludge, but after a day of experimenting
+%D I could not figure out a cleaner way: all kinds of analysis afterwards interfere.
+%D
+%D Todo: play with a user node.
+
\appendtoks
\doifsomething{\backendparameter\c!export}
- {\setupstructure
+ {\appendtoks \char\zerocount \to \everypar
+ \setupstructure
[\c!state=\v!start]%
\enabledirectives
[backend.export=\backendparameter\c!export]}%
diff --git a/tex/context/base/back-ini.lua b/tex/context/base/back-ini.lua
index 39de73741..10d10c253 100644
--- a/tex/context/base/back-ini.lua
+++ b/tex/context/base/back-ini.lua
@@ -6,6 +6,8 @@ if not modules then modules = { } end modules ['back-ini'] = {
license = "see context related readme files"
}
+local next, type = next, type
+local format = string.format
backends = backends or { }
local backends = backends
@@ -73,7 +75,7 @@ end
statistics.register("used backend", function()
local bc = backends.current
if bc ~= "unknown" then
- return string.format("%s (%s)",bc,backends[bc].comment or "no comment")
+ return format("%s (%s)",bc,backends[bc].comment or "no comment")
else
return nil
end
diff --git a/tex/context/base/bibl-bib.lua b/tex/context/base/bibl-bib.lua
index e0e5a6c3a..d7c195576 100644
--- a/tex/context/base/bibl-bib.lua
+++ b/tex/context/base/bibl-bib.lua
@@ -306,9 +306,9 @@ local P, Ct, lpegmatch, lpegpatterns = lpeg.P, lpeg.Ct, lpeg.match, lpeg.pattern
local space, comma = P(" "), P(",")
-local andsplitter = Ct(lpeg.splitat(space^1 * "and" * space^1))
-local commasplitter = Ct(lpeg.splitat(space^0 * comma * space^0))
-local spacesplitter = Ct(lpeg.splitat(space^1))
+local andsplitter = lpeg.tsplitat(space^1 * "and" * space^1)
+local commasplitter = lpeg.tsplitat(space^0 * comma * space^0)
+local spacesplitter = lpeg.tsplitat(space^1)
local firstcharacter = lpegpatterns.utf8byte
local function is_upper(str)
diff --git a/tex/context/base/char-ini.lua b/tex/context/base/char-ini.lua
index 930ca8eb2..c7a5d66a3 100644
--- a/tex/context/base/char-ini.lua
+++ b/tex/context/base/char-ini.lua
@@ -15,7 +15,7 @@ local utf = unicode.utf8
local utfchar, utfbyte, utfvalues = utf.char, utf.byte, string.utfvalues
local ustring = unicode.ustring
-local concat, unpack = table.concat, table.unpack
+local concat, unpack, tohash = table.concat, table.unpack, table.tohash
local next, tonumber, type, rawget, rawset = next, tonumber, type, rawget, rawset
local texsprint, texprint = tex.sprint, tex.print
local format, lower, gsub, match, gmatch = string.format, string.lower, string.gsub, string.match, string.match, string.gmatch
@@ -368,7 +368,7 @@ characters.categorytags = categorytags
--~ special : cf (softhyphen) zs (emspace)
--~ characters: ll lm lo lt lu mn nl no pc pd pe pf pi po ps sc sk sm so
-local is_character = allocate ( table.tohash {
+local is_character = allocate ( tohash {
"lu","ll","lt","lm","lo",
"nd","nl","no",
"mn",
@@ -377,19 +377,19 @@ local is_character = allocate ( table.tohash {
"sm","sc","sk","so"
} )
-local is_letter = allocate ( table.tohash {
+local is_letter = allocate ( tohash {
"ll","lm","lo","lt","lu"
} )
-local is_command = allocate ( table.tohash {
+local is_command = allocate ( tohash {
"cf","zs"
} )
-local is_spacing = allocate ( table.tohash {
+local is_spacing = allocate ( tohash {
"zs", "zl","zp",
} )
-local is_mark = allocate ( table.tohash {
+local is_mark = allocate ( tohash {
"mn", "ms",
} )
@@ -501,7 +501,7 @@ function tex.uprint(c,n)
end
end
-local temphack = table.tohash {
+local temphack = tohash {
0x00A0,
0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006, 0x2007, 0x2008, 0x2009, 0x200A, 0x200B,
0x202F,
diff --git a/tex/context/base/cont-new.mkii b/tex/context/base/cont-new.mkii
index 266deee0f..991bd8225 100644
--- a/tex/context/base/cont-new.mkii
+++ b/tex/context/base/cont-new.mkii
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2011.06.09 12:49}
+\newcontextversion{2011.06.11 16:45}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv
index d1064187d..62e80131f 100644
--- a/tex/context/base/cont-new.mkiv
+++ b/tex/context/base/cont-new.mkiv
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2011.06.09 12:49}
+\newcontextversion{2011.06.11 16:45}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/context.mkii b/tex/context/base/context.mkii
index 842e4294d..baef733f4 100644
--- a/tex/context/base/context.mkii
+++ b/tex/context/base/context.mkii
@@ -20,7 +20,7 @@
%D your styles and modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2011.06.09 12:49}
+\edef\contextversion{2011.06.11 16:45}
%D For those who want to use this:
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index 5a328d52a..5d39430ed 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -20,7 +20,7 @@
%D your styles and modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2011.06.09 12:49}
+\edef\contextversion{2011.06.11 16:45}
%D For those who want to use this:
diff --git a/tex/context/base/core-fnt.mkiv b/tex/context/base/core-fnt.mkiv
index 1ca8b8426..76ce03f72 100644
--- a/tex/context/base/core-fnt.mkiv
+++ b/tex/context/base/core-fnt.mkiv
@@ -184,20 +184,20 @@
\tx
\fi}
-\def\dohighlow#1#2#3#4#5% todo, named fontdimens
+\def\dohighlow#1#2#3#4#5#6% todo, named fontdimens tag
{\dontleavehmode
\bgroup
\scratchdimen\ifdim\fontexheight\textfont2=1ex #2\textfont2\else #3ex\fi
\advance\scratchdimen #4ex
\kern.1ex
- \setbox\scratchbox\hbox{#1\scratchdimen\hbox{\dodohighlow#5}}%
+ \setbox\scratchbox\hbox{#1\scratchdimen\hbox{\dodohighlow\dostarttagged#5\empty#6\dostoptagged}}%
\ht\scratchbox\strutheight
\dp\scratchbox\strutdepth
\box\scratchbox
\egroup}
-\unexpanded\def\high{\dohighlow\raise\mathsupnormal{.86}{0}}
-\unexpanded\def\low {\dohighlow\lower\mathsubnormal{.48}{0}}
+\unexpanded\def\high{\dohighlow\raise\mathsupnormal{.86}{0}\t!sup}
+\unexpanded\def\low {\dohighlow\lower\mathsubnormal{.48}{0}\t!sub}
\unexpanded\def\lohi
{\dosingleempty\dolohi}
@@ -208,8 +208,9 @@
\def\dolohi[#1]#2#3%
{\dontleavehmode
\hbox
- {\setbox4\hbox{\dohighlow\lower\mathsubnormal{.48}{.1}{#2}}%
- \setbox6\hbox{\dohighlow\raise\mathsupnormal{.86}{.1}{#3}}%
+ {\dostarttagged\t!subsup
+ \setbox4\hbox{\dohighlow\lower\mathsubnormal{.48}{.1}\t!sub{#2}}%
+ \setbox6\hbox{\dohighlow\raise\mathsupnormal{.86}{.1}\t!sup{#3}}%
\doif{#1}{\v!left}
{\ifdim\wd4<\wd6
\setbox4\hbox to \wd6{\hss\box4}%
@@ -220,7 +221,8 @@
\wd4=\zeropoint\box4\box6
\else
\wd6=\zeropoint\box6\box4
- \fi}}
+ \fi
+ \dostoptagged}}
\def\dohilo[#1]#2#3%
{\dolohi[#1]{#3}{#2}}
diff --git a/tex/context/base/core-mis.mkiv b/tex/context/base/core-mis.mkiv
index bb07f0628..417ea4c3d 100644
--- a/tex/context/base/core-mis.mkiv
+++ b/tex/context/base/core-mis.mkiv
@@ -1973,21 +1973,47 @@
% We do a few more calculations than needed, simply because that way
% it's easier to debug the code.
-\def\dododorotatenextbox
- {\setbox\nextbox\vbox to \@@layerysiz
- {\vfill
- \hbox to \@@layerxsiz
- {\dostartrotation\@@rorotation
- \nextboxwd\zeropoint
- \nextboxht\zeropoint
- \flushnextbox
- \dostoprotation
- \hfill}%
- \kern\@@layerypos}%
- \setbox\nextbox\hbox
- {\kern\@@layerxpos
- \kern\@@layerxoff
- \lower\@@layeryoff\flushnextbox}}
+% We can completely do this in lua .. when I'm bored ...
+
+% \def\dododorotatenextbox
+% {\setbox\nextbox\vbox to \@@layerysiz
+% {\vfill
+% \hbox to \@@layerxsiz
+% {\dostartrotation\@@rorotation
+% \nextboxwd\zeropoint
+% \nextboxht\zeropoint
+% \flushnextbox
+% \dostoprotation
+% \hfill}%
+% \kern\@@layerypos}%
+% \setbox\nextbox\hbox
+% {\kern\@@layerxpos
+% \kern\@@layerxoff
+% \lower\@@layeryoff\flushnextbox}}
+
+\def\dorotatenextbox#1#2%
+ {\hbox\bgroup
+ \edef\@@rorotation{#1}%
+ \ifx\@@rorotation\empty
+ \else
+ \ifx\@@rorotation\v!left
+ \doifoddpageelse{\edef\@@rorotation{90}}{\edef\@@rorotation{270}}%
+ \else\ifx\@@rorotation\v!right
+ \doifoddpageelse{\edef\@@rorotation{270}}{\edef\@@rorotation{90}}%
+ \else\ifx\@@rorotation\v!outer
+ \signalrightpage
+ \doifrightpageelse{\edef\@@rorotation{270}}{\edef\@@rorotation{90}}%
+ \else\ifx\@@rorotation\v!inner
+ \signalrightpage
+ \doifrightpageelse{\edef\@@rorotation{90}}{\edef\@@rorotation{270}}%
+ \else
+ \edef\@@rorotation{\realnumber{\@@rorotation}}% get rid of leading zeros and spaces
+ \fi\fi\fi\fi
+ \setbox\nextbox\vbox{\flushnextbox}% not really needed
+ \dodorotatenextbox\@@rorotation#2%
+ \fi
+ \boxcursor\flushnextbox
+ \egroup}
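+
+% Left and right pick the rotation from the odd/even page state, while inner and
+% outer first signal a right page check so that \doifrightpageelse can respond
+% reliably at this point.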
\def\dodorotatenextbox#1#2% quite some trial and error -)
{\dontshowcomposition
diff --git a/tex/context/base/core-sys.lua b/tex/context/base/core-sys.lua
index 47a5c340d..0be9fd588 100644
--- a/tex/context/base/core-sys.lua
+++ b/tex/context/base/core-sys.lua
@@ -6,7 +6,8 @@ if not modules then modules = { } end modules ['core-sys'] = {
license = "see context related readme files"
}
-local lower, extname, basename, removesuffix = string.lower, file.extname, file.basename, file.removesuffix
+local lower, format = string.lower, string.format
+local extname, basename, removesuffix = file.extname, file.basename, file.removesuffix
local environment = environment
@@ -21,5 +22,5 @@ end
statistics.register("result saved in file", function()
-- suffix will be fetched from backend
- return string.format( "%s.%s", environment.outputfilename, (tex.pdfoutput>0 and "pdf") or "dvi")
+ return format( "%s.%s", environment.outputfilename, (tex.pdfoutput>0 and "pdf") or "dvi")
end)
diff --git a/tex/context/base/data-exp.lua b/tex/context/base/data-exp.lua
index e81389682..86a287dd4 100644
--- a/tex/context/base/data-exp.lua
+++ b/tex/context/base/data-exp.lua
@@ -6,7 +6,7 @@ if not modules then modules = { } end modules ['data-exp'] = {
license = "see context related readme files",
}
-local format, find, gmatch, lower = string.format, string.find, string.gmatch, string.lower
+local format, find, gmatch, lower, char = string.format, string.find, string.gmatch, string.lower, string.char
local concat, sort = table.concat, table.sort
local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local Ct, Cs, Cc, P, C, S = lpeg.Ct, lpeg.Cs, lpeg.Cc, lpeg.P, lpeg.C, lpeg.S
@@ -140,7 +140,7 @@ local homedir
function resolvers.cleanpath(str)
if not homedir then
homedir = lpegmatch(cleanup,environment.homedir or "")
- if homedir == string.char(127) or homedir == "" or not lfs.isdir(homedir) then
+ if homedir == char(127) or homedir == "" or not lfs.isdir(homedir) then
if trace_expansions then
report_expansions("no home dir set, ignoring dependent paths")
end
@@ -189,8 +189,8 @@ end
local cache = { }
----- splitter = Ct(lpeg.splitat(S(ostype == "windows" and ";" or ":;"))) -- maybe add ,
-local splitter = Ct(lpeg.splitat(";")) -- as we move towards urls, prefixes and use tables we no longer do :
+----- splitter = lpeg.tsplitat(S(ostype == "windows" and ";" or ":;")) -- maybe add ,
+local splitter = lpeg.tsplitat(";") -- as we move towards urls, prefixes and use tables we no longer do :
local backslashswapper = lpeg.replacer("\\","/")
diff --git a/tex/context/base/data-ini.lua b/tex/context/base/data-ini.lua
index 7b114e47b..16fbb8e25 100644
--- a/tex/context/base/data-ini.lua
+++ b/tex/context/base/data-ini.lua
@@ -6,7 +6,7 @@ if not modules then modules = { } end modules ['data-ini'] = {
license = "see context related readme files",
}
-local gsub, find, gmatch = string.gsub, string.find, string.gmatch
+local gsub, find, gmatch, char = string.gsub, string.find, string.gmatch, string.char
local concat = table.concat
local next, type = next, type
@@ -68,7 +68,7 @@ do
local homedir = osgetenv(ostype == "windows" and 'USERPROFILE' or 'HOME') or ''
if not homedir or homedir == "" then
- homedir = string.char(127) -- we need a value, later we wil trigger on it
+ homedir = char(127) -- we need a value, later we will trigger on it
end
homedir = file.collapsepath(homedir)
diff --git a/tex/context/base/data-lst.lua b/tex/context/base/data-lst.lua
index 3f9425340..048a26f0d 100644
--- a/tex/context/base/data-lst.lua
+++ b/tex/context/base/data-lst.lua
@@ -9,6 +9,7 @@ if not modules then modules = { } end modules ['data-lst'] = {
-- used in mtxrun, can be loaded later .. todo
local find, concat, upper, format = string.find, table.concat, string.upper, string.format
+local fastcopy, sortedpairs = table.fastcopy, table.sortedpairs
resolvers.listers = resolvers.listers or { }
@@ -39,10 +40,10 @@ function resolvers.listers.variables(pattern)
end
end
end
- local env = table.fastcopy(environment)
- local var = table.fastcopy(variables)
- local exp = table.fastcopy(expansions)
- for key, value in table.sortedpairs(configured) do
+ local env = fastcopy(environment)
+ local var = fastcopy(variables)
+ local exp = fastcopy(expansions)
+ for key, value in sortedpairs(configured) do
if key ~= "" and (pattern == "" or find(upper(key),pattern)) then
report_lists(key)
report_lists(" env: %s",tabstr(rawget(environment,key)) or "unset")
@@ -51,9 +52,9 @@ function resolvers.listers.variables(pattern)
report_lists(" res: %s",resolvers.resolve(expansions[key]) or "unset")
end
end
- instance.environment = table.fastcopy(env)
- instance.variables = table.fastcopy(var)
- instance.expansions = table.fastcopy(exp)
+ instance.environment = fastcopy(env)
+ instance.variables = fastcopy(var)
+ instance.expansions = fastcopy(exp)
end
function resolvers.listers.configurations(report)
diff --git a/tex/context/base/data-tmp.lua b/tex/context/base/data-tmp.lua
index ec6f91e24..6e64fc4c7 100644
--- a/tex/context/base/data-tmp.lua
+++ b/tex/context/base/data-tmp.lua
@@ -23,6 +23,7 @@ luatools with a recache feature.</p>
--ldx]]--
local format, lower, gsub, concat = string.format, string.lower, string.gsub, table.concat
+local serialize, serializetofile = table.serialize, table.tofile
local mkdirs, isdir = dir.mkdirs, lfs.isdir
local trace_locating = false trackers.register("resolvers.locating", function(v) trace_locating = v end)
@@ -176,7 +177,7 @@ function caches.usedpaths()
end
function caches.configfiles()
- return table.concat(resolvers.instance.specification,";")
+ return concat(resolvers.instance.specification,";")
end
function caches.hashed(tree)
@@ -300,9 +301,9 @@ function caches.savedata(filepath,filename,data,raw)
end
data.cache_uuid = os.uuid()
if caches.direct then
- file.savedata(tmaname,table.serialize(data,true,saveoptions))
+ file.savedata(tmaname,serialize(data,true,saveoptions))
else
- table.tofile(tmaname,data,true,saveoptions)
+ serializetofile(tmaname,data,true,saveoptions)
end
utilities.lua.compile(tmaname,tmcname)
end
@@ -369,7 +370,7 @@ function caches.savecontent(cachename,dataname,content)
content = content,
uuid = os.uuid(),
}
- local ok = io.savedata(luaname,table.serialize(data,true))
+ local ok = io.savedata(luaname,serialize(data,true))
if ok then
if trace_locating then
report_resolvers("category '%s', cachename '%s' saved in '%s'",dataname,cachename,luaname)
diff --git a/tex/context/base/font-afm.lua b/tex/context/base/font-afm.lua
index b719a9b31..0aca634bb 100644
--- a/tex/context/base/font-afm.lua
+++ b/tex/context/base/font-afm.lua
@@ -28,6 +28,7 @@ local next, type, tonumber = next, type, tonumber
local format, match, gmatch, lower, gsub, strip = string.format, string.match, string.gmatch, string.lower, string.gsub, string.strip
local abs = math.abs
local P, S, C, R, lpegmatch, patterns = lpeg.P, lpeg.S, lpeg.C, lpeg.R, lpeg.match, lpeg.patterns
+local derivetable = table.derive
local fonts = fonts
local afm = { }
@@ -768,9 +769,9 @@ local function copytotfm(data)
if data and data.descriptions then
local metadata = data.metadata
local resources = data.resources
- local properties = table.derive(data.properties)
- local descriptions = table.derive(data.descriptions)
- local goodies = table.derive(data.goodies)
+ local properties = derivetable(data.properties)
+ local descriptions = derivetable(data.descriptions)
+ local goodies = derivetable(data.goodies)
local characters = { }
local parameters = { }
local unicodes = resources.unicodes
diff --git a/tex/context/base/font-col.lua b/tex/context/base/font-col.lua
index e0d2fed11..95e390ae2 100644
--- a/tex/context/base/font-col.lua
+++ b/tex/context/base/font-col.lua
@@ -11,6 +11,7 @@ if not modules then modules = { } end modules ['font-col'] = {
local gmatch, type = string.gmatch, type
local traverse_id = node.traverse_id
local lpegmatch = lpeg.match
+local fastcopy = table.fastcopy
local settings_to_hash = utilities.parsers.settings_to_hash
local trace_collecting = false trackers.register("fonts.collecting", function(v) trace_collecting = v end)
@@ -82,7 +83,7 @@ function collections.define(name,font,ranges,details)
end
end
details.font, details.start, details.stop = font, start, stop
- d[#d+1] = table.fastcopy(details)
+ d[#d+1] = fastcopy(details)
end
end
end
diff --git a/tex/context/base/font-con.lua b/tex/context/base/font-con.lua
index 5d30842ef..61970f734 100644
--- a/tex/context/base/font-con.lua
+++ b/tex/context/base/font-con.lua
@@ -13,6 +13,7 @@ local next, tostring, rawget = next, tostring, rawget
local format, match, lower, gsub = string.format, string.match, string.lower, string.gsub
local utfbyte = utf.byte
local sort, insert, concat, sortedkeys, serialize, fastcopy = table.sort, table.insert, table.concat, table.sortedkeys, table.serialize, table.fastcopy
+local derivetable = table.derive
local trace_defining = false trackers.register("fonts.defining", function(v) trace_defining = v end)
local trace_scaling = false trackers.register("fonts.scaling" , function(v) trace_scaling = v end)
@@ -194,10 +195,9 @@ function constructors.scale(tfmdata,specification)
local mathparameters = tfmdata.mathparameters or { }
--
local targetcharacters = { }
- local targetdescriptions = table.derive(descriptions)
- local targetparameters = table.derive(parameters)
- -- local targetmathparameters = table.fastcopy(mathparameters) -- happens elsewhere
- local targetproperties = table.derive(properties)
+ local targetdescriptions = derivetable(descriptions)
+ local targetparameters = derivetable(parameters)
+ local targetproperties = derivetable(properties)
local targetgoodies = goodies -- we need to loop so no metatable
target.characters = targetcharacters
target.descriptions = targetdescriptions
diff --git a/tex/context/base/font-ctx.lua b/tex/context/base/font-ctx.lua
index 329894407..9f2e4f255 100644
--- a/tex/context/base/font-ctx.lua
+++ b/tex/context/base/font-ctx.lua
@@ -11,7 +11,8 @@ if not modules then modules = { } end modules ['font-ctx'] = {
local texcount, texsetcount = tex.count, tex.setcount
local format, gmatch, match, find, lower, gsub, byte = string.format, string.gmatch, string.match, string.find, string.lower, string.gsub, string.byte
-local concat, serialize, sort = table.concat, table.serialize, table.sort
+local concat, serialize, sort, fastcopy, mergedtable = table.concat, table.serialize, table.sort, table.fastcopy, table.merged
+local sortedhash, sortedkeys, sequenced = table.sortedhash, table.sortedkeys, table.sequenced
local settings_to_hash, hash_to_string = utilities.parsers.settings_to_hash, utilities.parsers.hash_to_string
local formatcolumns = utilities.formatters.formatcolumns
@@ -351,7 +352,7 @@ local function contextnumber(name) -- will be replaced
else
local script, language = languages.association(lng)
if t.script ~= script or t.language ~= language then
- local s = table.fastcopy(t)
+ local s = fastcopy(t)
local n = #numbers + 1
setups[tag] = s
numbers[n] = tag
@@ -532,7 +533,7 @@ end
specifiers.splitcontext = splitcontext
function specifiers.contexttostring(name,kind,separator,yes,no,strict,omit) -- not used
- return hash_to_string(table.merged(handlers[kind].features.defaults or {},setups[name] or {}),separator,yes,no,strict,omit)
+ return hash_to_string(mergedtable(handlers[kind].features.defaults or {},setups[name] or {}),separator,yes,no,strict,omit)
end
local function starred(features) -- no longer fallbacks here
@@ -917,7 +918,7 @@ helpers.nametoslot = nametoslot
function loggers.reportdefinedfonts()
if trace_usage then
local t, tn = { }, 0
- for id, data in table.sortedhash(fontdata) do
+ for id, data in sortedhash(fontdata) do
local properties = data.properties or { }
local parameters = data.parameters or { }
tn = tn + 1
@@ -930,7 +931,7 @@ function loggers.reportdefinedfonts()
properties.psname or "",
properties.fullname or "",
}
-report_status("%s: %s",properties.name,concat(table.sortedkeys(data)," "))
+report_status("%s: %s",properties.name,concat(sortedkeys(data)," "))
end
formatcolumns(t," ")
report_status()
@@ -953,7 +954,7 @@ function loggers.reportusedfeatures()
local setup = setups[name]
local n = setup.number
setup.number = nil -- we have no reason to show this
- t[i] = { i, name, table.sequenced(setup,false,true) } -- simple mode
+ t[i] = { i, name, sequenced(setup,false,true) } -- simple mode
setup.number = n -- restore it (normally not needed as we're done anyway)
end
formatcolumns(t," ")
@@ -1026,7 +1027,7 @@ function fonts.definetypeface(name,t)
context.definefontsynonym( { format("%sBoldItalic", Shape) }, { format("spec:%s-%s-italic-%s", fontname, boldweight, boldwidth ) } )
context.definefontsynonym( { format("%sItalic", Shape) }, { format("spec:%s-%s-italic-%s", fontname, normalweight, normalwidth) } )
context.stopfontclass()
- local settings = table.sequenced({ features= t.features },",")
+ local settings = sequenced({ features= t.features },",")
context.dofastdefinetypeface(name, shortcut, shape, size, settings)
end
diff --git a/tex/context/base/font-ini.mkiv b/tex/context/base/font-ini.mkiv
index 0613ac709..29a84cd66 100644
--- a/tex/context/base/font-ini.mkiv
+++ b/tex/context/base/font-ini.mkiv
@@ -3894,7 +3894,7 @@
\unexpanded\def\getnamedglyphdirect#1#2{{\setdirectsymbolicfont{#1}\ctxcommand{fontchar("#2")}}}
\unexpanded\def\getglyphstyled #1#2{{\setstyledsymbolicfont{#1}\doifnumberelse{#2}\char\donothing#2}}
\unexpanded\def\getglyphdirect #1#2{{\setdirectsymbolicfont{#1}\doifnumberelse{#2}\char\donothing#2}}
-\unexpanded\def\getscaledglyph #1#2#3{{\setscaledstyledsymbolicfont{#1}{#2}\doifnumberelse{#3}\char\donothing#3}}
+\unexpanded\def\getscaledglyph #1#2#3{{\setscaledstyledsymbolicfont\fontbody{#1}{#2}\doifnumberelse{#3}\char\donothing#3}}
\let\getglyph \getglyphstyled % old
\let\getrawglyph \getglyphdirect % old
diff --git a/tex/context/base/font-otd.lua b/tex/context/base/font-otd.lua
index b22889217..a8061d6bc 100644
--- a/tex/context/base/font-otd.lua
+++ b/tex/context/base/font-otd.lua
@@ -7,6 +7,7 @@ if not modules then modules = { } end modules ['font-otd'] = {
}
local match = string.match
+local sequenced = table.sequenced
local trace_dynamics = false trackers.register("otf.dynamics", function(v) trace_dynamics = v end)
local trace_applied = false trackers.register("otf.applied", function(v) trace_applied = v end)
@@ -80,7 +81,7 @@ function otf.setdynamics(font,attribute)
set.mode = "node" -- really needed
dsla = otf.setfeatures(tfmdata,set)
if trace_dynamics then
- report_otf("setting dynamics %s: attribute %s, script %s, language %s, set: %s",contextnumbers[attribute],attribute,script,language,table.sequenced(set))
+ report_otf("setting dynamics %s: attribute %s, script %s, language %s, set: %s",contextnumbers[attribute],attribute,script,language,sequenced(set))
end
-- we need to restore some values
properties.script = s_script
diff --git a/tex/context/base/font-otf.lua b/tex/context/base/font-otf.lua
index 36d18a236..29735dee0 100644
--- a/tex/context/base/font-otf.lua
+++ b/tex/context/base/font-otf.lua
@@ -22,7 +22,7 @@ local getn = table.getn
local lpegmatch = lpeg.match
local reversed, concat, remove = table.reversed, table.concat, table.remove
local ioflush = io.flush
-local fastcopy, tohash = table.fastcopy, table.tohash
+local fastcopy, tohash, derivetable = table.fastcopy, table.tohash, table.derive
local allocate = utilities.storage.allocate
local registertracker = trackers.register
@@ -1671,9 +1671,9 @@ local function copytotfm(data,cache_id)
if data then
local metadata = data.metadata
local resources = data.resources
- local properties = table.derive(data.properties)
- local descriptions = table.derive(data.descriptions)
- local goodies = table.derive(data.goodies)
+ local properties = derivetable(data.properties)
+ local descriptions = derivetable(data.descriptions)
+ local goodies = derivetable(data.goodies)
local characters = { }
local parameters = { }
local mathparameters = { }
diff --git a/tex/context/base/font-syn.lua b/tex/context/base/font-syn.lua
index 1d444cdc7..2483f887c 100644
--- a/tex/context/base/font-syn.lua
+++ b/tex/context/base/font-syn.lua
@@ -13,6 +13,7 @@ local next, tonumber = next, tonumber
local gsub, lower, match, find, lower, upper = string.gsub, string.lower, string.match, string.find, string.lower, string.upper
local find, gmatch = string.find, string.gmatch
local concat, sort, format = table.concat, table.sort, string.format
+local serialize = table.serialize
local lpegmatch = lpeg.match
local utfgsub, utflower = utf.gsub, utf.lower
local unpack = unpack or table.unpack
@@ -916,8 +917,8 @@ local function is_reloaded()
if not reloaded then
local data = names.data
if autoreload then
- local c_status = table.serialize(resolvers.datastate())
- local f_status = table.serialize(data.datastate)
+ local c_status = serialize(resolvers.datastate())
+ local f_status = serialize(data.datastate)
if c_status == f_status then
report_names("font database has matching configuration and file hashes")
return
@@ -1240,7 +1241,7 @@ local function collect(stage,found,done,name,weight,style,width,variant,all)
report_names("resolving name '%s', weight '%s', style '%s', width '%s', variant '%s'",
name or "?",tostring(weight),tostring(style),tostring(width),tostring(variant))
end
- --~ print(name,table.serialize(family))
+ --~ print(name,serialize(family))
if weight and weight ~= "" then
if style and style ~= "" then
if width and width ~= "" then
diff --git a/tex/context/base/grph-inc.lua b/tex/context/base/grph-inc.lua
index 7dee63eaf..e41453513 100644
--- a/tex/context/base/grph-inc.lua
+++ b/tex/context/base/grph-inc.lua
@@ -1270,7 +1270,7 @@ function figures.applyratio(width,height,w,h) -- width and height are strings an
if not height or height == "" then
return figures.defaultwidth, figures.defaultheight
else
- height = string.todimen(height)
+ height = todimen(height)
if w and h then
return height * w/h, height
else
@@ -1278,7 +1278,7 @@ function figures.applyratio(width,height,w,h) -- width and height are strings an
end
end
else
- width = string.todimen(width)
+ width = todimen(width)
if not height or height == "" then
if w and h then
return width, width * h/w
@@ -1286,7 +1286,7 @@ function figures.applyratio(width,height,w,h) -- width and height are strings an
return width, figures.defaultheight
end
else
- return width, string.todimen(height)
+ return width, todimen(height)
end
end
end
diff --git a/tex/context/base/java-ini.lua b/tex/context/base/java-ini.lua
index b3b066678..55b60c14f 100644
--- a/tex/context/base/java-ini.lua
+++ b/tex/context/base/java-ini.lua
@@ -122,7 +122,7 @@ function javascripts.usepreamblenow(name) -- now later
end
end
-local splitter = lpeg.Ct(lpeg.splitat(lpeg.patterns.commaspacer))
+local splitter = lpeg.tsplitat(lpeg.patterns.commaspacer)
local used, reported = false, { } -- we can cache more
diff --git a/tex/context/base/l-aux.lua b/tex/context/base/l-aux.lua
deleted file mode 100644
index aa04951bf..000000000
--- a/tex/context/base/l-aux.lua
+++ /dev/null
@@ -1,13 +0,0 @@
-if not modules then modules = { } end modules ['l-aux'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-aux = aux or { }
-
-require("util-int") for k, v in next, utilities.interfaces do aux[k] = v end
-require("util-tab") for k, v in next, utilities.tables do aux[k] = v end
-require("util-fmt") for k, v in next, utilities.formatters do aux[k] = v end
diff --git a/tex/context/base/l-boolean.lua b/tex/context/base/l-boolean.lua
index 3fff7c126..2d502f164 100644
--- a/tex/context/base/l-boolean.lua
+++ b/tex/context/base/l-boolean.lua
@@ -11,10 +11,6 @@ local type, tonumber = type, tonumber
boolean = boolean or { }
local boolean = boolean
--- function boolean.tonumber(b)
--- return b and 1 or 0 -- test and test and return or return
--- end
-
function boolean.tonumber(b)
if b then return 1 else return 0 end -- test and return or return
end
diff --git a/tex/context/base/l-lpeg.lua b/tex/context/base/l-lpeg.lua
index ce0cc67ef..9e59194e8 100644
--- a/tex/context/base/l-lpeg.lua
+++ b/tex/context/base/l-lpeg.lua
@@ -9,6 +9,7 @@ if not modules then modules = { } end modules ['l-lpeg'] = {
local lpeg = require("lpeg")
local type = type
+local byte, char = string.byte, string.char
-- Beware, we predefine a bunch of patterns here and one reason for doing so
-- is that we get consistent behaviour in some of the visualizers.
@@ -89,7 +90,7 @@ patterns.space = space
patterns.tab = P("\t")
patterns.spaceortab = patterns.space + patterns.tab
patterns.eol = S("\n\r")
-patterns.spacer = S(" \t\f\v") -- + string.char(0xc2, 0xa0) if we want utf (cf mail roberto)
+patterns.spacer = S(" \t\f\v") -- + char(0xc2, 0xa0) if we want utf (cf mail roberto)
patterns.newline = newline
patterns.emptyline = newline^1
patterns.nonspacer = 1 - patterns.spacer
@@ -121,6 +122,19 @@ function string.unquoted(str)
return match(unquoted,str) or str
end
+-- more efficient:
+
+local unquoted = (
+ squote * Cs((1 - P(-2))^0) * squote -- any (also empty) content between the quotes
+ + dquote * Cs((1 - P(-2))^0) * dquote
+)
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+patterns.unquoted = unquoted
+
--~ print(string.unquoted("test"))
--~ print(string.unquoted([["t\"est"]]))
--~ print(string.unquoted([["t\"est"x]]))
@@ -134,7 +148,14 @@ function lpeg.splitter(pattern, action)
return (((1-P(pattern))^1)/action+1)^0
end
-local splitters_s, splitters_m = { }, { }
+function lpeg.tsplitter(pattern, action)
+ return Ct((((1-P(pattern))^1)/action+1)^0)
+end
+
+-- problem: the separator can be an lpeg pattern and that does not hash too well,
+-- but it's quite okay as the key is then not garbage collected
+
+local splitters_s, splitters_m, splitters_t = { }, { }, { }
local function splitat(separator,single)
local splitter = (single and splitters_s[separator]) or splitters_m[separator]
@@ -153,7 +174,17 @@ local function splitat(separator,single)
return splitter
end
-lpeg.splitat = splitat
+local function tsplitat(separator)
+ local splitter = splitters_t[separator]
+ if not splitter then
+ splitter = Ct(splitat(separator))
+ splitters_t[separator] = splitter
+ end
+ return splitter
+end
+
+lpeg.splitat = splitat
+lpeg.tsplitat = tsplitat
--~ local p = splitat("->",false) print(match(p,"oeps->what->more")) -- oeps what more
--~ local p = splitat("->",true) print(match(p,"oeps->what->more")) -- oeps what->more
@@ -165,7 +196,7 @@ local cache = { }
function lpeg.split(separator,str)
local c = cache[separator]
if not c then
- c = Ct(splitat(separator))
+ c = tsplitat(separator)
cache[separator] = c
end
return match(c,str)
@@ -174,7 +205,7 @@ end
function string.split(str,separator)
local c = cache[separator]
if not c then
- c = Ct(splitat(separator))
+ c = tsplitat(separator)
cache[separator] = c
end
return match(c,str)
@@ -193,7 +224,7 @@ patterns.textline = content
--~ return match(linesplitter,str)
--~ end
-local linesplitter = Ct(splitat(newline))
+local linesplitter = tsplitat(newline)
patterns.linesplitter = linesplitter
@@ -201,7 +232,7 @@ function string.splitlines(str)
return match(linesplitter,str)
end
-local utflinesplitter = utfbom^-1 * Ct(splitat(newline))
+local utflinesplitter = utfbom^-1 * tsplitat(newline)
patterns.utflinesplitter = utflinesplitter
@@ -237,13 +268,11 @@ end
--~ from roberto's site:
-local f1 = string.byte
-
-local function f2(s) local c1, c2 = f1(s,1,2) return c1 * 64 + c2 - 12416 end
-local function f3(s) local c1, c2, c3 = f1(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
-local function f4(s) local c1, c2, c3, c4 = f1(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
+local function f2(s) local c1, c2 = byte(s,1,2) return c1 * 64 + c2 - 12416 end
+local function f3(s) local c1, c2, c3 = byte(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
+local function f4(s) local c1, c2, c3, c4 = byte(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
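+
+-- the magic constants fold the utf-8 lead and continuation byte offsets into one
+-- subtraction: 12416 = 0xC0*64 + 0x80, 925824 = (0xE0*64 + 0x80)*64 + 0x80 and
+-- 63447168 = ((0xF0*64 + 0x80)*64 + 0x80)*64 + 0x80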
-local utf8byte = patterns.utf8one/f1 + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
+local utf8byte = patterns.utf8one/byte + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
patterns.utf8byte = utf8byte
@@ -538,4 +567,32 @@ function lpeg.is_lpeg(p)
return p and lpegtype(p) == "pattern"
end
+-- For the moment here, but it might move to utilities:
+
+local sort, fastcopy, sortedpairs = table.sort, table.fastcopy, table.sortedpairs -- dependency!
+
+function lpeg.append(list,pp)
+ local p = pp
+ if #list > 0 then
+ list = fastcopy(list)
+ sort(list)
+ for l=1,#list do
+ if p then
+ p = P(list[l]) + p
+ else
+ p = P(list[l])
+ end
+ end
+ else
+ for k, v in sortedpairs(list) do
+ if p then
+ p = P(k)/v + p
+ else
+ p = P(k)/v
+ end
+ end
+ end
+ return p
+end
+
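+-- A minimal usage sketch (not part of the patch); the array form is what
+-- m-dimensions.lua, added elsewhere in this commit, feeds with keys(...):
+--
+--~ local p = lpeg.append { "m", "mm", "cm" }
+--~ print(lpeg.match(lpeg.C(p),"mm")) -- mm
+--~
+--~ local r = lpeg.append({ ["&"] = "&amp;", ["<"] = "&lt;" })
+--~ print(lpeg.match(lpeg.Cs((r + 1)^0),"a<b&c")) -- a&lt;b&amp;c
+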
--~ Cf(Ct("") * (Cg(C(...) * "=" * Cs(...)))^0, rawset)
diff --git a/tex/context/base/l-table.lua b/tex/context/base/l-table.lua
index 9f5bf6eda..eeb3f47f6 100644
--- a/tex/context/base/l-table.lua
+++ b/tex/context/base/l-table.lua
@@ -929,3 +929,13 @@ end
function table.has_one_entry(t)
return t and not next(t,next(t))
end
+
+-- new
+
+function table.loweredkeys(t) -- maybe utf
+ local l = { }
+ for k, v in next, t do
+ l[lower(k)] = v
+ end
+ return l
+end
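+
+-- a quick sketch of intended use (not part of the patch): m-dimensions.lua, added
+-- elsewhere in this commit, merges the lowered variants back into the same table
+--
+--~ local t = { Kilo = "k", Mega = "M" }
+--~ table.merge(t,table.loweredkeys(t))
+--~ print(t.kilo, t.Mega) -- k M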
diff --git a/tex/context/base/l-utils.lua b/tex/context/base/l-utils.lua
deleted file mode 100644
index 30df04694..000000000
--- a/tex/context/base/l-utils.lua
+++ /dev/null
@@ -1,12 +0,0 @@
-if not modules then modules = { } end modules ['l-utils'] = {
- version = 1.001,
- comment = "this module is replaced by the util-* ones",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-utils = utils or { }
-
-require("util-mrg") for k, v in next, utilities.merger do utils[k] = v end
-require("util-lua") for k, v in next, utilities.lua do utils[k] = v end
diff --git a/tex/context/base/lang-ini.lua b/tex/context/base/lang-ini.lua
index 525e0f17a..0022c8a41 100644
--- a/tex/context/base/lang-ini.lua
+++ b/tex/context/base/lang-ini.lua
@@ -20,7 +20,7 @@ local type, tonumber = type, tonumber
local utf = unicode.utf8
local utfbyte = utf.byte
local format, gsub = string.format, string.gsub
-local concat = table.concat
+local concat, sortedkeys, sortedpairs = table.concat, table.sortedkeys, table.sortedpairs
local lpegmatch = lpeg.match
local texwrite = tex.write
@@ -170,7 +170,7 @@ function languages.synonym(synonym,tag) -- convenience function
end
function languages.installed(separator)
- context(concat(table.sortedkeys(registered),separator or ","))
+ context(concat(sortedkeys(registered),separator or ","))
end
function languages.current(n)
@@ -320,10 +320,7 @@ languages.logger = languages.logger or { }
function languages.logger.report()
local result, r = { }, 0
- local sorted = table.sortedkeys(registered)
- for i=1,#sorted do
- local tag = sorted[i]
- local l = registered[tag]
+ for tag, l in sortedpairs(registered) do
if l.loaded then
r = r + 1
result[r] = format("%s:%s:%s", tag, l.parent, l.number)
diff --git a/tex/context/base/lpdf-ano.lua b/tex/context/base/lpdf-ano.lua
index a133c24d7..24f1e903f 100644
--- a/tex/context/base/lpdf-ano.lua
+++ b/tex/context/base/lpdf-ano.lua
@@ -282,7 +282,7 @@ local function use_shared_annotations()
statistics.register("pdf annotations", function()
if nofused > 0 then
-- table.print(hashed,"hashed_annotations")
- return string.format("%s embedded, %s unique",nofused,nofunique)
+ return format("%s embedded, %s unique",nofused,nofunique)
else
return nil
end
diff --git a/tex/context/base/lpdf-epa.lua b/tex/context/base/lpdf-epa.lua
index 17c55eb0c..c8d23a618 100644
--- a/tex/context/base/lpdf-epa.lua
+++ b/tex/context/base/lpdf-epa.lua
@@ -10,7 +10,7 @@ if not modules then modules = { } end modules ['lpdf-epa'] = {
-- change.
local type, tonumber = type, tonumber
-local format = string.format
+local format, gsub = string.format, string.gsub
local trace_links = false trackers.register("figures.links", function(v) trace_links = v end)
@@ -171,12 +171,11 @@ function codeinjections.mergeviewerlayers(specification)
local layers = document.layers
if layers then
for i=1,layers.n do
- local tag = layers[i]
-tag = namespace .. string.gsub(tag," ",":")
-local title = tag
-if trace_links then
- report_link("using layer '%s'",tag)
-end
+ local tag = namespace .. gsub(layers[i]," ",":")
+ local title = tag
+ if trace_links then
+ report_link("using layer '%s'",tag)
+ end
attributes.viewerlayers.define { -- also does some cleaning
tag = tag, -- todo: #3A or so
title = title,
diff --git a/tex/context/base/lpdf-mis.lua b/tex/context/base/lpdf-mis.lua
index 024127a4c..42304f1b2 100644
--- a/tex/context/base/lpdf-mis.lua
+++ b/tex/context/base/lpdf-mis.lua
@@ -16,7 +16,7 @@ if not modules then modules = { } end modules ['lpdf-mis'] = {
-- course there are a couple of more changes.
local next, tostring = next, tostring
-local format = string.format
+local format, gsub = string.format, string.gsub
local texset = tex.set
local backends, lpdf, nodes = backends, lpdf, nodes
@@ -191,7 +191,7 @@ local function setupidentity()
end
local keywords = identity.keywords or ""
if keywords ~= "" then
- keywords = string.gsub(keywords, "[%s,]+", " ")
+ keywords = gsub(keywords, "[%s,]+", " ")
lpdf.addtoinfo("Keywords",pdfunicode(keywords), keywords)
end
local id = lpdf.id()
diff --git a/tex/context/base/lpdf-wid.lua b/tex/context/base/lpdf-wid.lua
index 3aa51c536..026845698 100644
--- a/tex/context/base/lpdf-wid.lua
+++ b/tex/context/base/lpdf-wid.lua
@@ -7,6 +7,7 @@ if not modules then modules = { } end modules ['lpdf-wid'] = {
}
local gmatch, gsub, find, lower, format = string.gmatch, string.gsub, string.find, string.lower, string.format
+local stripstring = string.strip
local texbox, texcount = tex.box, tex.count
local settings_to_array = utilities.parsers.settings_to_array
local settings_to_hash = utilities.parsers.settings_to_hash
@@ -243,7 +244,7 @@ function codeinjections.embedfile(specification)
end
end
local basename = keepdir == true and filename or file.basename(filename)
-local basename = string.gsub(basename,"%./","")
+local basename = gsub(basename,"%./","")
local savename = file.addsuffix(name ~= "" and name or basename,"txt") -- else no valid file
local a = pdfdictionary { Type = pdfconstant("EmbeddedFile") }
local f
@@ -379,7 +380,7 @@ end
function nodeinjections.comment(specification) -- brrr: seems to be done twice
nofcomments = nofcomments + 1
- local text = string.strip(specification.data or "")
+ local text = stripstring(specification.data or "")
if stripleading then
text = gsub(text,"[\n\r] *","\n")
end
diff --git a/tex/context/base/luat-bas.mkiv b/tex/context/base/luat-bas.mkiv
index 77b3be781..683c0e92f 100644
--- a/tex/context/base/luat-bas.mkiv
+++ b/tex/context/base/luat-bas.mkiv
@@ -14,11 +14,11 @@
\writestatus{loading}{ConTeXt Lua Macros / Basic Lua Libraries}
\registerctxluafile{l-string} {1.001}
+\registerctxluafile{l-table} {1.001}
\registerctxluafile{l-lpeg} {1.001}
\registerctxluafile{l-boolean}{1.001}
\registerctxluafile{l-number} {1.001}
\registerctxluafile{l-math} {1.001}
-\registerctxluafile{l-table} {1.001}
%registerctxluafile{l-aux} {1.001}
\registerctxluafile{l-io} {1.001}
\registerctxluafile{l-os} {1.001}
diff --git a/tex/context/base/luat-cbk.lua b/tex/context/base/luat-cbk.lua
index 031a24e0d..6622c64cd 100644
--- a/tex/context/base/luat-cbk.lua
+++ b/tex/context/base/luat-cbk.lua
@@ -9,6 +9,7 @@ if not modules then modules = { } end modules ['luat-cbk'] = {
local insert, remove, find, format = table.insert, table.remove, string.find, string.format
local collectgarbage, type, next = collectgarbage, type, next
local round = math.round
+local sortedhash, tohash = table.sortedhash, table.tohash
local trace_checking = false trackers.register("memory.checking", function(v) trace_checking = v end)
@@ -48,7 +49,7 @@ if not callbacks.list then -- otherwise counters get reset
end
-local delayed = table.tohash {
+local delayed = tohash {
"buildpage_filter",
}
@@ -102,7 +103,7 @@ function callbacks.known(name)
end
function callbacks.report()
- for name, _ in table.sortedhash(list) do
+ for name, _ in sortedhash(list) do
local str = frozen[name]
if str then
report_callbacks("%s: %s -> %s",state(name),name,str)
@@ -115,7 +116,7 @@ end
function callbacks.table()
local NC, NR, verbatim = context.NC, context.NR, context.type
context.starttabulate { "|l|l|p|" }
- for name, _ in table.sortedhash(list) do
+ for name, _ in sortedhash(list) do
NC() verbatim(name) NC() verbatim(state(name)) NC() context(frozen[name] or "") NC() NR()
end
context.stoptabulate()
@@ -190,7 +191,7 @@ end
if trace_calls then
statistics.register("callback details", function()
local t = { } -- todo: pass function to register and quit at nil
- for name, n in table.sortedhash(list) do
+ for name, n in sortedhash(list) do
if n > 0 then
t[#t+1] = format("%s -> %s",name,n)
end
diff --git a/tex/context/base/luat-cod.lua b/tex/context/base/luat-cod.lua
index a9cd4551b..3512673f8 100644
--- a/tex/context/base/luat-cod.lua
+++ b/tex/context/base/luat-cod.lua
@@ -6,7 +6,7 @@ if not modules then modules = { } end modules ['luat-cod'] = {
license = "see context related readme files"
}
-local match, gsub, find = string.match, string.gsub, string.find
+local match, gsub, find, format = string.match, string.gsub, string.find, string.format
local texconfig, lua = texconfig, lua
@@ -57,7 +57,7 @@ function lua.registerfinalizer(f,comment)
if type(f) == "function" then
finalizers[#finalizers+1] = { action = f, comment = comment }
else
- print(string.format("fatal error: invalid finalizer, action: %s",finalizer.comment or "unknown"))
+ print(format("fatal error: invalid finalizer, action: %s",finalizer.comment or "unknown"))
os.exit()
end
end
diff --git a/tex/context/base/luat-sto.lua b/tex/context/base/luat-sto.lua
index 946ce3756..461bd52ae 100644
--- a/tex/context/base/luat-sto.lua
+++ b/tex/context/base/luat-sto.lua
@@ -8,7 +8,7 @@ if not modules then modules = { } end modules ['luat-sto'] = {
local type, next, setmetatable, getmetatable = type, next, setmetatable, getmetatable
local gmatch, format, write_nl = string.gmatch, string.format, texio.write_nl
-local serialize, concat = table.serialize, table.concat
+local serialize, concat, sortedhash = table.serialize, table.concat, table.sortedhash
local bytecode = lua.bytecode
local report_storage = logs.reporter("system","storage")
@@ -97,20 +97,20 @@ end
function statistics.reportstorage(whereto)
whereto = whereto or "term and log"
write_nl(whereto," ","stored tables:"," ")
- for k,v in table.sortedhash(storage.data) do
+ for k,v in sortedhash(storage.data) do
write_nl(whereto,format("%03i %s",k,v[1]))
end
write_nl(whereto," ","stored modules:"," ")
- for k,v in table.sortedhash(lua.bytedata) do
+ for k,v in sortedhash(lua.bytedata) do
write_nl(whereto,format("%03i %s %s",k,v[2],v[1]))
end
write_nl(whereto," ","stored attributes:"," ")
- for k,v in table.sortedhash(attributes.names) do
+ for k,v in sortedhash(attributes.names) do
write_nl(whereto,format("%03i %s",k,v))
end
write_nl(whereto," ","stored catcodetables:"," ")
- for k,v in table.sortedhash(catcodes.names) do
- write_nl(whereto,format("%03i %s",k,table.concat(v," ")))
+ for k,v in sortedhash(catcodes.names) do
+ write_nl(whereto,format("%03i %s",k,concat(v," ")))
end
write_nl(whereto," ")
end
diff --git a/tex/context/base/lxml-ctx.lua b/tex/context/base/lxml-ctx.lua
index 1f6f6ffd3..3319dc638 100644
--- a/tex/context/base/lxml-ctx.lua
+++ b/tex/context/base/lxml-ctx.lua
@@ -8,6 +8,8 @@ if not modules then modules = { } end modules ['lxml-ctx'] = {
-- is this still used?
+local format, find = string.format, string.find
+
local xml = xml
xml.ctx = { }
@@ -21,7 +23,7 @@ function xml.ctx.enhancers.compound(root,lpath,before,tokens,after) -- todo lpeg
local after = after or "[%a%d][%a%d][%a%d]"
local pattern = "(" .. before .. ")(" .. tokens .. ")(" .. after .. ")"
local action = function(a,b,c)
- return a .. "<compound token=" .. string.format("%q",b) .. "/>" .. c
+ return a .. "<compound token=" .. format("%q",b) .. "/>" .. c
end
xml.enhance(root,lpath,pattern,action) -- still present?
end
@@ -38,7 +40,7 @@ function xml.ctx.tshow(specification)
local attribute = specification.attribute
if context then
local xmlpattern = pattern
- if not string.find(xmlpattern,"^[%a]+://") then
+ if not find(xmlpattern,"^[%a]+://") then
xmlpattern = "xml://" .. pattern
end
local parsed = xml.lpath(xmlpattern)
diff --git a/tex/context/base/lxml-sor.lua b/tex/context/base/lxml-sor.lua
index a159fd4e0..951017bcd 100644
--- a/tex/context/base/lxml-sor.lua
+++ b/tex/context/base/lxml-sor.lua
@@ -6,9 +6,8 @@ if not modules then modules = { } end modules ['lxml-sor'] = {
license = "see context related readme files"
}
-local format, concat = string.format, table.concat
+local format, concat, rep = string.format, table.concat, string.rep
local lpegmatch = lpeg.match
-local texsprint, ctxcatcodes = tex.sprint, tex.ctxcatcodes
local xml, lxml = xml, lxml
@@ -66,7 +65,7 @@ function lxml.sorters.show(name)
for i=1,#entries do
if #entries[i][2] > maxn then maxn = #entries[i][2] end
end
- context.starttabulate { "|Tr|Tr|" .. string.rep("Tlp|",maxn) }
+ context.starttabulate { "|Tr|Tr|" .. rep("Tlp|",maxn) }
NC() bold("n")
NC() bold("id")
if maxn > 1 then
diff --git a/tex/context/base/lxml-tex.lua b/tex/context/base/lxml-tex.lua
index eff2c6297..1afccbfcb 100644
--- a/tex/context/base/lxml-tex.lua
+++ b/tex/context/base/lxml-tex.lua
@@ -8,7 +8,7 @@ if not modules then modules = { } end modules ['lxml-tst'] = {
local utf = unicode.utf8
-local utfchar = utf.char
+local utfchar, utfupper = utf.char, utf.upper
local concat, insert, remove = table.concat, table.insert, table.remove
local format, sub, gsub, find, gmatch, match = string.format, string.sub, string.gsub, string.find, string.gmatch, string.match
local type, next, tonumber, tostring = type, next, tonumber, tostring
diff --git a/tex/context/base/m-dimensions.lua b/tex/context/base/m-dimensions.lua
new file mode 100644
index 000000000..19a3e9702
--- /dev/null
+++ b/tex/context/base/m-dimensions.lua
@@ -0,0 +1,398 @@
+if not modules then modules = { } end modules ['m-dimensions'] = {
+ version = 1.001,
+ comment = "companion to m-dimensions.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- This is pretty old code that I found again, but let's give it a try
+-- in practice. It started out as m-units.lua but as we want to keep that
+-- module around we moved the code to the dimensions module.
+
+local P, C, Cc, Cs, matchlpeg = lpeg.P, lpeg.C, lpeg.Cc, lpeg.Cs, lpeg.match
+local format = string.format
+local appendlpeg = lpeg.append
+
+local mergetable, mergedtable, keys, loweredkeys = table.merge, table.merged, table.keys, table.loweredkeys
+
+local long_prefixes = {
+ Yocto = [[y]], -- 10^{-24}
+ Zepto = [[z]], -- 10^{-21}
+ Atto = [[a]], -- 10^{-18}
+ Femto = [[f]], -- 10^{-15}
+ Pico = [[p]], -- 10^{-12}
+ Nano = [[n]], -- 10^{-9}
+ Micro = [[\mu]],-- 10^{-6}
+ Milli = [[m]], -- 10^{-3}
+ Centi = [[c]], -- 10^{-2}
+ Deci = [[d]], -- 10^{-1}
+
+ Deca = [[da]], -- 10^{1}
+ Hecto = [[h]], -- 10^{2}
+ Kilo = [[k]], -- 10^{3}
+ Mega = [[M]], -- 10^{6}
+ Giga = [[G]], -- 10^{9}
+ Tera = [[T]], -- 10^{12}
+ Peta = [[P]], -- 10^{15}
+ Exa = [[E]], -- 10^{18}
+ Zetta = [[Z]], -- 10^{21}
+ Yotta = [[Y]], -- 10^{24}
+
+ Kibi = [[Ki]], -- 2^{10} (binary)
+ Mebi = [[Mi]], -- 2^{20} (binary)
+ Gibi = [[Gi]], -- 2^{30} (binary)
+ Tebi = [[Ti]], -- 2^{40} (binary)
+ Pebi = [[Pi]], -- 2^{50} (binary)
+ Exbi = [[Ei]], -- 2^{60} (binary)
+ Zebi = [[Zi]], -- 2^{70} (binary)
+ Yobi = [[Yi]], -- 2^{80} (binary)
+}
+
+local long_units = {
+ Meter = [[m]],
+ Hertz = [[Hz]],
+ Second = [[s]],
+ Hour = [[h]],
+ Liter = [[l]],
+ Litre = [[l]],
+ Gram = [[g]],
+ Newton = [[N]],
+ Pascal = [[Pa]],
+ Atom = [[u]],
+ Joule = [[J]],
+ Watt = [[W]],
+ Celsius = [[C]], -- no SI
+ Kelvin = [[K]],
+ Fahrenheit = [[F]], -- no SI
+ Mol = [[mol]],
+ Mole = [[mol]],
+ Equivalent = [[eql]],
+ Farad = [[F]],
+ Ohm = [[\Omega]],
+ Siemens = [[S]],
+ Ampere = [[A]],
+ Coulomb = [[C]],
+ Volt = [[V]],
+ eVolt = [[eV]],
+ Tesla = [[T]],
+ VoltAC = [[V\scientificunitbackspace\scientificunitlower{ac}]],
+ VoltDC = [[V\scientificunitbackspace\scientificunitlower{dc}]],
+ AC = [[V\scientificunitbackspace\scientificunitlower{ac}]],
+ DC = [[V\scientificunitbackspace\scientificunitlower{dc}]],
+ Bit = [[bit]],
+ Baud = [[Bd]],
+ Byte = [[B]],
+ Erlang = [[E]],
+ Becquerel = [[Bq]],
+ Sievert = [[Sv]],
+ Candela = [[cd]],
+ Bell = [[B]],
+ At = [[at]],
+ Atm = [[atm]],
+ Bar = [[bar]],
+ Foot = [[ft]],
+ Inch = [[inch]],
+ Cal = [[cal]],
+ Force = [[f]],
+ Lux = [[lux]],
+ Gray = [[Gy]],
+ Weber = [[Wb]],
+ Henry = [[H]],
+ Steradian = [[sr]],
+ Angstrom = [[Å]],
+ Gauss = [[G]],
+ Rad = [[rad]],
+ Deg = [[°]],
+ RPS = [[RPS]],
+ RPM = [[RPM]],
+ RevPerSec = [[RPS]],
+ RevPerMin = [[RPM]],
+ Percent = [[\percent]],
+ Promille = [[\promille]],
+}
+
+local long_operators = {
+ Times = [[\scientificunitTIMES]], -- cdot
+ Solidus = [[\scientificunitSOLIDUS]],
+ Per = [[\scientificunitSOLIDUS]],
+ OutOf = [[\scientificunitOUTOF]],
+}
+
+local long_suffixes = {
+ Linear = [[1]],
+ Square = [[2]],
+ Cubic = [[3]],
+ Inverse = [[-1]],
+ ILinear = [[-1]],
+ ISquare = [[-2]],
+ ICubic = [[-3]],
+}
+
+mergetable(long_prefixes, loweredkeys(long_prefixes))
+mergetable(long_units, loweredkeys(long_units))
+mergetable(long_operators, loweredkeys(long_operators))
+mergetable(long_suffixes, loweredkeys(long_suffixes))
+
+local short_prefixes = {
+ y = long_prefixes.Yocto,
+ z = long_prefixes.Zepto,
+ a = long_prefixes.Atto,
+ f = long_prefixes.Femto,
+ p = long_prefixes.Pico,
+ n = long_prefixes.Nano,
+ u = long_prefixes.Micro,
+ m = long_prefixes.Milli,
+ c = long_prefixes.Centi,
+ d = long_prefixes.Deci,
+ da = long_prefixes.Deca,
+ h = long_prefixes.Hecto,
+ k = long_prefixes.Kilo,
+ M = long_prefixes.Mega,
+ G = long_prefixes.Giga,
+ T = long_prefixes.Tera,
+ P = long_prefixes.Peta,
+ E = long_prefixes.Exa,
+ Z = long_prefixes.Zetta,
+ Y = long_prefixes.Yotta,
+}
+
+local short_units = {
+ m = long_units.Meter,
+ hz = long_units.Hertz,
+ u = long_units.Hour,
+ h = long_units.Hour,
+ s = long_units.Second,
+}
+
+local short_operators = {
+ ["."] = long_operators.Times,
+ ["*"] = long_operators.Times,
+ ["/"] = long_operators.Solidus,
+ [":"] = long_operators.OutOf,
+}
+
+local short_suffixes = { -- maybe just raw digit match
+ ["1"] = long_suffixes.Linear,
+ ["2"] = long_suffixes.Square,
+ ["3"] = long_suffixes.Cubic,
+ ["+1"] = long_suffixes.Linear,
+ ["+2"] = long_suffixes.Square,
+ ["+3"] = long_suffixes.Cubic,
+ ["-1"] = long_suffixes.Inverse,
+ ["-1"] = long_suffixes.ILinear,
+ ["-2"] = long_suffixes.ISquare,
+ ["-3"] = long_suffixes.ICubic,
+ ["^1"] = long_suffixes.Linear,
+ ["^2"] = long_suffixes.Square,
+ ["^3"] = long_suffixes.Cubic,
+ ["^+1"] = long_suffixes.Linear,
+ ["^+2"] = long_suffixes.Square,
+ ["^+3"] = long_suffixes.Cubic,
+ ["^-1"] = long_suffixes.Inverse,
+ ["^-1"] = long_suffixes.ILinear,
+ ["^-2"] = long_suffixes.ISquare,
+ ["^-3"] = long_suffixes.ICubic,
+}
+
+local prefixes = mergedtable(long_prefixes,short_prefixes)
+local units = mergedtable(long_units,short_units)
+local operators = mergedtable(long_operators,short_operators)
+local suffixes = mergedtable(long_suffixes,short_suffixes)
+
+local space = P(" ")^0/""
+
+local l_prefix = appendlpeg(keys(long_prefixes))
+local l_unit = appendlpeg(keys(long_units))
+local l_operator = appendlpeg(keys(long_operators))
+local l_suffix = appendlpeg(keys(long_suffixes))
+
+local s_prefix = appendlpeg(keys(short_prefixes))
+local s_unit = appendlpeg(keys(short_units))
+local s_operator = appendlpeg(keys(short_operators))
+local s_suffix = appendlpeg(keys(short_suffixes))
+
+-- keep the space inside the Cs, else we get funny captures and arguments to the functions
+
+-- square centi meter per square kilo seconds
+
+local l_suffix = Cs(space * l_suffix)
+local s_suffix = Cs(space * s_suffix) + Cc("")
+local l_operator = Cs(space * l_operator)
+local l_combination = (Cs(space * l_prefix) + Cc("")) * Cs(space * l_unit)
+local s_combination = Cs(space * s_prefix) * Cs(space * s_unit) + Cc("") * Cs(space * s_unit)
+
+local combination = l_combination + s_combination
+
+-- square kilo meter
+-- square km
+
+local function dimpus(p,u,s)
+ p = prefixes[p] or p
+ u = units[u] or u
+ s = suffixes[s] or s
+ if p ~= "" then
+ if u ~= "" then
+ if s ~= "" then
+ return format(" p=%s u=%s s=%s ",p,u,s)
+ else
+ return format(" p=%s u=%s ",p,u)
+ end
+ elseif s ~= "" then
+ return format(" p=%s s=%s ",p,s)
+ else
+ return format(" p=%s ",p)
+ end
+ else
+ if u ~= "" then
+ if s ~= "" then
+ return format(" u=%s s=%s ",u,s)
+ else
+ return format(" u=%s ",u)
+ end
+ elseif s ~= "" then
+ return format(" s=%s ",s)
+ else
+ return format(" p=%s ",p)
+ end
+ end
+end
+
+local function dimop(o)
+ o = operators[o] or o
+ if o then
+ return format(" o=%s ",o)
+ end
+end
+
+local function dimnum(n)
+ if n ~= "" then
+ return format(" n=%s ",n)
+ end
+end
+
+local function dimerror(s)
+ return s ~= "" and s or "error"
+end
+
+local dimension =
+ (l_suffix * combination) / function (s,p,u)
+ return dimpus(p,u,s)
+ end
+ + (combination * s_suffix) / function (p,u,s)
+ return dimpus(p,u,s)
+ end
+
+local operator = (l_operator + s_operator) / function(o)
+ return dimop(o)
+end
+
+local number = (lpeg.patterns.number / function(n)
+ return dimnum(n)
+end)^-1
+
+dimension = space * dimension * space
+number = space * number * space
+operator = space * operator * space
+
+local expression = lpeg.Cs (
+ number * dimension * dimension^0 * (operator * dimension^1)^-1 * P(-1)
+ + (P(1)^0) / function(s) return dimerror(s) end
+)
+
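+-- A hedged usage sketch (not part of the module): in the plain Lua branch at
+-- the end of this file the dim* helpers just return trace strings, so a call
+-- like
+--
+--   print(matchlpeg(expression,"10 square kilo meter per second"))
+--
+-- should yield something like " n=10  p=k u=m s=2  o=\scientificunitSOLIDUS  u=s ".
+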
+if commands and context then
+
+ local scientificunitPUS = context.scientificunitPUS
+ local scientificunitPU = context.scientificunitPU
+ local scientificunitPS = context.scientificunitPS
+ local scientificunitP = context.scientificunitP
+ local scientificunitUS = context.scientificunitUS
+ local scientificunitU = context.scientificunitU
+ local scientificunitS = context.scientificunitS
+ local scientificunitO = context.scientificunitO
+ local scientificunitN = context.scientificunitN
+
+ dimpus = function(p,u,s)
+ p = prefixes[p] or p
+ u = units[u] or u
+ s = suffixes[s] or s
+ if p ~= "" then
+ if u ~= "" then
+ if s ~= "" then
+ scientificunitPUS(p,u,s)
+ else
+ scientificunitPU(p,u)
+ end
+ elseif s ~= "" then
+ scientificunitPS(p,s)
+ else
+ scientificunitP(p)
+ end
+ else
+ if u ~= "" then
+ if s ~= "" then
+ scientificunitUS(u,s)
+ else
+ scientificunitU(u)
+ end
+ elseif s ~= "" then
+ scientificunitS(s)
+ else
+ scientificunitP(p)
+ end
+ end
+ end
+
+ dimop = function(o)
+ o = operators[o] or o
+ if o then
+ scientificunitO(o)
+ end
+ end
+
+ dimnum = function(n)
+ if n ~= "" then
+ scientificunitN(n)
+ end
+ end
+
+ dimerror = function(s)
+ scientificunitU(s)
+ end
+
+ function commands.scientificunit(str)
+ matchlpeg(expression,str)
+ end
+
+else
+
+ local tests = {
+--~ "m/u",
+--~ "km/u",
+--~ "km",
+--~ "km/s2",
+--~ "km/ms2",
+--~ "km/ms-2",
+--~ "km/h",
+--~ " meter ",
+--~ " meter per meter",
+--~ "cubic meter per square meter",
+--~ "cubic kilo meter per square meter",
+--~ "KiloMeter/Hour",
+--~ "10.5 kilo pascal",
+--~ "kilo pascal meter liter per second",
+--~ "100 crap",
+ }
+
+ for i=1,#tests do
+ local test = tests[i]
+ print(test,matchlpeg(expression,test) or test)
+ end
+
+end
diff --git a/tex/context/base/m-dimensions.mkiv b/tex/context/base/m-dimensions.mkiv
new file mode 100644
index 000000000..2e4495e82
--- /dev/null
+++ b/tex/context/base/m-dimensions.mkiv
@@ -0,0 +1,194 @@
+%D \module
+%D [ file=m-dimensions,
+%D version=1997.03.19,
+%D title=\CONTEXT\ Extra Modules,
+%D subtitle=Scientific Units,
+%D author={Hans Hagen},
+%D date=\currentdate,
+%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
+%C
+%C This module is part of the \CONTEXT\ macro||package and is
+%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
+%C details.
+
+\unprotect
+
+\registerctxluafile{m-dimensions}{}
+
+\startmodule[dimensions]
+
+%D \macros
+%D {su}
+%D
+%D We have been using the units module (and its predecessor) for over a decade
+%D now, but when we moved on to \LUATEX\ a variant was prototyped that permits a
+%D less \TEX||like style of coding. I finally picked up that thread and cleaned up
+%D the code a bit so users can now play with it. (The main reason was that I
+%D wanted to test exporting.)
+%D
+%D \startbuffer
+%D \su{10 km/h}
+%D 10\su{km/h}
+%D 10 \su{km/h}
+%D $10\su{km/h}$
+%D $10 \su{km/h}$
+%D 10 \su{KiloMeter/Hour}
+%D 10 \su{kilometer/hour}
+%D 10 \su{km/h}
+%D 10 \su{kilometer per hour}
+%D 10 \su{km / h}
+%D 10 \su{ km / h }
+%D 10 \su{km/ms2}
+%D 10 \su{meter per second}
+%D 10 \su{cubic meter}
+%D 10 \su{cubic meter per second}
+%D 10 \su{cubic meter / second}
+%D $10 \su{cubic meter / second}$
+%D 30 \su{kilo pascal }
+%D 30 \su{kilo pascal square meter / second}
+%D 30 \su{kilo pascal square meter / second kelvin}
+%D 30 \su{crap}
+%D $ \frac{10 \su{m/s}}{20 \su{m/s}} $
+%D \stopbuffer
+%D
+%D \typebuffer
+%D
+%D Result: \getbuffer
+
+\newconstant \c_scientificunit_mode % 0=text 1=math
+\newconstant \c_scientificunit_state % 0=start 1=suffix 2=operator 3=unit 4=prefix 5=number
+\newconditional\c_scientificunit_number
+
+% tags and export
+% smash == snapper
+% hbox also in mmode
+
+\def\scientificunithalfspace{\thinspace}
+\def\scientificunitbackspace{\negthinspace}
+
+\newtoks \everyscientificunit % we keep the old \units command so we need a longer one
+
+\unexpanded\def\scientificunit#1%
+ {\begingroup
+ \the\everyscientificunit
+ \removeunwantedspaces
+ \ifmmode
+ \c_scientificunit_mode\plusone
+ \rm\tf
+ \mathtf
+ \fi
+ \scientificunit_indeed{#1}%
+ \scientificunit_finish
+ \endgroup}
+
+\appendtoks
+ \let\scientificunit\scientificunit_indeed
+\to \everyscientificunit
+
+\let\su\scientificunit
+
+\appendtoks
+ \let\su\scientificunit_indeed
+\to \everyscientificunit
+
+\unexpanded\def\scientificunit_indeed#1{\ctxcommand{scientificunit(\!!bs#1\!!es)}}
+
+\unexpanded\def\scientificunitPUS#1#2#3{\scientificunit_next#1#2\scientificunitraise{#3}\c_scientificunit_state\plusone} % suffix
+\unexpanded\def\scientificunitPU #1#2{\scientificunit_next#1#2\c_scientificunit_state \plusthree} % unit
+\unexpanded\def\scientificunitPS #1#2{\scientificunit_next#1\scientificunitraise{#2}\c_scientificunit_state \plusone} % suffix
+\unexpanded\def\scientificunitUS #1#2{\scientificunit_next#1\scientificunitraise{#2}\c_scientificunit_state \plusone} % suffix
+\unexpanded\def\scientificunitP #1{\scientificunit_next#1\c_scientificunit_state \plusfour} % prefix
+\unexpanded\def\scientificunitU #1{\scientificunit_next#1\c_scientificunit_state \plusthree} % unit
+\unexpanded\def\scientificunitS #1{\scientificunit_start{}\scientificunitraise{#1}\c_scientificunit_state \plusone} % suffix
+\unexpanded\def\scientificunitO #1{\scientificunit_start#1\c_scientificunit_state \plustwo} % operator
+\unexpanded\def\scientificunitN #1{\scientificunit_start#1\c_scientificunit_state \plusfive} % number
+
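+% A hedged sketch of the callback sequence (assuming the tables in
+% m-dimensions.lua): \su{10 km/h} makes the Lua side call back
+% \scientificunitN{10} \scientificunitPU{k}{m}
+% \scientificunitO{\scientificunitSOLIDUS} \scientificunitU{h}, with
+% \scientificunit_next and the state constant adding the spacing in between.
+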
+\setelementnature[unit] [mixed]
+\setelementnature[quantity][mixed]
+
+\unexpanded\def\scientificunitN#1%
+ {\ifmmode
+ #1%
+ \else
+ \dostarttagged{quantity}\empty
+ \dostarttagged{number}\empty
+ #1%
+ \dostoptagged
+ \settrue\c_scientificunit_number
+ \fi
+ %\scientificunit_start
+ \c_scientificunit_state\plusfive}
+
+\def\scientificunit_start
+ {\ifmmode
+ \dostarttagged\t!mathaction{unit}%
+ \bgroup % make an mrow
+ \else
+ \dostarttagged{unit}\empty
+ \fi
+ \let\scientificunit_finish\scientificunit_stop
+ \let\scientificunit_start\relax}
+
+\def\scientificunit_stop
+ {\ifmmode
+ \egroup
+ \fi
+ \ifconditional\c_scientificunit_number
+ \dostoptagged
+ \fi
+ \dostoptagged}
+
+\def\scientificunitraise
+ {\ifnum\c_scientificunit_mode=\plusone
+ \expandafter\normalsuperscript
+ \else
+ \expandafter\high
+ \fi}
+
+\def\scientificunitlower
+ {\ifnum\c_scientificunit_mode=\plusone
+ \expandafter\normalsubscript
+ \else
+ \expandafter\low
+ \fi}
+
+\unexpanded\def\scientificunit_next
+ {\ifcase\c_scientificunit_state % start
+ \scientificunithalfspace
+ \scientificunithalfspace
+ \or % suffix
+ {\cdot}% \scientificunithalfspace
+ \or % operator
+ \or % unit
+ {\cdot}% \scientificunithalfspace
+ \or % prefix
+ \or % number
+ \scientificunithalfspace
+ \scientificunithalfspace
+ \fi
+ \scientificunit_start}
+
+\unexpanded\def\scientificunitTIMES
+ {\ifnum\c_scientificunit_state=\plusone % suffix
+ \else
+ \scientificunithalfspace
+ \fi
+ \cdot} % or \times
+
+\unexpanded\def\scientificunitOUTOF
+ {\ifnum\c_scientificunit_state=\plusone % suffix
+ \else
+ \scientificunithalfspace
+ \fi
+ :}
+
+\unexpanded\def\scientificunitSOLIDUS
+ {\ifnum\c_scientificunit_state=\plusone % suffix
+ \scientificunitbackspace
+ \fi
+ {/}%
+ }%\scientificunitbackspace}
+
+\stopmodule
+
+\protect \endinput
diff --git a/tex/context/base/m-pstricks.lua b/tex/context/base/m-pstricks.lua
index ab2719084..7f795feac 100644
--- a/tex/context/base/m-pstricks.lua
+++ b/tex/context/base/m-pstricks.lua
@@ -50,7 +50,7 @@ end
function moduledata.pstricks.process(n)
graphics = graphics + 1
- local name = string.format("%s-pstricks-%04i",tex.jobname,graphics)
+ local name = format("%s-pstricks-%04i",tex.jobname,graphics)
local data = buffers.collectcontent("def-"..n)
local tmpfile = name .. ".tmp"
local epsfile = name .. ".ps"
diff --git a/tex/context/base/m-units.mkiv b/tex/context/base/m-units.mkiv
index 3f3f6233a..23aecaaa4 100644
--- a/tex/context/base/m-units.mkiv
+++ b/tex/context/base/m-units.mkiv
@@ -29,8 +29,6 @@
%D macro defined in the core modules. Let's say that this is
%D an upward compatibility issue.
-% \registerctxluafile{x-units}{}
-
\startmessages dutch library: units
title: eenheden
1: gebruik \string\Degrees\space\string\Celsius\space in plaats van \string\Celsius !
diff --git a/tex/context/base/math-map.lua b/tex/context/base/math-map.lua
index 238b72edd..bf02a4c9d 100644
--- a/tex/context/base/math-map.lua
+++ b/tex/context/base/math-map.lua
@@ -21,6 +21,7 @@ if not modules then modules = { } end modules ['math-map'] = {
local type, next = type, next
local floor, div = math.floor, math.div
+local merged = table.merged
local allocate = utilities.storage.allocate
@@ -37,246 +38,329 @@ local mathematics = mathematics
-- following approach permits easier remapping of a-z, A-Z and 0-9 to
-- fallbacks; symbols is currently mostly greek
-local alphabets = allocate {
- regular = {
- tf = {
- digits = 0x00030,
- ucletters = 0x00041,
- lcletters = 0x00061,
- ucgreek = {
- [0x0391]=0x0391, [0x0392]=0x0392, [0x0393]=0x0393, [0x0394]=0x0394, [0x0395]=0x0395,
- [0x0396]=0x0396, [0x0397]=0x0397, [0x0398]=0x0398, [0x0399]=0x0399, [0x039A]=0x039A,
- [0x039B]=0x039B, [0x039C]=0x039C, [0x039D]=0x039D, [0x039E]=0x039E, [0x039F]=0x039F,
- [0x03A0]=0x03A0, [0x03A1]=0x03A1, [0x03A3]=0x03A3, [0x03A4]=0x03A4, [0x03A5]=0x03A5,
- [0x03A6]=0x03A6, [0x03A7]=0x03A7, [0x03A8]=0x03A8, [0x03A9]=0x03A9,
- },
- lcgreek = {
- [0x03B1]=0x03B1, [0x03B2]=0x03B2, [0x03B3]=0x03B3, [0x03B4]=0x03B4, [0x03B5]=0x03B5,
- [0x03B6]=0x03B6, [0x03B7]=0x03B7, [0x03B8]=0x03B8, [0x03B9]=0x03B9, [0x03BA]=0x03BA,
- [0x03BB]=0x03BB, [0x03BC]=0x03BC, [0x03BD]=0x03BD, [0x03BE]=0x03BE, [0x03BF]=0x03BF,
- [0x03C0]=0x03C0, [0x03C1]=0x03C1, [0x03C2]=0x03C2, [0x03C3]=0x03C3, [0x03C4]=0x03C4,
- [0x03C5]=0x03C5, [0x03C6]=0x03C6, [0x03C7]=0x03C7, [0x03C8]=0x03C8, [0x03C9]=0x03C9,
- [0x03D1]=0x03D1, [0x03D5]=0x03D5, [0x03D6]=0x03D6, [0x03F0]=0x03F0, [0x03F1]=0x03F1,
- [0x03F4]=0x03F4, [0x03F5]=0x03F5,
- },
- symbols = {
- [0x2202]=0x2202, [0x2207]=0x2207,
- },
- },
- it = {
- ucletters = 0x1D434,
- lcletters = { -- H
- [0x00061]=0x1D44E, [0x00062]=0x1D44F, [0x00063]=0x1D450, [0x00064]=0x1D451, [0x00065]=0x1D452,
- [0x00066]=0x1D453, [0x00067]=0x1D454, [0x00068]=0x0210E, [0x00069]=0x1D456, [0x0006A]=0x1D457,
- [0x0006B]=0x1D458, [0x0006C]=0x1D459, [0x0006D]=0x1D45A, [0x0006E]=0x1D45B, [0x0006F]=0x1D45C,
- [0x00070]=0x1D45D, [0x00071]=0x1D45E, [0x00072]=0x1D45F, [0x00073]=0x1D460, [0x00074]=0x1D461,
- [0x00075]=0x1D462, [0x00076]=0x1D463, [0x00077]=0x1D464, [0x00078]=0x1D465, [0x00079]=0x1D466,
- [0x0007A]=0x1D467,
- },
- ucgreek = {
- [0x0391]=0x1D6E2, [0x0392]=0x1D6E3, [0x0393]=0x1D6E4, [0x0394]=0x1D6E5, [0x0395]=0x1D6E6,
- [0x0396]=0x1D6E7, [0x0397]=0x1D6E8, [0x0398]=0x1D6E9, [0x0399]=0x1D6EA, [0x039A]=0x1D6EB,
- [0x039B]=0x1D6EC, [0x039C]=0x1D6ED, [0x039D]=0x1D6EE, [0x039E]=0x1D6EF, [0x039F]=0x1D6F0,
- [0x03A0]=0x1D6F1, [0x03A1]=0x1D6F2, [0x03A3]=0x1D6F4, [0x03A4]=0x1D6F5, [0x03A5]=0x1D6F6,
- [0x03A6]=0x1D6F7, [0x03A7]=0x1D6F8, [0x03A8]=0x1D6F9, [0x03A9]=0x1D6FA,
- },
- lcgreek = {
- [0x03B1]=0x1D6FC, [0x03B2]=0x1D6FD, [0x03B3]=0x1D6FE, [0x03B4]=0x1D6FF, [0x03B5]=0x1D700,
- [0x03B6]=0x1D701, [0x03B7]=0x1D702, [0x03B8]=0x1D703, [0x03B9]=0x1D704, [0x03BA]=0x1D705,
- [0x03BB]=0x1D706, [0x03BC]=0x1D707, [0x03BD]=0x1D708, [0x03BE]=0x1D709, [0x03BF]=0x1D70A,
- [0x03C0]=0x1D70B, [0x03C1]=0x1D70C, [0x03C2]=0x1D70D, [0x03C3]=0x1D70E, [0x03C4]=0x1D70F,
- [0x03C5]=0x1D710, [0x03C6]=0x1D711, [0x03C7]=0x1D712, [0x03C8]=0x1D713, [0x03C9]=0x1D714,
- [0x03D1]=0x1D717, [0x03D5]=0x1D719, [0x03D6]=0x1D71B, [0x03F0]=0x1D718, [0x03F1]=0x1D71A,
- [0x03F4]=0x1D6F3, [0x03F5]=0x1D716,
- },
- symbols = {
- [0x2202]=0x1D715, [0x2207]=0x1D6FB,
- },
- },
- bf= {
- digits = 0x1D7CE,
- ucletters = 0x1D400,
- lcletters = 0x1D41A,
- ucgreek = {
- [0x0391]=0x1D6A8, [0x0392]=0x1D6A9, [0x0393]=0x1D6AA, [0x0394]=0x1D6AB, [0x0395]=0x1D6AC,
- [0x0396]=0x1D6AD, [0x0397]=0x1D6AE, [0x0398]=0x1D6AF, [0x0399]=0x1D6B0, [0x039A]=0x1D6B1,
- [0x039B]=0x1D6B2, [0x039C]=0x1D6B3, [0x039D]=0x1D6B4, [0x039E]=0x1D6B5, [0x039F]=0x1D6B6,
- [0x03A0]=0x1D6B7, [0x03A1]=0x1D6B8, [0x03A3]=0x1D6BA, [0x03A4]=0x1D6BB, [0x03A5]=0x1D6BC,
- [0x03A6]=0x1D6BD, [0x03A7]=0x1D6BE, [0x03A8]=0x1D6BF, [0x03A9]=0x1D6C0,
- },
- lcgreek = {
- [0x03B1]=0x1D6C2, [0x03B2]=0x1D6C3, [0x03B3]=0x1D6C4, [0x03B4]=0x1D6C5, [0x03B5]=0x1D6C6,
- [0x03B6]=0x1D6C7, [0x03B7]=0x1D6C8, [0x03B8]=0x1D6C9, [0x03B9]=0x1D6CA, [0x03BA]=0x1D6CB,
- [0x03BB]=0x1D6CC, [0x03BC]=0x1D6CD, [0x03BD]=0x1D6CE, [0x03BE]=0x1D6CF, [0x03BF]=0x1D6D0,
- [0x03C0]=0x1D6D1, [0x03C1]=0x1D6D2, [0x03C2]=0x1D6D3, [0x03C3]=0x1D6D4, [0x03C4]=0x1D6D5,
- [0x03C5]=0x1D6D6, [0x03C6]=0x1D6D7, [0x03C7]=0x1D6D8, [0x03C8]=0x1D6D9, [0x03C9]=0x1D6DA,
- [0x03D1]=0x1D6DD, [0x03D5]=0x1D6DF, [0x03D6]=0x1D6E1, [0x03F0]=0x1D6DE, [0x03F1]=0x1D6E0,
- [0x03F4]=0x1D6B9, [0x03F5]=0x1D6DC,
- },
- symbols = {
- [0x2202]=0x1D6DB, [0x2207]=0x1D6C1,
- },
- },
- bi = {
- ucletters = 0x1D468,
- lcletters = 0x1D482,
- ucgreek = {
- [0x0391]=0x1D71C, [0x0392]=0x1D71D, [0x0393]=0x1D71E, [0x0394]=0x1D71F, [0x0395]=0x1D720,
- [0x0396]=0x1D721, [0x0397]=0x1D722, [0x0398]=0x1D723, [0x0399]=0x1D724, [0x039A]=0x1D725,
- [0x039B]=0x1D726, [0x039C]=0x1D727, [0x039D]=0x1D728, [0x039E]=0x1D729, [0x039F]=0x1D72A,
- [0x03A0]=0x1D72B, [0x03A1]=0x1D72C, [0x03A3]=0x1D72E, [0x03A4]=0x1D72F, [0x03A5]=0x1D730,
- [0x03A6]=0x1D731, [0x03A7]=0x1D732, [0x03A8]=0x1D733, [0x03A9]=0x1D734,
- },
- lcgreek = {
- [0x03B1]=0x1D736, [0x03B2]=0x1D737, [0x03B3]=0x1D738, [0x03B4]=0x1D739, [0x03B5]=0x1D73A,
- [0x03B6]=0x1D73B, [0x03B7]=0x1D73C, [0x03B8]=0x1D73D, [0x03B9]=0x1D73E, [0x03BA]=0x1D73F,
- [0x03BB]=0x1D740, [0x03BC]=0x1D741, [0x03BD]=0x1D742, [0x03BE]=0x1D743, [0x03BF]=0x1D744,
- [0x03C0]=0x1D745, [0x03C1]=0x1D746, [0x03C2]=0x1D747, [0x03C3]=0x1D748, [0x03C4]=0x1D749,
- [0x03C5]=0x1D74A, [0x03C6]=0x1D74B, [0x03C7]=0x1D74C, [0x03C8]=0x1D74D, [0x03C9]=0x1D74E,
- [0x03D1]=0x1D751, [0x03D5]=0x1D753, [0x03D6]=0x1D755, [0x03F0]=0x1D752, [0x03F1]=0x1D754,
- [0x03F4]=0x1D72D, [0x03F5]=0x1D750,
- },
- symbols = {
- [0x2202]=0x1D74F, [0x2207]=0x1D735,
- },
+local regular_tf = {
+ digits = 0x00030,
+ ucletters = 0x00041,
+ lcletters = 0x00061,
+ ucgreek = {
+ [0x0391]=0x0391, [0x0392]=0x0392, [0x0393]=0x0393, [0x0394]=0x0394, [0x0395]=0x0395,
+ [0x0396]=0x0396, [0x0397]=0x0397, [0x0398]=0x0398, [0x0399]=0x0399, [0x039A]=0x039A,
+ [0x039B]=0x039B, [0x039C]=0x039C, [0x039D]=0x039D, [0x039E]=0x039E, [0x039F]=0x039F,
+ [0x03A0]=0x03A0, [0x03A1]=0x03A1, [0x03A3]=0x03A3, [0x03A4]=0x03A4, [0x03A5]=0x03A5,
+ [0x03A6]=0x03A6, [0x03A7]=0x03A7, [0x03A8]=0x03A8, [0x03A9]=0x03A9,
},
+ lcgreek = {
+ [0x03B1]=0x03B1, [0x03B2]=0x03B2, [0x03B3]=0x03B3, [0x03B4]=0x03B4, [0x03B5]=0x03B5,
+ [0x03B6]=0x03B6, [0x03B7]=0x03B7, [0x03B8]=0x03B8, [0x03B9]=0x03B9, [0x03BA]=0x03BA,
+ [0x03BB]=0x03BB, [0x03BC]=0x03BC, [0x03BD]=0x03BD, [0x03BE]=0x03BE, [0x03BF]=0x03BF,
+ [0x03C0]=0x03C0, [0x03C1]=0x03C1, [0x03C2]=0x03C2, [0x03C3]=0x03C3, [0x03C4]=0x03C4,
+ [0x03C5]=0x03C5, [0x03C6]=0x03C6, [0x03C7]=0x03C7, [0x03C8]=0x03C8, [0x03C9]=0x03C9,
+ [0x03D1]=0x03D1, [0x03D5]=0x03D5, [0x03D6]=0x03D6, [0x03F0]=0x03F0, [0x03F1]=0x03F1,
+ [0x03F4]=0x03F4, [0x03F5]=0x03F5,
},
- sansserif = {
- tf = {
- digits = 0x1D7E2,
- ucletters = 0x1D5A0,
- lcletters = 0x1D5BA,
- },
- it = {
- ucletters = 0x1D608,
- lcletters = 0x1D622,
- },
- bf = {
- digits = 0x1D7EC,
- ucletters = 0x1D5D4,
- lcletters = 0x1D5EE,
- ucgreek = {
- [0x0391]=0x1D756, [0x0392]=0x1D757, [0x0393]=0x1D758, [0x0394]=0x1D759, [0x0395]=0x1D75A,
- [0x0396]=0x1D75B, [0x0397]=0x1D75C, [0x0398]=0x1D75D, [0x0399]=0x1D75E, [0x039A]=0x1D75F,
- [0x039B]=0x1D760, [0x039C]=0x1D761, [0x039D]=0x1D762, [0x039E]=0x1D763, [0x039F]=0x1D764,
- [0x03A0]=0x1D765, [0x03A1]=0x1D766, [0x03A3]=0x1D768, [0x03A4]=0x1D769, [0x03A5]=0x1D76A,
- [0x03A6]=0x1D76B, [0x03A7]=0x1D76C, [0x03A8]=0x1D76D, [0x03A9]=0x1D76E,
- },
- lcgreek = {
- [0x03B1]=0x1D770, [0x03B2]=0x1D771, [0x03B3]=0x1D772, [0x03B4]=0x1D773, [0x03B5]=0x1D774,
- [0x03B6]=0x1D775, [0x03B7]=0x1D776, [0x03B8]=0x1D777, [0x03B9]=0x1D778, [0x03BA]=0x1D779,
- [0x03BB]=0x1D77A, [0x03BC]=0x1D77B, [0x03BD]=0x1D77C, [0x03BE]=0x1D77D, [0x03BF]=0x1D77E,
- [0x03C0]=0x1D77F, [0x03C1]=0x1D780, [0x03C2]=0x1D781, [0x03C3]=0x1D782, [0x03C4]=0x1D783,
- [0x03C5]=0x1D784, [0x03C6]=0x1D785, [0x03C7]=0x1D786, [0x03C8]=0x1D787, [0x03C9]=0x1D788,
- [0x03D1]=0x1D78B, [0x03D5]=0x1D78D, [0x03D6]=0x1D78F, [0x03F0]=0x1D78C, [0x03F1]=0x1D78E,
- [0x03F4]=0x1D767, [0x03F5]=0x1D78A,
- },
- symbols = {
- [0x2202]=0x1D789, [0x2207]=0x1D76F,
- },
- },
- bi = {
- ucletters = 0x1D63C,
- lcletters = 0x1D656,
- ucgreek = {
- [0x0391]=0x1D790, [0x0392]=0x1D791, [0x0393]=0x1D792, [0x0394]=0x1D793, [0x0395]=0x1D794,
- [0x0396]=0x1D795, [0x0397]=0x1D796, [0x0398]=0x1D797, [0x0399]=0x1D798, [0x039A]=0x1D799,
- [0x039B]=0x1D79A, [0x039C]=0x1D79B, [0x039D]=0x1D79C, [0x039E]=0x1D79D, [0x039F]=0x1D79E,
- [0x03A0]=0x1D79F, [0x03A1]=0x1D7A0, [0x03A3]=0x1D7A2, [0x03A4]=0x1D7A3, [0x03A5]=0x1D7A4,
- [0x03A6]=0x1D7A5, [0x03A7]=0x1D7A6, [0x03A8]=0x1D7A7, [0x03A9]=0x1D7A8,
- },
- lcgreek = {
- [0x03B1]=0x1D7AA, [0x03B2]=0x1D7AB, [0x03B3]=0x1D7AC, [0x03B4]=0x1D7AD, [0x03B5]=0x1D7AE,
- [0x03B6]=0x1D7AF, [0x03B7]=0x1D7B0, [0x03B8]=0x1D7B1, [0x03B9]=0x1D7B2, [0x03BA]=0x1D7B3,
- [0x03BB]=0x1D7B4, [0x03BC]=0x1D7B5, [0x03BD]=0x1D7B6, [0x03BE]=0x1D7B7, [0x03BF]=0x1D7B8,
- [0x03C0]=0x1D7B9, [0x03C1]=0x1D7BA, [0x03C2]=0x1D7BB, [0x03C3]=0x1D7BC, [0x03C4]=0x1D7BD,
- [0x03C5]=0x1D7BE, [0x03C6]=0x1D7BF, [0x03C7]=0x1D7C0, [0x03C8]=0x1D7C1, [0x03C9]=0x1D7C2,
- [0x03D1]=0x1D7C5, [0x03D5]=0x1D7C7, [0x03D6]=0x1D7C9, [0x03F0]=0x1D7C6, [0x03F1]=0x1D7C8,
- [0x03F4]=0x1D7A1, [0x03F5]=0x1D7C4,
- },
- symbols = {
- [0x2202]=0x1D7C3, [0x2207]=0x1D7A9,
- },
- },
+ symbols = {
+ [0x2202]=0x2202, [0x2207]=0x2207,
},
- monospaced = {
- tf = {
- digits = 0x1D7F6,
- ucletters = 0x1D670,
- lcletters = 0x1D68A,
- },
+}
+
+local regular_it = {
+ digits = regular_tf.digits,
+ ucletters = 0x1D434,
+ lcletters = { -- H
+ [0x00061]=0x1D44E, [0x00062]=0x1D44F, [0x00063]=0x1D450, [0x00064]=0x1D451, [0x00065]=0x1D452,
+ [0x00066]=0x1D453, [0x00067]=0x1D454, [0x00068]=0x0210E, [0x00069]=0x1D456, [0x0006A]=0x1D457,
+ [0x0006B]=0x1D458, [0x0006C]=0x1D459, [0x0006D]=0x1D45A, [0x0006E]=0x1D45B, [0x0006F]=0x1D45C,
+ [0x00070]=0x1D45D, [0x00071]=0x1D45E, [0x00072]=0x1D45F, [0x00073]=0x1D460, [0x00074]=0x1D461,
+ [0x00075]=0x1D462, [0x00076]=0x1D463, [0x00077]=0x1D464, [0x00078]=0x1D465, [0x00079]=0x1D466,
+ [0x0007A]=0x1D467,
},
- blackboard = { -- ok
- tf = {
- digits = 0x1D7D8,
- ucletters = { -- C H N P Q R Z
- [0x00041]=0x1D538, [0x00042]=0x1D539, [0x00043]=0x02102, [0x00044]=0x1D53B, [0x00045]=0x1D53C,
- [0x00046]=0x1D53D, [0x00047]=0x1D53E, [0x00048]=0x0210D, [0x00049]=0x1D540, [0x0004A]=0x1D541,
- [0x0004B]=0x1D542, [0x0004C]=0x1D543, [0x0004D]=0x1D544, [0x0004E]=0x02115, [0x0004F]=0x1D546,
- [0x00050]=0x02119, [0x00051]=0x0211A, [0x00052]=0x0211D, [0x00053]=0x1D54A, [0x00054]=0x1D54B,
- [0x00055]=0x1D54C, [0x00056]=0x1D54D, [0x00057]=0x1D54E, [0x00058]=0x1D54F, [0x00059]=0x1D550,
- [0x0005A]=0x02124,
- },
- lcletters = 0x1D552,
- lcgreek = { -- gamma pi
- [0x03B3]=0x0213C, [0x03C0]=0x0213D,
- },
- ucgreek = { -- Gamma pi
- [0x0393]=0x0213E, [0x03A0]=0x0213F,
- },
- symbols = { -- sum
- [0x2211]=0x02140,
- },
+ ucgreek = {
+ [0x0391]=0x1D6E2, [0x0392]=0x1D6E3, [0x0393]=0x1D6E4, [0x0394]=0x1D6E5, [0x0395]=0x1D6E6,
+ [0x0396]=0x1D6E7, [0x0397]=0x1D6E8, [0x0398]=0x1D6E9, [0x0399]=0x1D6EA, [0x039A]=0x1D6EB,
+ [0x039B]=0x1D6EC, [0x039C]=0x1D6ED, [0x039D]=0x1D6EE, [0x039E]=0x1D6EF, [0x039F]=0x1D6F0,
+ [0x03A0]=0x1D6F1, [0x03A1]=0x1D6F2, [0x03A3]=0x1D6F4, [0x03A4]=0x1D6F5, [0x03A5]=0x1D6F6,
+ [0x03A6]=0x1D6F7, [0x03A7]=0x1D6F8, [0x03A8]=0x1D6F9, [0x03A9]=0x1D6FA,
},
+ lcgreek = {
+ [0x03B1]=0x1D6FC, [0x03B2]=0x1D6FD, [0x03B3]=0x1D6FE, [0x03B4]=0x1D6FF, [0x03B5]=0x1D700,
+ [0x03B6]=0x1D701, [0x03B7]=0x1D702, [0x03B8]=0x1D703, [0x03B9]=0x1D704, [0x03BA]=0x1D705,
+ [0x03BB]=0x1D706, [0x03BC]=0x1D707, [0x03BD]=0x1D708, [0x03BE]=0x1D709, [0x03BF]=0x1D70A,
+ [0x03C0]=0x1D70B, [0x03C1]=0x1D70C, [0x03C2]=0x1D70D, [0x03C3]=0x1D70E, [0x03C4]=0x1D70F,
+ [0x03C5]=0x1D710, [0x03C6]=0x1D711, [0x03C7]=0x1D712, [0x03C8]=0x1D713, [0x03C9]=0x1D714,
+ [0x03D1]=0x1D717, [0x03D5]=0x1D719, [0x03D6]=0x1D71B, [0x03F0]=0x1D718, [0x03F1]=0x1D71A,
+ [0x03F4]=0x1D6F3, [0x03F5]=0x1D716,
+ },
+ symbols = {
+ [0x2202]=0x1D715, [0x2207]=0x1D6FB,
},
- fraktur = { -- ok
- tf= {
- ucletters = { -- C H I R Z
- [0x00041]=0x1D504, [0x00042]=0x1D505, [0x00043]=0x0212D, [0x00044]=0x1D507, [0x00045]=0x1D508,
- [0x00046]=0x1D509, [0x00047]=0x1D50A, [0x00048]=0x0210C, [0x00049]=0x02111, [0x0004A]=0x1D50D,
- [0x0004B]=0x1D50E, [0x0004C]=0x1D50F, [0x0004D]=0x1D510, [0x0004E]=0x1D511, [0x0004F]=0x1D512,
- [0x00050]=0x1D513, [0x00051]=0x1D514, [0x00052]=0x0211C, [0x00053]=0x1D516, [0x00054]=0x1D517,
- [0x00055]=0x1D518, [0x00056]=0x1D519, [0x00057]=0x1D51A, [0x00058]=0x1D51B, [0x00059]=0x1D51C,
- [0x0005A]=0x02128,
- },
- lcletters = 0x1D51E,
+}
+
+local regular_bf= {
+ digits = 0x1D7CE,
+ ucletters = 0x1D400,
+ lcletters = 0x1D41A,
+ ucgreek = {
+ [0x0391]=0x1D6A8, [0x0392]=0x1D6A9, [0x0393]=0x1D6AA, [0x0394]=0x1D6AB, [0x0395]=0x1D6AC,
+ [0x0396]=0x1D6AD, [0x0397]=0x1D6AE, [0x0398]=0x1D6AF, [0x0399]=0x1D6B0, [0x039A]=0x1D6B1,
+ [0x039B]=0x1D6B2, [0x039C]=0x1D6B3, [0x039D]=0x1D6B4, [0x039E]=0x1D6B5, [0x039F]=0x1D6B6,
+ [0x03A0]=0x1D6B7, [0x03A1]=0x1D6B8, [0x03A3]=0x1D6BA, [0x03A4]=0x1D6BB, [0x03A5]=0x1D6BC,
+ [0x03A6]=0x1D6BD, [0x03A7]=0x1D6BE, [0x03A8]=0x1D6BF, [0x03A9]=0x1D6C0,
},
- bf = {
- ucletters = 0x1D56C,
- lcletters = 0x1D586,
+ lcgreek = {
+ [0x03B1]=0x1D6C2, [0x03B2]=0x1D6C3, [0x03B3]=0x1D6C4, [0x03B4]=0x1D6C5, [0x03B5]=0x1D6C6,
+ [0x03B6]=0x1D6C7, [0x03B7]=0x1D6C8, [0x03B8]=0x1D6C9, [0x03B9]=0x1D6CA, [0x03BA]=0x1D6CB,
+ [0x03BB]=0x1D6CC, [0x03BC]=0x1D6CD, [0x03BD]=0x1D6CE, [0x03BE]=0x1D6CF, [0x03BF]=0x1D6D0,
+ [0x03C0]=0x1D6D1, [0x03C1]=0x1D6D2, [0x03C2]=0x1D6D3, [0x03C3]=0x1D6D4, [0x03C4]=0x1D6D5,
+ [0x03C5]=0x1D6D6, [0x03C6]=0x1D6D7, [0x03C7]=0x1D6D8, [0x03C8]=0x1D6D9, [0x03C9]=0x1D6DA,
+ [0x03D1]=0x1D6DD, [0x03D5]=0x1D6DF, [0x03D6]=0x1D6E1, [0x03F0]=0x1D6DE, [0x03F1]=0x1D6E0,
+ [0x03F4]=0x1D6B9, [0x03F5]=0x1D6DC,
+ },
+ symbols = {
+ [0x2202]=0x1D6DB, [0x2207]=0x1D6C1,
+ },
+}
+
+local regular_bi = {
+ digits = regular_bf.digits,
+ ucletters = 0x1D468,
+ lcletters = 0x1D482,
+ ucgreek = {
+ [0x0391]=0x1D71C, [0x0392]=0x1D71D, [0x0393]=0x1D71E, [0x0394]=0x1D71F, [0x0395]=0x1D720,
+ [0x0396]=0x1D721, [0x0397]=0x1D722, [0x0398]=0x1D723, [0x0399]=0x1D724, [0x039A]=0x1D725,
+ [0x039B]=0x1D726, [0x039C]=0x1D727, [0x039D]=0x1D728, [0x039E]=0x1D729, [0x039F]=0x1D72A,
+ [0x03A0]=0x1D72B, [0x03A1]=0x1D72C, [0x03A3]=0x1D72E, [0x03A4]=0x1D72F, [0x03A5]=0x1D730,
+ [0x03A6]=0x1D731, [0x03A7]=0x1D732, [0x03A8]=0x1D733, [0x03A9]=0x1D734,
},
+ lcgreek = {
+ [0x03B1]=0x1D736, [0x03B2]=0x1D737, [0x03B3]=0x1D738, [0x03B4]=0x1D739, [0x03B5]=0x1D73A,
+ [0x03B6]=0x1D73B, [0x03B7]=0x1D73C, [0x03B8]=0x1D73D, [0x03B9]=0x1D73E, [0x03BA]=0x1D73F,
+ [0x03BB]=0x1D740, [0x03BC]=0x1D741, [0x03BD]=0x1D742, [0x03BE]=0x1D743, [0x03BF]=0x1D744,
+ [0x03C0]=0x1D745, [0x03C1]=0x1D746, [0x03C2]=0x1D747, [0x03C3]=0x1D748, [0x03C4]=0x1D749,
+ [0x03C5]=0x1D74A, [0x03C6]=0x1D74B, [0x03C7]=0x1D74C, [0x03C8]=0x1D74D, [0x03C9]=0x1D74E,
+ [0x03D1]=0x1D751, [0x03D5]=0x1D753, [0x03D6]=0x1D755, [0x03F0]=0x1D752, [0x03F1]=0x1D754,
+ [0x03F4]=0x1D72D, [0x03F5]=0x1D750,
},
- script = {
- tf= {
- ucletters = { -- B E F H I L M R -- P 2118
- [0x00041]=0x1D49C, [0x00042]=0x0212C, [0x00043]=0x1D49E, [0x00044]=0x1D49F, [0x00045]=0x02130,
- [0x00046]=0x02131, [0x00047]=0x1D4A2, [0x00048]=0x0210B, [0x00049]=0x02110, [0x0004A]=0x1D4A5,
- [0x0004B]=0x1D4A6, [0x0004C]=0x02112, [0x0004D]=0x02133, [0x0004E]=0x1D4A9, [0x0004F]=0x1D4AA,
- [0x00050]=0x1D4AB, [0x00051]=0x1D4AC, [0x00052]=0x0211B, [0x00053]=0x1D4AE, [0x00054]=0x1D4AF,
- [0x00055]=0x1D4B0, [0x00056]=0x1D4B1, [0x00057]=0x1D4B2, [0x00058]=0x1D4B3, [0x00059]=0x1D4B4,
- [0x0005A]=0x1D4B5,
- },
- lcletters = { -- E G O -- L 2113
- [0x00061]=0x1D4B6, [0x00062]=0x1D4B7, [0x00063]=0x1D4B8, [0x00064]=0x1D4B9, [0x00065]=0x0212F,
- [0x00066]=0x1D4BB, [0x00067]=0x0210A, [0x00068]=0x1D4BD, [0x00069]=0x1D4BE, [0x0006A]=0x1D4BF,
- [0x0006B]=0x1D4C0, [0x0006C]=0x1D4C1, [0x0006D]=0x1D4C2, [0x0006E]=0x1D4C3, [0x0006F]=0x02134,
- [0x00070]=0x1D4C5, [0x00071]=0x1D4C6, [0x00072]=0x1D4C7, [0x00073]=0x1D4C8, [0x00074]=0x1D4C9,
- [0x00075]=0x1D4CA, [0x00076]=0x1D4CB, [0x00077]=0x1D4CC, [0x00078]=0x1D4CD, [0x00079]=0x1D4CE,
- [0x0007A]=0x1D4CF,
- }
+ symbols = {
+ [0x2202]=0x1D74F, [0x2207]=0x1D735,
+ },
+}
+
+local regular = {
+ tf = regular_tf,
+ it = regular_it,
+ bf = regular_bf,
+ bi = regular_bi,
+}
+
+local sansserif_tf = {
+ digits = 0x1D7E2,
+ ucletters = 0x1D5A0,
+ lcletters = 0x1D5BA,
+ lcgreek = regular_tf.lcgreek,
+ ucgreek = regular_tf.ucgreek,
+ symbols = regular_tf.symbols,
+}
+
+local sansserif_it = {
+ digits = regular_tf.digits,
+ ucletters = 0x1D608,
+ lcletters = 0x1D622,
+ lcgreek = regular_tf.lcgreek,
+ ucgreek = regular_tf.ucgreek,
+ symbols = regular_tf.symbols,
+}
+
+local sansserif_bf = {
+ digits = 0x1D7EC,
+ ucletters = 0x1D5D4,
+ lcletters = 0x1D5EE,
+ ucgreek = {
+ [0x0391]=0x1D756, [0x0392]=0x1D757, [0x0393]=0x1D758, [0x0394]=0x1D759, [0x0395]=0x1D75A,
+ [0x0396]=0x1D75B, [0x0397]=0x1D75C, [0x0398]=0x1D75D, [0x0399]=0x1D75E, [0x039A]=0x1D75F,
+ [0x039B]=0x1D760, [0x039C]=0x1D761, [0x039D]=0x1D762, [0x039E]=0x1D763, [0x039F]=0x1D764,
+ [0x03A0]=0x1D765, [0x03A1]=0x1D766, [0x03A3]=0x1D768, [0x03A4]=0x1D769, [0x03A5]=0x1D76A,
+ [0x03A6]=0x1D76B, [0x03A7]=0x1D76C, [0x03A8]=0x1D76D, [0x03A9]=0x1D76E,
},
- bf = {
- ucletters = 0x1D4D0,
- lcletters = 0x1D4EA,
+ lcgreek = {
+ [0x03B1]=0x1D770, [0x03B2]=0x1D771, [0x03B3]=0x1D772, [0x03B4]=0x1D773, [0x03B5]=0x1D774,
+ [0x03B6]=0x1D775, [0x03B7]=0x1D776, [0x03B8]=0x1D777, [0x03B9]=0x1D778, [0x03BA]=0x1D779,
+ [0x03BB]=0x1D77A, [0x03BC]=0x1D77B, [0x03BD]=0x1D77C, [0x03BE]=0x1D77D, [0x03BF]=0x1D77E,
+ [0x03C0]=0x1D77F, [0x03C1]=0x1D780, [0x03C2]=0x1D781, [0x03C3]=0x1D782, [0x03C4]=0x1D783,
+ [0x03C5]=0x1D784, [0x03C6]=0x1D785, [0x03C7]=0x1D786, [0x03C8]=0x1D787, [0x03C9]=0x1D788,
+ [0x03D1]=0x1D78B, [0x03D5]=0x1D78D, [0x03D6]=0x1D78F, [0x03F0]=0x1D78C, [0x03F1]=0x1D78E,
+ [0x03F4]=0x1D767, [0x03F5]=0x1D78A,
+ },
+ symbols = {
+ [0x2202]=0x1D789, [0x2207]=0x1D76F,
+ },
+}
+
+local sansserif_bi = {
+ digits = sansserif_bf.digits,
+ ucletters = 0x1D63C,
+ lcletters = 0x1D656,
+ ucgreek = {
+ [0x0391]=0x1D790, [0x0392]=0x1D791, [0x0393]=0x1D792, [0x0394]=0x1D793, [0x0395]=0x1D794,
+ [0x0396]=0x1D795, [0x0397]=0x1D796, [0x0398]=0x1D797, [0x0399]=0x1D798, [0x039A]=0x1D799,
+ [0x039B]=0x1D79A, [0x039C]=0x1D79B, [0x039D]=0x1D79C, [0x039E]=0x1D79D, [0x039F]=0x1D79E,
+ [0x03A0]=0x1D79F, [0x03A1]=0x1D7A0, [0x03A3]=0x1D7A2, [0x03A4]=0x1D7A3, [0x03A5]=0x1D7A4,
+ [0x03A6]=0x1D7A5, [0x03A7]=0x1D7A6, [0x03A8]=0x1D7A7, [0x03A9]=0x1D7A8,
},
+ lcgreek = {
+ [0x03B1]=0x1D7AA, [0x03B2]=0x1D7AB, [0x03B3]=0x1D7AC, [0x03B4]=0x1D7AD, [0x03B5]=0x1D7AE,
+ [0x03B6]=0x1D7AF, [0x03B7]=0x1D7B0, [0x03B8]=0x1D7B1, [0x03B9]=0x1D7B2, [0x03BA]=0x1D7B3,
+ [0x03BB]=0x1D7B4, [0x03BC]=0x1D7B5, [0x03BD]=0x1D7B6, [0x03BE]=0x1D7B7, [0x03BF]=0x1D7B8,
+ [0x03C0]=0x1D7B9, [0x03C1]=0x1D7BA, [0x03C2]=0x1D7BB, [0x03C3]=0x1D7BC, [0x03C4]=0x1D7BD,
+ [0x03C5]=0x1D7BE, [0x03C6]=0x1D7BF, [0x03C7]=0x1D7C0, [0x03C8]=0x1D7C1, [0x03C9]=0x1D7C2,
+ [0x03D1]=0x1D7C5, [0x03D5]=0x1D7C7, [0x03D6]=0x1D7C9, [0x03F0]=0x1D7C6, [0x03F1]=0x1D7C8,
+ [0x03F4]=0x1D7A1, [0x03F5]=0x1D7C4,
},
+ symbols = {
+ [0x2202]=0x1D7C3, [0x2207]=0x1D7A9,
+ },
+}
+
+local sansserif = {
+ tf = sansserif_tf,
+ it = sansserif_it,
+ bf = sansserif_bf,
+ bi = sansserif_bi,
+}
+
+local monospaced_tf = {
+ digits = 0x1D7F6,
+ ucletters = 0x1D670,
+ lcletters = 0x1D68A,
+ lcgreek = sansserif_tf.lcgreek,
+ ucgreek = sansserif_tf.ucgreek,
+ symbols = sansserif_tf.symbols,
+}
+
+local monospaced = {
+ tf = monospaced_tf,
+ it = sansserif_tf,
+ bf = sansserif_tf,
+ bi = sansserif_bf,
+}
+
+local blackboard_tf = {
+ digits = 0x1D7D8,
+ ucletters = { -- C H N P Q R Z
+ [0x00041]=0x1D538, [0x00042]=0x1D539, [0x00043]=0x02102, [0x00044]=0x1D53B, [0x00045]=0x1D53C,
+ [0x00046]=0x1D53D, [0x00047]=0x1D53E, [0x00048]=0x0210D, [0x00049]=0x1D540, [0x0004A]=0x1D541,
+ [0x0004B]=0x1D542, [0x0004C]=0x1D543, [0x0004D]=0x1D544, [0x0004E]=0x02115, [0x0004F]=0x1D546,
+ [0x00050]=0x02119, [0x00051]=0x0211A, [0x00052]=0x0211D, [0x00053]=0x1D54A, [0x00054]=0x1D54B,
+ [0x00055]=0x1D54C, [0x00056]=0x1D54D, [0x00057]=0x1D54E, [0x00058]=0x1D54F, [0x00059]=0x1D550,
+ [0x0005A]=0x02124,
+ },
+ lcletters = 0x1D552,
+ lcgreek = { -- gamma pi
+ [0x03B3]=0x0213C, [0x03C0]=0x0213D,
+ },
+ ucgreek = { -- Gamma pi
+ [0x0393]=0x0213E, [0x03A0]=0x0213F,
+ },
+ symbols = { -- sum
+ [0x2211]=0x02140,
+ },
+}
+
+blackboard_tf.lcgreek = merged(regular_tf.lcgreek, blackboard_tf.lcgreek)
+blackboard_tf.ucgreek = merged(regular_tf.ucgreek, blackboard_tf.ucgreek)
+blackboard_tf.symbols = merged(regular_tf.symbols, blackboard_tf.symbols)
+
+local blackboard = {
+ tf = blackboard_tf,
+ it = blackboard_tf,
+ bf = blackboard_tf,
+ bi = blackboard_tf,
+}
+
+local fraktur_tf= {
+ digits = regular_tf.digits,
+ ucletters = { -- C H I R Z
+ [0x00041]=0x1D504, [0x00042]=0x1D505, [0x00043]=0x0212D, [0x00044]=0x1D507, [0x00045]=0x1D508,
+ [0x00046]=0x1D509, [0x00047]=0x1D50A, [0x00048]=0x0210C, [0x00049]=0x02111, [0x0004A]=0x1D50D,
+ [0x0004B]=0x1D50E, [0x0004C]=0x1D50F, [0x0004D]=0x1D510, [0x0004E]=0x1D511, [0x0004F]=0x1D512,
+ [0x00050]=0x1D513, [0x00051]=0x1D514, [0x00052]=0x0211C, [0x00053]=0x1D516, [0x00054]=0x1D517,
+ [0x00055]=0x1D518, [0x00056]=0x1D519, [0x00057]=0x1D51A, [0x00058]=0x1D51B, [0x00059]=0x1D51C,
+ [0x0005A]=0x02128,
+ },
+ lcletters = 0x1D51E,
+ lcgreek = regular_tf.lcgreek,
+ ucgreek = regular_tf.ucgreek,
+ symbols = regular_tf.symbols,
+}
+
+local fraktur_bf = {
+ digits = regular_bf.digits,
+ ucletters = 0x1D56C,
+ lcletters = 0x1D586,
+ lcgreek = regular_bf.lcgreek,
+ ucgreek = regular_bf.ucgreek,
+ symbols = regular_bf.symbols,
+}
+
+local fraktur = { -- ok
+ tf = fraktur_tf,
+ bf = fraktur_bf,
+ it = fraktur_tf,
+ bi = fraktur_bf,
+}
+
+local script_tf= {
+ digits = regular_tf.digits,
+ ucletters = { -- B E F H I L M R -- P 2118
+ [0x00041]=0x1D49C, [0x00042]=0x0212C, [0x00043]=0x1D49E, [0x00044]=0x1D49F, [0x00045]=0x02130,
+ [0x00046]=0x02131, [0x00047]=0x1D4A2, [0x00048]=0x0210B, [0x00049]=0x02110, [0x0004A]=0x1D4A5,
+ [0x0004B]=0x1D4A6, [0x0004C]=0x02112, [0x0004D]=0x02133, [0x0004E]=0x1D4A9, [0x0004F]=0x1D4AA,
+ [0x00050]=0x1D4AB, [0x00051]=0x1D4AC, [0x00052]=0x0211B, [0x00053]=0x1D4AE, [0x00054]=0x1D4AF,
+ [0x00055]=0x1D4B0, [0x00056]=0x1D4B1, [0x00057]=0x1D4B2, [0x00058]=0x1D4B3, [0x00059]=0x1D4B4,
+ [0x0005A]=0x1D4B5,
+ },
+ lcletters = { -- E G O -- L 2113
+ [0x00061]=0x1D4B6, [0x00062]=0x1D4B7, [0x00063]=0x1D4B8, [0x00064]=0x1D4B9, [0x00065]=0x0212F,
+ [0x00066]=0x1D4BB, [0x00067]=0x0210A, [0x00068]=0x1D4BD, [0x00069]=0x1D4BE, [0x0006A]=0x1D4BF,
+ [0x0006B]=0x1D4C0, [0x0006C]=0x1D4C1, [0x0006D]=0x1D4C2, [0x0006E]=0x1D4C3, [0x0006F]=0x02134,
+ [0x00070]=0x1D4C5, [0x00071]=0x1D4C6, [0x00072]=0x1D4C7, [0x00073]=0x1D4C8, [0x00074]=0x1D4C9,
+ [0x00075]=0x1D4CA, [0x00076]=0x1D4CB, [0x00077]=0x1D4CC, [0x00078]=0x1D4CD, [0x00079]=0x1D4CE,
+ [0x0007A]=0x1D4CF,
+ },
+ lcgreek = regular_tf.lcgreek,
+ ucgreek = regular_tf.ucgreek,
+ symbols = regular_tf.symbols,
+}
+
+local script_bf = {
+ digits = regular_bf.digits,
+ ucletters = 0x1D4D0,
+ lcletters = 0x1D4EA,
+ lcgreek = regular_bf.lcgreek,
+ ucgreek = regular_bf.ucgreek,
+ symbols = regular_bf.symbols,
+}
+
+local script = {
+ tf = script_tf,
+ bf = script_bf,
+ it = script_tf,
+ bi = script_bf,
+}
+
+local alphabets = allocate {
+ regular = regular,
+ sansserif = sansserif,
+ monospaced = monospaced,
+ blackboard = blackboard,
+ fraktur = fraktur,
+ script = script,
}
mathematics.alphabets = alphabets
local mathremap = { }
-for alphabet, styles in next, alphabets do
+for alphabet, styles in next, alphabets do -- per 9/6/2011 we also have attr for missing
for style, data in next, styles do
-- let's keep the long names (for tracing)
local n = #mathremap + 1
@@ -290,80 +374,31 @@ end
-- beware, these are shared tables (no problem since they're not
-- in unicode)
-alphabets.regular.it.digits = alphabets.regular.tf.digits
-alphabets.regular.bi.digits = alphabets.regular.bf.digits
-
-alphabets.sansserif.tf.symbols = alphabets.regular.tf.symbols
-alphabets.sansserif.tf.lcgreek = alphabets.regular.tf.lcgreek
-alphabets.sansserif.tf.ucgreek = alphabets.regular.tf.ucgreek
-alphabets.sansserif.tf.digits = alphabets.regular.tf.digits
-alphabets.sansserif.it.symbols = alphabets.regular.tf.symbols
-alphabets.sansserif.it.lcgreek = alphabets.regular.tf.lcgreek
-alphabets.sansserif.it.ucgreek = alphabets.regular.tf.ucgreek
-alphabets.sansserif.bi.digits = alphabets.regular.bf.digits
-
-alphabets.monospaced.tf.symbols = alphabets.sansserif.tf.symbols
-alphabets.monospaced.tf.lcgreek = alphabets.sansserif.tf.lcgreek
-alphabets.monospaced.tf.ucgreek = alphabets.sansserif.tf.ucgreek
-alphabets.monospaced.it = alphabets.sansserif.tf
-alphabets.monospaced.bf = alphabets.sansserif.tf
-alphabets.monospaced.bi = alphabets.sansserif.bf
-
-alphabets.regular.normal = alphabets.regular.tf
-alphabets.regular.italic = alphabets.regular.it
-alphabets.regular.bold = alphabets.regular.bf
-alphabets.regular.bolditalic = alphabets.regular.bi
-
-alphabets.sansserif.normal = alphabets.sansserif.tf
-alphabets.sansserif.italic = alphabets.sansserif.it
-alphabets.sansserif.bold = alphabets.sansserif.bf
-alphabets.sansserif.bolditalic = alphabets.sansserif.bi
-
-alphabets.monospaced.normal = alphabets.monospaced.tf
-alphabets.monospaced.italic = alphabets.monospaced.it
-alphabets.monospaced.bold = alphabets.monospaced.bf
-alphabets.monospaced.bolditalic = alphabets.monospaced.bi
-
-alphabets.blackboard.tf.symbols = table.merged(alphabets.regular.tf.symbols, alphabets.blackboard.tf.symbols)
-alphabets.blackboard.tf.lcgreek = table.merged(alphabets.regular.tf.lcgreek, alphabets.blackboard.tf.lcgreek)
-alphabets.blackboard.tf.ucgreek = table.merged(alphabets.regular.tf.ucgreek, alphabets.blackboard.tf.ucgreek)
-
-alphabets.blackboard.it = alphabets.blackboard.tf
-alphabets.blackboard.bf = alphabets.blackboard.tf
-alphabets.blackboard.bi = alphabets.blackboard.bf
-
-alphabets.fraktur.tf.digits = alphabets.regular.tf.digits
-alphabets.fraktur.tf.symbols = alphabets.regular.tf.symbols
-alphabets.fraktur.tf.lcgreek = alphabets.regular.tf.lcgreek
-alphabets.fraktur.tf.ucgreek = alphabets.regular.tf.ucgreek
-alphabets.fraktur.bf.digits = alphabets.regular.bf.digits
-alphabets.fraktur.bf.symbols = alphabets.regular.bf.symbols
-alphabets.fraktur.bf.lcgreek = alphabets.regular.bf.lcgreek
-alphabets.fraktur.bf.ucgreek = alphabets.regular.bf.ucgreek
-alphabets.fraktur.it = alphabets.fraktur.tf
-alphabets.fraktur.bi = alphabets.fraktur.bf
-
-alphabets.script.tf.digits = alphabets.regular.tf.digits
-alphabets.script.tf.symbols = alphabets.regular.tf.symbols
-alphabets.script.tf.lcgreek = alphabets.regular.tf.lcgreek
-alphabets.script.tf.ucgreek = alphabets.regular.tf.ucgreek
-alphabets.script.bf.digits = alphabets.regular.bf.digits
-alphabets.script.bf.symbols = alphabets.regular.bf.symbols
-alphabets.script.bf.lcgreek = alphabets.regular.bf.lcgreek
-alphabets.script.bf.ucgreek = alphabets.regular.bf.ucgreek
-alphabets.script.it = alphabets.script.tf
-alphabets.script.bi = alphabets.script.bf
-
-alphabets.tt = alphabets.monospaced
-alphabets.ss = alphabets.sansserif
-alphabets.rm = alphabets.regular
-alphabets.bb = alphabets.blackboard
-alphabets.fr = alphabets.fraktur
-alphabets.sr = alphabets.script
-
-alphabets.serif = alphabets.regular
-alphabets.type = alphabets.monospaced
-alphabets.teletype = alphabets.monospaced
+alphabets.tt = monospaced
+alphabets.ss = sansserif
+alphabets.rm = regular
+alphabets.bb = blackboard
+alphabets.fr = fraktur
+alphabets.sr = script
+
+alphabets.serif = regular
+alphabets.type = monospaced
+alphabets.teletype = monospaced
+
+regular.normal = regular_tf
+regular.italic = regular_it
+regular.bold = regular_bf
+regular.bolditalic = regular_bi
+
+sansserif.normal = sansserif_tf
+sansserif.italic = sansserif_it
+sansserif.bold = sansserif_bf
+sansserif.bolditalic = sansserif_bi
+
+monospaced.normal = monospaced_tf
+monospaced.italic = monospaced.it
+monospaced.bold = monospaced.bf
+monospaced.bolditalic = monospaced.bi
function mathematics.tostyle(attribute)
local r = mathremap[attribute]
@@ -380,7 +415,7 @@ end
local mathalphabet = attributes.private("mathalphabet")
function mathematics.getboth(alphabet,style)
- local data = alphabets[alphabet or "regular"] or alphabets.regular
+ local data = alphabets[alphabet or "regular"] or regular
data = data[style or "tf"] or data.tf
return data and data.attribute
end
@@ -393,7 +428,7 @@ function mathematics.getstyle(style)
end
function mathematics.syncboth(alphabet,style)
- local data = alphabets[alphabet or "regular"] or alphabets.regular
+ local data = alphabets[alphabet or "regular"] or regular
data = data[style or "tf"] or data.tf
texattribute[mathalphabet] = data and data.attribute or texattribute[mathalphabet]
end
@@ -413,9 +448,9 @@ function mathematics.syncname(alphabet)
texattribute[mathalphabet] = data and data.attribute or texattribute[mathalphabet]
end
-local issymbol = mathematics.alphabets.regular.tf.symbols
-local islcgreek = mathematics.alphabets.regular.tf.lcgreek
-local isucgreek = mathematics.alphabets.regular.tf.ucgreek
+local issymbol = regular.tf.symbols
+local islcgreek = regular.tf.lcgreek
+local isucgreek = regular.tf.ucgreek
local remapping = {
[1] = { what = "unchanged" }, -- upright
@@ -482,7 +517,6 @@ end
function mathematics.addfallbacks(main)
local characters = main.characters
- local regular = alphabets.regular
checkedcopy(characters,regular.bf.ucgreek,regular.tf.ucgreek)
checkedcopy(characters,regular.bf.lcgreek,regular.tf.lcgreek)
checkedcopy(characters,regular.bi.ucgreek,regular.it.ucgreek)
diff --git a/tex/context/base/math-tag.lua b/tex/context/base/math-tag.lua
index 815e76b9a..a5dfc933a 100644
--- a/tex/context/base/math-tag.lua
+++ b/tex/context/base/math-tag.lua
@@ -6,7 +6,8 @@ if not modules then modules = { } end modules ['math-tag'] = {
license = "see context related readme files"
}
-local find = string.find
+local find, match = string.find, string.match
+local insert, remove = table.insert, table.remove
local attributes, nodes = attributes, nodes
@@ -33,6 +34,7 @@ local math_fence_code = nodecodes.fence -- attr subtype
local hlist_code = nodecodes.hlist
local vlist_code = nodecodes.vlist
local glyph_code = nodecodes.glyph
+local glue_code = nodecodes.glue
local a_tagged = attributes.private('tagged')
local a_exportstatus = attributes.private('exportstatus')
@@ -84,6 +86,8 @@ end
-- todo: check function here and keep attribute the same
+local actionstack = { }
+
process = function(start) -- we cannot use the processor as we have no finalizers (yet)
while start do
local id = start.id
@@ -137,67 +141,84 @@ process = function(start) -- we cannot use the processor as we have no finalizer
elseif id == math_box_code or id == hlist_code or id == vlist_code then
-- keep an eye on math_box_code and see what ends up in there
local attr = get_attribute(start,a_tagged)
-local last = attr and taglist[attr]
-if last and find(last[#last],"formulacaption%-") then
- -- leave alone, will nicely move to the outer level
-else
- local text = start_tagged("mtext")
- set_attribute(start,a_tagged,text)
- local list = start.list
- if not list then
- -- empty list
- elseif not attr then
- -- box comes from strange place
- set_attributes(list,a_tagged,text)
+ local last = attr and taglist[attr]
+ if last and find(last[#last],"formulacaption[:%-]") then
+ -- leave alone, will nicely move to the outer level
else
- -- Beware, the first node in list is the actual list so we definitely
- -- need to nest. This approach is a hack, maybe I'll make a proper
- -- nesting feature to deal with this at another level. Here we just
- -- fake structure by enforcing the inner one.
- local tagdata = taglist[attr]
- local common = #tagdata + 1
- local function runner(list) -- quite inefficient
- local cache = { } -- we can have nested unboxed mess so best local to runner
- for n in traverse_nodes(list) do
- local id = n.id
- if id == hlist_code or id == vlist_code then
- runner(n.list)
- else -- if id == glyph_code then
- local aa = get_attribute(n,a_tagged) -- only glyph needed (huh?)
- if aa then
- local ac = cache[aa]
- if not ac then
- local tagdata = taglist[aa]
- local extra = #tagdata
- if common <= extra then
- for i=common,extra do
- ac = start_tagged(tagdata[i]) -- can be made faster
- end
- for i=common,extra do
- stop_tagged() -- can be made faster
+ local text = start_tagged("mtext")
+ set_attribute(start,a_tagged,text)
+ local list = start.list
+ if not list then
+ -- empty list
+ elseif not attr then
+ -- box comes from strange place
+ set_attributes(list,a_tagged,text)
+ else
+ -- Beware, the first node in list is the actual list so we definitely
+ -- need to nest. This approach is a hack, maybe I'll make a proper
+ -- nesting feature to deal with this at another level. Here we just
+ -- fake structure by enforcing the inner one.
+ local tagdata = taglist[attr]
+ local common = #tagdata + 1
+ local function runner(list) -- quite inefficient
+ local cache = { } -- we can have nested unboxed mess so best local to runner
+ for n in traverse_nodes(list) do
+ local id = n.id
+ if id == hlist_code or id == vlist_code then
+ runner(n.list)
+ else -- if id == glyph_code then
+ local aa = get_attribute(n,a_tagged) -- only glyph needed (huh?)
+ if aa then
+ local ac = cache[aa]
+ if not ac then
+ local tagdata = taglist[aa]
+ local extra = #tagdata
+ if common <= extra then
+ for i=common,extra do
+ ac = start_tagged(tagdata[i]) -- can be made faster
+ end
+ for i=common,extra do
+ stop_tagged() -- can be made faster
+ end
+ else
+ ac = text
end
- else
- ac = text
+ cache[aa] = ac
end
- cache[aa] = ac
+ set_attribute(n,a_tagged,ac)
+ else
+ set_attribute(n,a_tagged,text)
end
- set_attribute(n,a_tagged,ac)
- else
- set_attribute(n,a_tagged,text)
end
end
end
+ runner(list)
end
- runner(list)
+ stop_tagged()
end
- stop_tagged()
-end
elseif id == math_sub_code then
local list = start.list
if list then
- set_attribute(start,a_tagged,start_tagged("mrow"))
- process(list)
- stop_tagged()
+ local attr = get_attribute(start,a_tagged)
+ local last = attr and taglist[attr]
+ local action = last and match(last[#last],"maction:(.-)%-")
+ if action and action ~= "" then
+ if actionstack[#actionstack] == action then
+ set_attribute(start,a_tagged,start_tagged("mrow"))
+ process(list)
+ stop_tagged()
+ else
+ insert(actionstack,action)
+ set_attribute(start,a_tagged,start_tagged("mrow",{ detail = action }))
+ process(list)
+ stop_tagged()
+ remove(actionstack)
+ end
+ else
+ set_attribute(start,a_tagged,start_tagged("mrow"))
+ process(list)
+ stop_tagged()
+ end
end
elseif id == math_fraction_code then
local num, denom, left, right = start.num, start.denom, start.left, start.right
@@ -297,8 +318,11 @@ end
else
processsubsup(start)
end
+ elseif id == glue_code then
+ set_attribute(start,a_tagged,start_tagged("mspace"))
+ stop_tagged()
else
- set_attribute(start,a_tagged,start_tagged("merror"))
+ set_attribute(start,a_tagged,start_tagged("merror", { detail = nodecodes[i] } ))
stop_tagged()
end
start = start.next
diff --git a/tex/context/base/meta-ini.lua b/tex/context/base/meta-ini.lua
index 872c628aa..6e7053667 100644
--- a/tex/context/base/meta-ini.lua
+++ b/tex/context/base/meta-ini.lua
@@ -6,7 +6,8 @@ if not modules then modules = { } end modules ['meta-ini'] = {
license = "see context related readme files"
}
-local format = string.format
+local tonumber = tonumber
+local format, gmatch, match = string.format, string.gmatch, string.match
metapost = metapost or { }
@@ -35,7 +36,6 @@ local colorhash = attributes.list[attributes.private('color')]
local validdimen = lpeg.patterns.validdimen * lpeg.P(-1)
local lpegmatch = lpeg.match
-local gmatch = string.gmatch
local textype = tex.type
local MPcolor = context.MPcolor
@@ -43,7 +43,7 @@ function commands.prepareMPvariable(v) -- slow but ok
if v == "" then
MPcolor("black")
else
- local typ, var = string.match(v,"(.):(.*)")
+ local typ, var = match(v,"(.):(.*)")
if not typ then
-- parse
if colorhash[v] then
diff --git a/tex/context/base/mlib-pps.lua b/tex/context/base/mlib-pps.lua
index 27269a14e..a9fd5e9e2 100644
--- a/tex/context/base/mlib-pps.lua
+++ b/tex/context/base/mlib-pps.lua
@@ -13,7 +13,7 @@ if not modules then modules = { } end modules ['mlib-pps'] = {
--
-- todo: report max textexts
-local format, gmatch, match = string.format, string.gmatch, string.match
+local format, gmatch, match, split = string.format, string.gmatch, string.match, string.split
local tonumber, type = tonumber, type
local round = math.round
local insert, concat = table.insert, table.concat
@@ -103,9 +103,9 @@ end
--~
-local specificationsplitter = Ct(lpeg.splitat(" "))
-local colorsplitter = Ct(lpeg.splitter(":",tonumber)) -- no need for :
-local domainsplitter = Ct(lpeg.splitter(" ",tonumber))
+local specificationsplitter = lpeg.tsplitat(" ")
+local colorsplitter = lpeg.tsplitter(":",tonumber) -- no need for :
+local domainsplitter = lpeg.tsplitter(" ",tonumber)
local centersplitter = domainsplitter
local coordinatesplitter = domainsplitter
@@ -142,11 +142,13 @@ local function spotcolorconverter(parent, n, d, p)
return pdfcolor(colors.model,registercolor(nil,'spot',parent,n,d,p)), outercolor
end
+local commasplitter = lpeg.tsplitat(",")
+
local function checkandconvertspot(n_a,f_a,c_a,v_a,n_b,f_b,c_b,v_b)
-- must be the same but we don't check
local name = format("MpSh%s",nofshades)
- local ca = string.split(v_a,",")
- local cb = string.split(v_b,",")
+ local ca = lpegmatch(commasplitter,v_a)
+ local cb = lpegmatch(commasplitter,v_b)
if #ca == 0 or #cb == 0 then
return { 0 }, { 1 }, "DeviceGray", name
else
diff --git a/tex/context/base/mlib-run.lua b/tex/context/base/mlib-run.lua
index 72c16775d..1155792b3 100644
--- a/tex/context/base/mlib-run.lua
+++ b/tex/context/base/mlib-run.lua
@@ -36,6 +36,7 @@ local report_metapost = logs.reporter("metapost")
local texerrormessage = logs.texerrormessage
local format, gsub, match, find = string.format, string.gsub, string.match, string.find
+local emptystring = string.is_empty
local starttiming, stoptiming = statistics.starttiming, statistics.stoptiming
@@ -323,7 +324,7 @@ function metapost.process(mpx, data, trialrun, flusher, multipass, isextrapass,
if not metapost.reporterror(result) then
if metapost.showlog then
local str = (result.term ~= "" and result.term) or "no terminal output"
- if not string.is_empty(str) then
+ if not emptystring(str) then
metapost.lastlog = metapost.lastlog .. "\n" .. str
report_metapost("log: %s",str)
end
diff --git a/tex/context/base/node-acc.lua b/tex/context/base/node-acc.lua
index d773b7acf..d6032ebca 100644
--- a/tex/context/base/node-acc.lua
+++ b/tex/context/base/node-acc.lua
@@ -19,51 +19,61 @@ local copy_node = node.copy
local free_nodelist = node.flush_list
local glue_code = nodecodes.glue
+local kern_code = nodecodes.kern
local glyph_code = nodecodes.glyph
local hlist_code = nodecodes.hlist
local vlist_code = nodecodes.vlist
local a_characters = attributes.private("characters")
+local threshold = 65536
+
-- todo: nbsp etc
+-- todo: collapse kerns
local function injectspaces(head)
local p
- for n in traverse_nodes(head) do
+ local n = head
+ while n do
local id = n.id
if id == glue_code then -- todo: check for subtype related to spacing (13/14 but most seems to be 0)
- -- local at = has_attribute(n,attribute)
- -- if at then
---~ local a = has_attribute(n,a_characters)
---~ if a then
---~ -- handle this in the export
---~ else
- if p and p.id == glyph_code then
- local g = copy_node(p)
- local c = g.components
- if c then -- it happens that we copied a ligature
- free_nodelist(c)
- g.components = nil
- g.subtype = 256
- end
- local a = has_attribute(n,a_characters)
- local s = copy_node(n.spec)
- g.char, n.spec = 32, s
- p.next, g.prev = g, p
- g.next, n.prev = n, g
- s.width = s.width - g.width
- if a then
- set_attribute(g,a_characters,a)
- end
- set_attribute(s,a_characters,0)
- set_attribute(n,a_characters,0)
+--~ if n.spec.width > 0 then -- threshold
+ if p and p.id == glyph_code then
+ local g = copy_node(p)
+ local c = g.components
+ if c then -- it happens that we copied a ligature
+ free_nodelist(c)
+ g.components = nil
+ g.subtype = 256
+ end
+ local a = has_attribute(n,a_characters)
+ local s = copy_node(n.spec)
+ g.char, n.spec = 32, s
+ p.next, g.prev = g, p
+ g.next, n.prev = n, g
+ s.width = s.width - g.width
+ if a then
+ set_attribute(g,a_characters,a)
end
- -- end
+ set_attribute(s,a_characters,0)
+ set_attribute(n,a_characters,0)
+ end
--~ end
elseif id == hlist_code or id == vlist_code then
injectspaces(n.list,attribute)
+ elseif id == kern_code then
+ local first = n
+ while true do -- maybe we should delete kerns but who cares at this stage
+ local nn = n.next
+ if nn and nn.id == kern_code then -- guard against the end of the list
+ first.kern = first.kern + nn.kern
+ nn.kern = 0
+ n = nn
+ else
+ break -- no more kerns to collapse
+ end
+ end
end
p = n
+ n = n.next
end
return head, true
end
diff --git a/tex/context/base/node-fnt.lua b/tex/context/base/node-fnt.lua
index 7ceb96f80..036ff8fa1 100644
--- a/tex/context/base/node-fnt.lua
+++ b/tex/context/base/node-fnt.lua
@@ -9,7 +9,7 @@ if not modules then modules = { } end modules ['node-fnt'] = {
if not context then os.exit() end -- generic function in node-dum
local next, type = next, type
-local concat = table.concat
+local concat, keys = table.concat, table.keys
local nodes, node, fonts = nodes, node, fonts
@@ -130,8 +130,8 @@ function handlers.characters(head)
end
if trace_fontrun then
report_fonts()
- report_fonts("statics : %s",(u > 0 and concat(table.keys(usedfonts)," ")) or "none")
- report_fonts("dynamics: %s",(a > 0 and concat(table.keys(attrfonts)," ")) or "none")
+ report_fonts("statics : %s",(u > 0 and concat(keys(usedfonts)," ")) or "none")
+ report_fonts("dynamics: %s",(a > 0 and concat(keys(attrfonts)," ")) or "none")
report_fonts()
end
-- we could combine these and just make the attribute nil
diff --git a/tex/context/base/node-ini.lua b/tex/context/base/node-ini.lua
index eb70fa6e6..a27efe0cf 100644
--- a/tex/context/base/node-ini.lua
+++ b/tex/context/base/node-ini.lua
@@ -15,9 +15,10 @@ modules.</p>
local utf = unicode.utf8
local next, type = next, type
-local format, concat, match, gsub = string.format, table.concat, string.match, string.gsub
+local format, match, gsub = string.format, string.match, string.gsub
+local concat, remove = table.concat, table.remove
+local sortedhash, sortedkeys, swapped, tohash = table.sortedhash, table.sortedkeys, table.swapped, table.tohash
local utfchar = utf.char
-local swapped = table.swapped
local lpegmatch = lpeg.match
local formatcolumns = utilities.formatters.formatcolumns
@@ -196,8 +197,8 @@ nodes.codes = allocate {
function nodes.showcodes()
local t = { }
- for name, codes in table.sortedhash(nodes.codes) do
- local sorted = table.sortedkeys(codes)
+ for name, codes in sortedhash(nodes.codes) do
+ local sorted = sortedkeys(codes)
for i=1,#sorted do
local s = sorted[i]
if type(s) ~= "number" then
@@ -213,7 +214,7 @@ end
local whatsit_node = nodecodes.whatsit
-local messyhack = table.tohash { -- temporary solution
+local messyhack = tohash { -- temporary solution
nodecodes.attributelist,
nodecodes.attribute,
nodecodes.gluespec,
@@ -229,7 +230,7 @@ function nodes.fields(n)
if messyhack[id] then
for i=1,#t do
if t[i] == "subtype" then
- table.remove(t,i)
+ remove(t,i)
break
end
end
diff --git a/tex/context/base/node-ref.lua b/tex/context/base/node-ref.lua
index 19d602ed6..e0c241b35 100644
--- a/tex/context/base/node-ref.lua
+++ b/tex/context/base/node-ref.lua
@@ -16,6 +16,8 @@ if not modules then modules = { } end modules ['node-bck'] = {
-- is grouplevel still used?
+local format = string.format
+
local allocate, mark = utilities.storage.allocate, utilities.storage.mark
local cleanupreferences, cleanupdestinations = false, true
@@ -560,7 +562,7 @@ end
statistics.register("interactive elements", function()
if nofreferences > 0 or nofdestinations > 0 then
- return string.format("%s references, %s destinations",nofreferences,nofdestinations)
+ return format("%s references, %s destinations",nofreferences,nofdestinations)
else
return nil
end
diff --git a/tex/context/base/node-ser.lua b/tex/context/base/node-ser.lua
index aa4615626..a460a0953 100644
--- a/tex/context/base/node-ser.lua
+++ b/tex/context/base/node-ser.lua
@@ -9,7 +9,8 @@ if not modules then modules = { } end modules ['node-ser'] = {
-- beware, some field names will change in a next releases
-- of luatex; this is pretty old code that needs an overhaul
-local type, format, concat, rep = type, string.format, table.concat, string.rep
+local type, format, rep = type, string.format, string.rep
+local concat, tohash, sortedkeys = table.concat, table.tohash, table.sortedkeys
local allocate = utilities.storage.allocate
@@ -23,7 +24,7 @@ local nodefields = nodes.fields
local hlist_code = nodecodes.hlist
local vlist_code = nodecodes.vlist
-local expand = allocate ( table.tohash {
+local expand = allocate ( tohash {
"list", -- list_ptr & ins_ptr & adjust_ptr
"pre", --
"post", --
@@ -43,13 +44,13 @@ local expand = allocate ( table.tohash {
-- page_insert: "height", "last_ins_ptr", "best_ins_ptr"
-- split_insert: "height", "last_ins_ptr", "best_ins_ptr", "broken_ptr", "broken_ins"
-local ignore = allocate ( table.tohash {
+local ignore = allocate ( tohash {
"page_insert",
"split_insert",
"ref_count",
} )
-local dimension = allocate ( table.tohash {
+local dimension = allocate ( tohash {
"width", "height", "depth", "shift",
"stretch", "shrink",
"xoffset", "yoffset",
@@ -178,7 +179,7 @@ local function serialize(root,name,handle,depth,m)
if root.id then
fld = nodefields(root) -- we can cache these (todo)
else
- fld = table.sortedkeys(root)
+ fld = sortedkeys(root)
end
if type(root) == 'table' and root['type'] then -- userdata or table
handle(format("%s %s=%q,",depth,'type',root['type']))
diff --git a/tex/context/base/node-tsk.lua b/tex/context/base/node-tsk.lua
index 29a665ff0..a78393b82 100644
--- a/tex/context/base/node-tsk.lua
+++ b/tex/context/base/node-tsk.lua
@@ -10,6 +10,8 @@ if not modules then modules = { } end modules ['node-tsk'] = {
-- we already have dirty flags as well. On the other hand, nodes are
-- rather specialized and here we focus on node related tasks.
+local format = string.format
+
local trace_tasks = false trackers.register("tasks.creation", function(v) trace_tasks = v end)
local report_tasks = logs.reporter("tasks")
@@ -165,7 +167,7 @@ local created, total = 0, 0
statistics.register("node list callback tasks", function()
if total > 0 then
- return string.format("%s unique task lists, %s instances (re)created, %s calls",table.count(tasksdata),created,total)
+ return format("%s unique task lists, %s instances (re)created, %s calls",table.count(tasksdata),created,total)
else
return nil
end
diff --git a/tex/context/base/page-str.lua b/tex/context/base/page-str.lua
index 7ce0f3c0f..48edd4cfe 100644
--- a/tex/context/base/page-str.lua
+++ b/tex/context/base/page-str.lua
@@ -10,7 +10,7 @@ if not modules then modules = { } end modules ['page-str'] = {
 -- work in progress .. unfinished
-local concat = table.concat
+local concat, insert, remove = table.concat, table.insert, table.remove
local find_tail, write_node, free_node, copy_nodelist = node.slide, node.write, node.free, node.copy_list
local vpack_nodelist, hpack_nodelist = node.vpack, node.hpack
@@ -48,12 +48,12 @@ function streams.disable()
end
function streams.start(newname)
- table.insert(stack,name)
+ insert(stack,name)
name = newname
end
function streams.stop(newname)
- name = table.remove(stack)
+ name = remove(stack)
end
function streams.collect(head,where)
diff --git a/tex/context/base/spac-ali.lua b/tex/context/base/spac-ali.lua
index 37bff74d1..dfe8016ed 100644
--- a/tex/context/base/spac-ali.lua
+++ b/tex/context/base/spac-ali.lua
@@ -7,6 +7,7 @@ if not modules then modules = { } end modules ['spac-ali'] = {
}
local div = math.div
+local format = string.format
local tasks = nodes.tasks
local appendaction = tasks.appendaction
@@ -125,7 +126,7 @@ commands.setrealign = alignments.set
statistics.register("realigning", function()
if nofrealigned > 0 then
- return string.format("%s processed",nofrealigned)
+ return format("%s processed",nofrealigned)
else
return nil
end
diff --git a/tex/context/base/spac-ver.lua b/tex/context/base/spac-ver.lua
index 83ee6e492..6796c8206 100644
--- a/tex/context/base/spac-ver.lua
+++ b/tex/context/base/spac-ver.lua
@@ -27,6 +27,7 @@ local lpegmatch = lpeg.match
local unpack = unpack or table.unpack
local points = number.points
local allocate = utilities.storage.allocate
+local todimen = string.todimen
local P, C, R, S, Cc = lpeg.P, lpeg.C, lpeg.R, lpeg.S, lpeg.Cc
@@ -141,7 +142,7 @@ local function listtohash(str)
else
k = values[key]
if k then
- detail = string.todimen(detail)
+ detail = todimen(detail)
if detail then
t[k] = detail
end
diff --git a/tex/context/base/spac-ver.mkiv b/tex/context/base/spac-ver.mkiv
index 9f3af04ac..8bb83bdc0 100644
--- a/tex/context/base/spac-ver.mkiv
+++ b/tex/context/base/spac-ver.mkiv
@@ -1827,6 +1827,7 @@
\linesparameter\c!before
\pushmacro\checkindentation
\whitespace
+ \dostarttagged\t!lines\currentlines
\begingroup
\dosetlinesattributes\c!style\c!color
\setupindenting[\linesparameter\c!indenting]%
@@ -1840,14 +1841,19 @@
\gdef\afterfirstobeyedline
{\ifx\linesoption\v!packed\nobreak\fi
\linesparameter\c!command}}%
+ \dostarttagged\t!line\empty
\def\obeyedline
- {\par
+ {\dostoptagged
+ \par
+ \dostarttagged\t!line\empty
\futurelet\next\dobetweenthelines}%
\activatespacehandler{\linesparameter\c!space}%
\GotoPar}
\def\dostoplines
- {\endgroup
+ {\dostoptagged
+ \endgroup
+ \dostoptagged
\popmacro\checkindentation
\linesparameter\c!after
\egroup}
diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf
index 6ed453c24..0f70d464e 100644
--- a/tex/context/base/status-files.pdf
+++ b/tex/context/base/status-files.pdf
Binary files differ
diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf
index 692de4b23..eb731ba05 100644
--- a/tex/context/base/status-lua.pdf
+++ b/tex/context/base/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/strc-reg.lua b/tex/context/base/strc-reg.lua
index c19ae12d6..8ba01682b 100644
--- a/tex/context/base/strc-reg.lua
+++ b/tex/context/base/strc-reg.lua
@@ -227,7 +227,7 @@ end
registers.define = allocate
-local entrysplitter = lpeg.Ct(lpeg.splitat('+')) -- & obsolete in mkiv
+local entrysplitter = lpeg.tsplitat('+') -- & obsolete in mkiv
local tagged = { }
diff --git a/tex/context/base/strc-tag.lua b/tex/context/base/strc-tag.lua
index 3815deef0..c44c758f3 100644
--- a/tex/context/base/strc-tag.lua
+++ b/tex/context/base/strc-tag.lua
@@ -66,12 +66,15 @@ local properties = allocate {
descriptionsymbol = { pdf = "Span", nature = "inline" }, -- note reference
verbatimblock = { pdf = "Code", nature = "display" },
- verbatimlines = { pdf = "Code", nature = "display" },
+ verbatimlines = { pdf = "Code", nature = "display" },
verbatimline = { pdf = "Code", nature = "mixed" },
verbatim = { pdf = "Code", nature = "inline" },
+ lines = { pdf = "Code", nature = "display" },
+ line = { pdf = "Code", nature = "mixed" },
+
synonym = { pdf = "Span", nature = "inline" },
- sort = { pdf = "Span", nature = "inline" },
+ sorting = { pdf = "Span", nature = "inline" },
register = { pdf = "Div", nature = "display" },
registersection = { pdf = "Div", nature = "display" },
@@ -146,6 +149,7 @@ local properties = allocate {
mroot = { pdf = "Span", nature = "display" },
msqrt = { pdf = "Span", nature = "display" },
mfenced = { pdf = "Span", nature = "display" },
+ maction = { pdf = "Span", nature = "display" },
mtable = { pdf = "Table", nature = "display" }, -- might change
mtr = { pdf = "TR", nature = "display" }, -- might change
@@ -154,6 +158,10 @@ local properties = allocate {
ignore = { pdf = "Span", nature = "mixed" },
metadata = { pdf = "Div", nature = "display" },
+ sub = { pdf = "Span", nature = "inline" },
+ sup = { pdf = "Span", nature = "inline" },
+ subsup = { pdf = "Span", nature = "inline" },
+
}
tags.properties = properties
diff --git a/tex/context/base/strc-tag.mkiv b/tex/context/base/strc-tag.mkiv
index 40ded256e..558541f62 100644
--- a/tex/context/base/strc-tag.mkiv
+++ b/tex/context/base/strc-tag.mkiv
@@ -47,6 +47,9 @@
\def\t!verbatimline {verbatimline} % Code
\def\t!verbatim {verbatim} % Code
+\def\t!lines {lines} % Code
+\def\t!line {line} % Code
+
\def\t!sorting {sorting} % Span
\def\t!synonym {synonym} % Span
@@ -71,6 +74,7 @@
\def\t!mathtable {mtable} % Table
\def\t!mathtablerow {mtr} % TR
\def\t!mathtablecell {mtd} % TD
+\def\t!mathaction {maction} %
\def\t!list {list} % TOC
\def\t!listitem {listitem} % TOCI
@@ -114,6 +118,10 @@
\def\t!ignore {ignore} % Span
+\def\t!sub {sub} % Span
+\def\t!sup {sup} % Span
+\def\t!subsup {subsup} % Span
+
% \setuptaglabeltext
% [en]
% [\t!document=document]
diff --git a/tex/context/base/syst-lua.lua b/tex/context/base/syst-lua.lua
index 8a5a9531c..678842025 100644
--- a/tex/context/base/syst-lua.lua
+++ b/tex/context/base/syst-lua.lua
@@ -9,7 +9,7 @@ if not modules then modules = { } end modules ['syst-lua'] = {
local texsprint, texprint, texwrite, texiowrite_nl = tex.sprint, tex.print, tex.write, texio.write_nl
local format, find = string.format, string.find
local tonumber = tonumber
-local S, Ct, lpegmatch, lpegsplitat = lpeg.S, lpeg.Ct, lpeg.match, lpeg.splitat
+local S, lpegmatch, lpegtsplitat = lpeg.S, lpeg.match, lpeg.tsplitat
local ctxcatcodes = tex.ctxcatcodes
@@ -54,7 +54,7 @@ function commands.doifelsespaces(str)
return commands.doifelse(find(str,"^ +$"))
end
-local s = Ct(lpegsplitat(","))
+local s = lpegtsplitat(",")
local h = { }
function commands.doifcommonelse(a,b)
@@ -89,7 +89,7 @@ function commands.doifdimenstringelse(str)
testcase(lpegmatch(pattern,str))
end
-local splitter = lpegsplitat(S(". "))
+local splitter = lpegtsplitat(S(". "))
function commands.doifolderversionelse(one,two) -- one >= two
if not two then
diff --git a/tex/context/base/trac-fil.lua b/tex/context/base/trac-fil.lua
new file mode 100644
index 000000000..54ca6ac6b
--- /dev/null
+++ b/tex/context/base/trac-fil.lua
@@ -0,0 +1,144 @@
+if not modules then modules = { } end modules ['trac-fil'] = {
+ version = 1.001,
+ comment = "for the moment for myself",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local format, concat = string.format, table.concat
+local openfile = io.open
+local date = os.date
+
+local P, C, Cc, Cg, Cf, Ct, Cs = lpeg.P, lpeg.C, lpeg.Cc, lpeg.Cg, lpeg.Cf, lpeg.Ct, lpeg.Cs
+
+local patterns = lpeg.patterns
+local cardinal = patterns.cardinal
+
+patterns.timestamp = Cf(Ct("") * (
+ Cg (Cc("year") * (cardinal/tonumber)) * P("-")
+ * Cg (Cc("month") * (cardinal/tonumber)) * P("-")
+ * Cg (Cc("day") * (cardinal/tonumber)) * P(" ")
+ * Cg (Cc("hour") * (cardinal/tonumber)) * P(":")
+ * Cg (Cc("minute") * (cardinal/tonumber)) * P(":")
+ * Cg (Cc("second") * (cardinal/tonumber)) * P("+")
+ * Cg (Cc("thour") * (cardinal/tonumber)) * P(":")
+ * Cg (Cc("tminute") * (cardinal/tonumber))
+)^0, rawset)
+
+patterns.statusline = Cf(Ct("") * (
+ P("[") * Cg(Cc("timestamp") * patterns.timestamp) * P("]")
+ * patterns.whitespace^0
+ * Cg(Cc("status") * Cf(Ct("") * (Cg(C(patterns.letter^0) * "=" * Cs(patterns.unquoted)) * patterns.whitespace^0)^0, rawset))
+),rawset)
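+
+-- An illustrative sketch (not part of the module): a line as written by
+-- loggers.message below, say
+--
+--   [2011-06-11 16:45:35+02:00] runtime="1.234" status="ok"
+--
+-- is parsed by patterns.statusline into
+--
+--   { timestamp = { year = 2011, month = 6, day = 11, hour = 16, minute = 45,
+--                   second = 35, thour = 2, tminute = 0 },
+--     status    = { runtime = "1.234", status = "ok" } }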
+
+
+loggers = loggers or { }
+
+local tz = os.timezone(true)
+
+local bugged = { }
+
+function loggers.message(filename,t)
+ if not bugged[filename] then
+ local f = openfile(filename,"a+")
+ if not f then
+ dir.mkdirs(file.dirname(filename))
+ f = openfile(filename,"a+")
+ end
+ if f then
+ f:write("[",date("!%Y-%m-%d %H:%M:%S"),tz,"]")
+ for k, v in table.sortedpairs(t) do
+ f:write(" ",k,'="',v,'"')
+ end
+ f:write("\n")
+ f:close()
+ else
+ bugged[filename] = true
+ end
+ end
+end
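+
+-- A usage sketch (the filename and fields are only examples):
+--
+--   loggers.message("test.log", { status = "ok", runtime = 1.234 })
+--
+-- appends one timestamped line, with the fields written in sorted order.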
+
+--~ function loggers.collect(filename)
+--~ if lfs.isfile(filename) then
+--~ return lpeg.match(Ct(patterns.statusline^0),io.loaddata(filename))
+--~ else
+--~ return { }
+--~ end
+--~ end
+
+function loggers.collect(filename,result)
+ if lfs.isfile(filename) then
+ local r = lpeg.match(Ct(patterns.statusline^0),io.loaddata(filename))
+ if result then -- append
+ local nofresult = #result
+ for i=1,#r do
+ nofresult = nofresult + 1
+ result[nofresult] = r[i]
+ end
+ return result
+ else
+ return r
+ end
+ else
+ return result or { }
+ end
+end
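+
+-- A sketch of aggregating several logs into one list (the names are examples):
+--
+--   local entries = loggers.collect("test-1.log")
+--   entries       = loggers.collect("test-2.log",entries)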
+
+--~ local template = [[
+--~ <table>
+--~ <tr>%s</tr>
+--~ %s
+--~ </table>
+--~ ]]
+
+--~ function loggers.tohtml(entries,fields)
+--~ if not fields or #fields == 0 then
+--~ return ""
+--~ end
+--~ if type(entries) == "string" then
+--~ entries = loggers.collect(entries)
+--~ end
+--~ local scratch, lines = { }, { }
+--~ for i=1,#entries do
+--~ local entry = entries[i]
+--~ local status = entry.status
+--~ for i=1,#fields do
+--~ local field = fields[i]
+--~ local v = status[field.name]
+--~ if v ~= nil then
+--~ v = tostring(v)
+--~ local f = field.format
+--~ if f then v = format(f,v) end
+--~ scratch[i] = format("<td nowrap='nowrap' align='%s'>%s</td>",field.align or "left",v)
+--~ else
+--~ scratch[i] = "<td/>"
+--~ end
+--~ end
+--~ lines[i] = "<tr>" .. concat(scratch) .. "</tr>"
+--~ end
+--~ for i=1,#fields do
+--~ local field = fields[i]
+--~ scratch[i] = format("<th nowrap='nowrap' align='left'>%s</th>", field.label or field.name)
+--~ end
+--~ local result = format(template,concat(scratch),concat(lines,"\n"))
+--~ return result, entries
+--~ end
+
+--~ -- loggers.message("test.log", { name = "whatever", more = 123 })
+
+--~ local fields = {
+--~ -- { name = "id", align = "left" },
+--~ -- { name = "timestamp", align = "left" },
+--~ { name = "assessment", align = "left" },
+--~ { name = "assessmentname", align = "left" },
+--~ -- { name = "category", align = "left" },
+--~ { name = "filesize", align = "right" },
+--~ { name = "nofimages", align = "center" },
+--~ -- { name = "product", align = "left" },
+--~ { name = "resultsize", align = "right" },
+--~ { name = "fetchtime", align = "right", format = "%2.3f" },
+--~ { name = "runtime", align = "right", format = "%2.3f" },
+--~ { name = "organization", align = "left" },
+--~ -- { name = "username", align = "left" },
+--~ }
diff --git a/tex/context/base/trac-inf.lua b/tex/context/base/trac-inf.lua
index 5719b953f..5d8ea3cf8 100644
--- a/tex/context/base/trac-inf.lua
+++ b/tex/context/base/trac-inf.lua
@@ -11,7 +11,7 @@ if not modules then modules = { } end modules ['trac-inf'] = {
-- get warnings about assignments. This is more efficient than using rawset
-- and rawget.
-local format = string.format
+local format, lower = string.format, string.lower
local clock = os.gettimeofday or os.clock -- should go in environment
local write_nl = texio.write_nl
@@ -113,7 +113,7 @@ function statistics.show(reporter)
-- this code will move
local register = statistics.register
register("luatex banner", function()
- return string.lower(status.banner)
+ return lower(status.banner)
end)
register("control sequences", function()
return format("%s of %s", status.cs_count, status.hash_size+status.hash_extra)
diff --git a/tex/context/base/typo-mar.lua b/tex/context/base/typo-mar.lua
index 6bc985868..3b7e0317e 100644
--- a/tex/context/base/typo-mar.lua
+++ b/tex/context/base/typo-mar.lua
@@ -572,7 +572,7 @@ end
statistics.register("margin data", function()
if nofsaved > 0 then
- return string.format("%s entries, %s pending",nofsaved,nofdelayed)
+ return format("%s entries, %s pending",nofsaved,nofdelayed)
else
return nil
end
diff --git a/tex/context/base/util-deb.lua b/tex/context/base/util-deb.lua
index ce55de5c7..be0c244ff 100644
--- a/tex/context/base/util-deb.lua
+++ b/tex/context/base/util-deb.lua
@@ -144,6 +144,7 @@ end
--~ debugger.showstats(print,3)
local is_node = node and node.is_node
+local is_lpeg = lpeg and lpeg.type
function inspect(i) -- global function
local ti = type(i)
@@ -151,6 +152,8 @@ function inspect(i) -- global function
table.print(i,"table")
elseif is_node and is_node(i) then
table.print(nodes.astable(i),tostring(i))
+ elseif is_lpeg and is_lpeg(i) then
+ lpeg.print(i)
else
print(tostring(i))
end
diff --git a/tex/context/base/util-dim.lua b/tex/context/base/util-dim.lua
index 4e2cc1662..47e43c386 100644
--- a/tex/context/base/util-dim.lua
+++ b/tex/context/base/util-dim.lua
@@ -91,7 +91,12 @@ local function numbertodimen(n,unit,fmt)
return n
else
unit = unit or 'pt'
- return format(fmt or "%s%s",n*dimenfactors[unit],unit)
+ if not fmt then
+ fmt = "%s%s"
+ elseif fmt == true then
+ fmt = "%0.5f%s"
+ end
+ return format(fmt,n*dimenfactors[unit],unit)
-- if fmt then
-- return format(fmt,n*dimenfactors[unit],unit)
-- else
@@ -108,18 +113,18 @@ number.maxdimen = 1073741823
number.todimen = numbertodimen
number.dimenfactors = dimenfactors
-function number.topoints (n) return numbertodimen(n,"pt") end
-function number.toinches (n) return numbertodimen(n,"in") end
-function number.tocentimeters (n) return numbertodimen(n,"cm") end
-function number.tomillimeters (n) return numbertodimen(n,"mm") end
-function number.toscaledpoints(n) return numbertodimen(n,"sp") end
-function number.toscaledpoints(n) return n .. "sp" end
-function number.tobasepoints (n) return numbertodimen(n,"bp") end
-function number.topicas (n) return numbertodimen(n "pc") end
-function number.todidots (n) return numbertodimen(n,"dd") end
-function number.tociceros (n) return numbertodimen(n,"cc") end
-function number.tonewdidots (n) return numbertodimen(n,"nd") end
-function number.tonewciceros (n) return numbertodimen(n,"nc") end
+function number.topoints (n,fmt) return numbertodimen(n,"pt",fmt) end
+function number.toinches (n,fmt) return numbertodimen(n,"in",fmt) end
+function number.tocentimeters (n,fmt) return numbertodimen(n,"cm",fmt) end
+function number.tomillimeters (n,fmt) return numbertodimen(n,"mm",fmt) end
+function number.toscaledpoints(n,fmt) return numbertodimen(n,"sp",fmt) end
+function number.toscaledpoints(n) return n .. "sp" end
+function number.tobasepoints (n,fmt) return numbertodimen(n,"bp",fmt) end
+function number.topicas (n,fmt) return numbertodimen(n,"pc",fmt) end
+function number.todidots (n,fmt) return numbertodimen(n,"dd",fmt) end
+function number.tociceros (n,fmt) return numbertodimen(n,"cc",fmt) end
+function number.tonewdidots (n,fmt) return numbertodimen(n,"nd",fmt) end
+function number.tonewciceros (n,fmt) return numbertodimen(n,"nc",fmt) end
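+
+-- For instance (an illustrative sketch, assuming the usual sp based factors):
+--
+--   number.topoints(65536)      -- "1pt"
+--   number.topoints(65536,true) -- "1.00000pt"  (true selects the "%0.5f%s" format)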
--[[ldx--
<p>More interesting it to implement a (sort of) dimen datatype, one
diff --git a/tex/context/base/util-tab.lua b/tex/context/base/util-tab.lua
index 2aa0b34f0..81746630f 100644
--- a/tex/context/base/util-tab.lua
+++ b/tex/context/base/util-tab.lua
@@ -13,6 +13,7 @@ local tables = utilities.tables
local format, gmatch, rep = string.format, string.gmatch, string.rep
local concat, insert, remove = table.concat, table.insert, table.remove
local setmetatable, getmetatable, tonumber, tostring = setmetatable, getmetatable, tonumber, tostring
+local type, next, rawset = type, next, rawset
function tables.definetable(target) -- defines undefined tables
local composed, t, n = nil, { }, 0
@@ -105,3 +106,40 @@ function table.toxml(t,name,nobanner,indent,spaces)
end
return concat(result,"\n")
end
+
+-- also experimental
+
+-- encapsulate(table,utilities.tables)
+-- encapsulate(table,utilities.tables,true)
+-- encapsulate(table,true)
+
+function tables.encapsulate(core,capsule,protect)
+ if type(capsule) ~= "table" then
+ protect = true
+ capsule = { }
+ end
+ for key, value in next, core do
+ if capsule[key] then
+            print(format("\ninvalid inheritance '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ capsule[key] = value
+ end
+ end
+ if protect then
+ for key, value in next, core do
+ core[key] = nil
+ end
+ setmetatable(core, {
+ __index = capsule,
+ __newindex = function(t,key,value)
+ if capsule[key] then
+ print(format("\ninvalid overload '%s' in '%s'",key,tostring(core)))
+ os.exit()
+ else
+ rawset(t,key,value)
+ end
+ end
+ } )
+ end
+end
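+
+-- A behaviour sketch for the protect option (the names are made up):
+--
+--   local core, capsule = { foo = 1 }, { }
+--   tables.encapsulate(core,capsule,true)
+--   print(core.foo) -- 1, resolved through the capsule
+--   core.bar = 2    -- fine, ends up in core itself
+--   core.foo = 3    -- refused, 'foo' already lives in the capsule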
diff --git a/tex/context/base/x-calcmath.lua b/tex/context/base/x-calcmath.lua
index bcf72f26f..707abe82a 100644
--- a/tex/context/base/x-calcmath.lua
+++ b/tex/context/base/x-calcmath.lua
@@ -7,6 +7,7 @@ if not modules then modules = { } end modules ['x-calcmath'] = {
}
local format, lower, upper, gsub, sub = string.format, string.lower, string.upper, string.gsub, string.sub
+local concat = table.concat
local lpegmatch = lpeg.match
local calcmath = { }
@@ -225,7 +226,7 @@ if false then
local parser = space * grammar * -1
- local texprint = function(...) texio.write(table.concat{ ... }) end
+ local texprint = function(...) texio.write(concat{ ... }) end
local function has_factor(t)
for i=1,#t do
diff --git a/tex/context/base/x-ldx.lua b/tex/context/base/x-ldx.lua
index 18e7b9b38..2a04d6126 100644
--- a/tex/context/base/x-ldx.lua
+++ b/tex/context/base/x-ldx.lua
@@ -1,3 +1,15 @@
+if not modules then modules = { } end modules ['x-ldx'] = {
+ version = 1.001,
+ comment = "companion to x-ldx.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- --[[ldx--
+-- <topic>Introduction</topic>
+-- --ldx]]--
+
--[[ldx--
<source>Lua Documentation Module</source>
@@ -5,12 +17,11 @@ This file is part of the <logo label='context'/> documentation suite and
itself serves as an example of using <logo label='lua'/> in combination
with <logo label='tex'/>.
-I will rewrite this using lpeg once I have the time to study that nice new
-subsystem. On the other hand, we cannot expect proper <logo label='tex'/>
-ad for educational purposed the syntax migh be wrong.
+I will rewrite this using lpeg. On the other hand, we cannot expect proper
+<logo label='tex'/> and for educational purposes the syntax might be wrong.
--ldx]]--
--- there is anice parser on from http://lua-users.org/wiki/LpegRecipes (by
+-- there is a nice parser at http://lua-users.org/wiki/LpegRecipes (by
-- Patrick Donnelly) but lua crashes when I apply functions to some of the
-- matches
@@ -32,6 +43,10 @@ That way, the libraries included in the runner will be used.
-- begin library merge
-- end library merge
+local gsub, find, sub = string.gsub, string.find, string.sub
+local splitstring, emptystring = string.split, string.is_empty
+local concat = table.concat
+
--[[
Just a demo comment line. We will handle such multiline comments but
only when they start and end at the beginning of a line. More rich
@@ -57,14 +72,14 @@ function ldx.load(filename)
local i, j, t = 0, 0, { }
while true do
local comment, ni
- ni, j, comment = data:find(expr, j)
+ ni, j, comment = find(data, expr, j)
if not ni then break end
- t[#t+1] = { code = data:sub(i, ni-1) }
+ t[#t+1] = { code = sub(data, i, ni-1) }
t[#t+1] = { comment = comment }
i = j + 1
end
- local str = data:sub(i, #data)
- str = str:gsub("^%s*(.-)%s*$", "%1")
+ local str = sub(data, i, #data)
+ str = gsub(str, "^%s*(.-)%s*$", "%1")
if #str > 0 then
t[#t+1] = { code = str }
end
@@ -115,7 +130,7 @@ construction.
do
local e = { [">"] = "&gt;", ["<"] = "&lt;", ["&"] = "&amp;" }
function ldx.escape(str)
- return (str:gsub("([><&])",e))
+ return (gsub(str, "([><&])",e))
end
end
@@ -135,25 +150,25 @@ function ldx.enhance(data) -- i need to use lpeg and then we can properly autoin
local v = data[k]
if v.code then
local dqs, sqs, com, cmt, cod = { }, { }, { }, { }, e(v.code)
- cod = cod:gsub('\\"', "##d##")
- cod = cod:gsub("\\'", "##s##")
- cod = cod:gsub("%-%-%[%[.-%]%]%-%-", function(s)
+ cod = gsub(cod, '\\"', "##d##")
+ cod = gsub(cod, "\\'", "##s##")
+ cod = gsub(cod, "%-%-%[%[.-%]%]%-%-", function(s)
cmt[#cmt+1] = s
return "<l<<<".. #cmt ..">>>l>"
end)
- cod = cod:gsub("%-%-([^\n]*)", function(s)
+ cod = gsub(cod, "%-%-([^\n]*)", function(s)
com[#com+1] = s
return "<c<<<".. #com ..">>>c>"
end)
- cod = cod:gsub("(%b\"\")", function(s)
- dqs[#dqs+1] = s:sub(2,-2) or ""
+ cod = gsub(cod, "(%b\"\")", function(s)
+ dqs[#dqs+1] = sub(s,2,-2) or ""
return "<d<<<".. #dqs ..">>>d>"
end)
- cod = cod:gsub("(%b\'\')", function(s)
- sqs[#sqs+1] = s:sub(2,-2) or ""
+ cod = gsub(cod, "(%b\'\')", function(s)
+ sqs[#sqs+1] = sub(s,2,-2) or ""
return "<s<<<".. #sqs ..">>>s>"
end)
- cod = cod:gsub("(%a+)",function(key)
+ cod = gsub(cod, "(%a+)",function(key)
local class = ldx.keywords.reserved[key]
if class then
return "<key class='" .. class .. "'>" .. key .. "</key>"
@@ -161,41 +176,41 @@ function ldx.enhance(data) -- i need to use lpeg and then we can properly autoin
return key
end
end)
- cod = cod:gsub("<s<<<(%d+)>>>s>", function(s)
+ cod = gsub(cod, "<s<<<(%d+)>>>s>", function(s)
return "<sqs>" .. sqs[tonumber(s)] .. "</sqs>"
end)
- cod = cod:gsub("<d<<<(%d+)>>>d>", function(s)
+ cod = gsub(cod, "<d<<<(%d+)>>>d>", function(s)
return "<dqs>" .. dqs[tonumber(s)] .. "</dqs>"
end)
- cod = cod:gsub("<c<<<(%d+)>>>c>", function(s)
+ cod = gsub(cod, "<c<<<(%d+)>>>c>", function(s)
return "<com>" .. com[tonumber(s)] .. "</com>"
end)
- cod = cod:gsub("<l<<<(%d+)>>>l>", function(s)
+ cod = gsub(cod, "<l<<<(%d+)>>>l>", function(s)
return cmt[tonumber(s)]
end)
- cod = cod:gsub("##d##", "\\\"")
- cod = cod:gsub("##s##", "\\\'")
+ cod = gsub(cod, "##d##", "\\\"")
+ cod = gsub(cod, "##s##", "\\\'")
if ldx.make_index then
- local lines = cod:split("\n")
+ local lines = splitstring(cod,"\n")
local f = "(<key class='1'>function</key>)%s+([%w%.]+)%s*%("
for k=1,#lines do
local v = lines[k]
-- functies
- v = v:gsub(f,function(key, str)
+ v = gsub(v,f,function(key, str)
return "<function>" .. str .. "</function>("
end)
-- variables
- v = v:gsub("^([%w][%w%,%s]-)(=[^=])",function(str, rest)
- local t = string.split(str, ",%s*")
+ v = gsub(v,"^([%w][%w%,%s]-)(=[^=])",function(str, rest)
+ local t = splitstring(str,",%s*")
for k=1,#t do
t[k] = "<variable>" .. t[k] .. "</variable>"
end
- return table.concat(t,", ") .. rest
+ return concat(t,", ") .. rest
end)
-- so far
lines[k] = v
end
- v.code = table.concat(lines,"\n")
+ v.code = concat(lines,"\n")
else
v.code = cod
end
@@ -217,30 +232,30 @@ function ldx.as_xml(data) -- ldx: not needed
t[#t+1] = "\n<document xmlns:ldx='http://www.pragma-ade.com/schemas/ldx.rng' xmlns='http://www.pragma-ade.com/schemas/ldx.rng'>\n"
for k=1,#data do
local v = data[k]
- if v.code and not v.code:is_empty() then
+ if v.code and not emptystring(v.code) then
t[#t+1] = "\n<code>\n"
- local split = v.code:split("\n")
+ local split = splitstring(v.code,"\n")
for k=1,#split do -- make this faster
local v = split[k]
- local a, b = v:find("^(%s+)")
- if v then v = v:gsub("[\n\r ]+$","") end
+ local a, b = find(v,"^(%s+)")
+ if v then v = gsub(v,"[\n\r ]+$","") end
if a and b then
- v = v:sub(b+1,#v)
+ v = sub(v,b+1,#v)
if cmode then
t[#t+1] = "<line comment='yes' n='" .. b .. "'>" .. v .. "</line>\n"
else
t[#t+1] = "<line n='" .. b .. "'>" .. v .. "</line>\n"
end
- elseif v:is_empty() then
+ elseif emptystring(v) then
if cmode then
t[#t+1] = "<line comment='yes'/>\n"
else
t[#t+1] = "<line/>\n"
end
- elseif v:find("^%-%-%[%[") then
+ elseif find(v,"^%-%-%[%[") then
t[#t+1] = "<line comment='yes'>" .. v .. "</line>\n"
cmode= true
- elseif v:find("^%]%]%-%-") then
+ elseif find(v,"^%]%]%-%-") then
t[#t+1] = "<line comment='yes'>" .. v .. "</line>\n"
cmode= false
elseif cmode then
@@ -257,7 +272,7 @@ function ldx.as_xml(data) -- ldx: not needed
end
end
t[#t+1] = "\n</document>\n"
- return table.concat(t,"")
+ return concat(t,"")
end
--[[ldx--
diff --git a/tex/context/base/x-mathml.lua b/tex/context/base/x-mathml.lua
index 7481cef0d..ccd9c1e4b 100644
--- a/tex/context/base/x-mathml.lua
+++ b/tex/context/base/x-mathml.lua
@@ -736,7 +736,7 @@ str = gsub(str,"&.-;","")
tex.sprint(ctxcatcodes,"\\egroup")
end
-local spacesplitter = lpeg.Ct(lpeg.splitat(" "))
+local spacesplitter = lpeg.tsplitat(" ")
function mathml.mtable(root)
-- todo: align, rowspacing, columnspacing, rowlines, columnlines
diff --git a/tex/context/base/x-mathml.mkiv b/tex/context/base/x-mathml.mkiv
index 5b431399f..b064e4987 100644
--- a/tex/context/base/x-mathml.mkiv
+++ b/tex/context/base/x-mathml.mkiv
@@ -2061,6 +2061,10 @@
% mrow / option: no fenced
+\startxmlsetups mml:maction
+ \xmlflush{#1}
+\stopxmlsetups
+
\startxmlsetups mml:mrow
\begingroup
\edef\nofmmlrows{\xmlcount{#1}{/mml:mo}}%
diff --git a/tex/generic/context/luatex-fonts-merged.lua b/tex/generic/context/luatex-fonts-merged.lua
index c90043534..0cfbf2a0c 100644
--- a/tex/generic/context/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : luatex-fonts-merged.lua
-- parent file : luatex-fonts.lua
--- merge date : 06/09/11 12:49:16
+-- merge date : 06/11/11 16:45:35
do -- begin closure to overcome local limits and interference
@@ -127,650 +127,6 @@ end -- closure
do -- begin closure to overcome local limits and interference
-if not modules then modules = { } end modules ['l-lpeg'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local lpeg = require("lpeg")
-
-local type = type
-
--- Beware, we predefine a bunch of patterns here and one reason for doing so
--- is that we get consistent behaviour in some of the visualizers.
-
-lpeg.patterns = lpeg.patterns or { } -- so that we can share
-local patterns = lpeg.patterns
-
-local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
-local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
-local lpegtype = lpeg.type
-
-local utfcharacters = string.utfcharacters
-local utfgmatch = unicode and unicode.utf8.gmatch
-
-local anything = P(1)
-local endofstring = P(-1)
-local alwaysmatched = P(true)
-
-patterns.anything = anything
-patterns.endofstring = endofstring
-patterns.beginofstring = alwaysmatched
-patterns.alwaysmatched = alwaysmatched
-
-local digit, sign = R('09'), S('+-')
-local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
-local newline = crlf + cr + lf
-local escaped = P("\\") * anything
-local squote = P("'")
-local dquote = P('"')
-local space = P(" ")
-
-local utfbom_32_be = P('\000\000\254\255')
-local utfbom_32_le = P('\255\254\000\000')
-local utfbom_16_be = P('\255\254')
-local utfbom_16_le = P('\254\255')
-local utfbom_8 = P('\239\187\191')
-local utfbom = utfbom_32_be + utfbom_32_le
- + utfbom_16_be + utfbom_16_le
- + utfbom_8
-local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
- + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
- + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
-
-local utf8next = R("\128\191")
-
-patterns.utf8one = R("\000\127")
-patterns.utf8two = R("\194\223") * utf8next
-patterns.utf8three = R("\224\239") * utf8next * utf8next
-patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
-patterns.utfbom = utfbom
-patterns.utftype = utftype
-
-local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
-local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
-
-patterns.utf8 = utf8char
-patterns.utf8char = utf8char
-patterns.validutf8 = validutf8char
-patterns.validutf8char = validutf8char
-
-patterns.digit = digit
-patterns.sign = sign
-patterns.cardinal = sign^0 * digit^1
-patterns.integer = sign^0 * digit^1
-patterns.float = sign^0 * digit^0 * P('.') * digit^1
-patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
-patterns.number = patterns.float + patterns.integer
-patterns.cnumber = patterns.cfloat + patterns.integer
-patterns.oct = P("0") * R("07")^1
-patterns.octal = patterns.oct
-patterns.HEX = P("0x") * R("09","AF")^1
-patterns.hex = P("0x") * R("09","af")^1
-patterns.hexadecimal = P("0x") * R("09","AF","af")^1
-patterns.lowercase = R("az")
-patterns.uppercase = R("AZ")
-patterns.letter = patterns.lowercase + patterns.uppercase
-patterns.space = space
-patterns.tab = P("\t")
-patterns.spaceortab = patterns.space + patterns.tab
-patterns.eol = S("\n\r")
-patterns.spacer = S(" \t\f\v") -- + string.char(0xc2, 0xa0) if we want utf (cf mail roberto)
-patterns.newline = newline
-patterns.emptyline = newline^1
-patterns.nonspacer = 1 - patterns.spacer
-patterns.whitespace = patterns.eol + patterns.spacer
-patterns.nonwhitespace = 1 - patterns.whitespace
-patterns.equal = P("=")
-patterns.comma = P(",")
-patterns.commaspacer = P(",") * patterns.spacer^0
-patterns.period = P(".")
-patterns.colon = P(":")
-patterns.semicolon = P(";")
-patterns.underscore = P("_")
-patterns.escaped = escaped
-patterns.squote = squote
-patterns.dquote = dquote
-patterns.nosquote = (escaped + (1-squote))^0
-patterns.nodquote = (escaped + (1-dquote))^0
-patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
-patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
-patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
-patterns.unspacer = ((patterns.spacer^1)/"")^0
-
-patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
-patterns.beginline = #(1-newline)
-
-local unquoted = Cs(patterns.unquoted * endofstring) -- not C
-
-function string.unquoted(str)
- return match(unquoted,str) or str
-end
-
---~ print(string.unquoted("test"))
---~ print(string.unquoted([["t\"est"]]))
---~ print(string.unquoted([["t\"est"x]]))
---~ print(string.unquoted("\'test\'"))
-
-function lpeg.anywhere(pattern) --slightly adapted from website
- return P { P(pattern) + 1 * V(1) } -- why so complex?
-end
-
-function lpeg.splitter(pattern, action)
- return (((1-P(pattern))^1)/action+1)^0
-end
-
-local splitters_s, splitters_m = { }, { }
-
-local function splitat(separator,single)
- local splitter = (single and splitters_s[separator]) or splitters_m[separator]
- if not splitter then
- separator = P(separator)
- local other = C((1 - separator)^0)
- if single then
- local any = anything
- splitter = other * (separator * C(any^0) + "") -- ?
- splitters_s[separator] = splitter
- else
- splitter = other * (separator * other)^0
- splitters_m[separator] = splitter
- end
- end
- return splitter
-end
-
-lpeg.splitat = splitat
-
---~ local p = splitat("->",false) print(match(p,"oeps->what->more")) -- oeps what more
---~ local p = splitat("->",true) print(match(p,"oeps->what->more")) -- oeps what->more
---~ local p = splitat("->",false) print(match(p,"oeps")) -- oeps
---~ local p = splitat("->",true) print(match(p,"oeps")) -- oeps
-
-local cache = { }
-
-function lpeg.split(separator,str)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.split(str,separator)
- local c = cache[separator]
- if not c then
- c = Ct(splitat(separator))
- cache[separator] = c
- end
- return match(c,str)
-end
-
-local spacing = patterns.spacer^0 * newline -- sort of strip
-local empty = spacing * Cc("")
-local nonempty = Cs((1-spacing)^1) * spacing^-1
-local content = (empty + nonempty)^1
-
-patterns.textline = content
-
---~ local linesplitter = Ct(content^0)
---~
---~ function string.splitlines(str)
---~ return match(linesplitter,str)
---~ end
-
-local linesplitter = Ct(splitat(newline))
-
-patterns.linesplitter = linesplitter
-
-function string.splitlines(str)
- return match(linesplitter,str)
-end
-
-local utflinesplitter = utfbom^-1 * Ct(splitat(newline))
-
-patterns.utflinesplitter = utflinesplitter
-
-function string.utfsplitlines(str)
- return match(utflinesplitter,str)
-end
-
---~ lpeg.splitters = cache -- no longer public
-
-local cache = { }
-
-function lpeg.checkedsplit(separator,str)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
-function string.checkedsplit(str,separator)
- local c = cache[separator]
- if not c then
- separator = P(separator)
- local other = C((1 - separator)^1)
- c = Ct(separator^0 * other * (separator^1 * other)^0)
- cache[separator] = c
- end
- return match(c,str)
-end
-
---~ from roberto's site:
-
-local f1 = string.byte
-
-local function f2(s) local c1, c2 = f1(s,1,2) return c1 * 64 + c2 - 12416 end
-local function f3(s) local c1, c2, c3 = f1(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
-local function f4(s) local c1, c2, c3, c4 = f1(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
-
-local utf8byte = patterns.utf8one/f1 + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
-
-patterns.utf8byte = utf8byte
-
---~ local str = " a b c d "
-
---~ local s = lpeg.stripper(lpeg.R("az")) print("["..lpeg.match(s,str).."]")
---~ local s = lpeg.keeper(lpeg.R("az")) print("["..lpeg.match(s,str).."]")
---~ local s = lpeg.stripper("ab") print("["..lpeg.match(s,str).."]")
---~ local s = lpeg.keeper("ab") print("["..lpeg.match(s,str).."]")
-
-local cache = { }
-
-function lpeg.stripper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs(((S(str)^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs(((str^1)/"" + 1)^0)
- end
-end
-
-local cache = { }
-
-function lpeg.keeper(str)
- if type(str) == "string" then
- local s = cache[str]
- if not s then
- s = Cs((((1-S(str))^1)/"" + 1)^0)
- cache[str] = s
- end
- return s
- else
- return Cs((((1-str)^1)/"" + 1)^0)
- end
-end
-
-function lpeg.frontstripper(str) -- or pattern (yet undocumented)
- return (P(str) + P(true)) * Cs(P(1)^0)
-end
-
-function lpeg.endstripper(str) -- or pattern (yet undocumented)
- return Cs((1 - P(str) * P(-1))^0)
-end
-
--- Just for fun I looked at the used bytecode and
--- p = (p and p + pp) or pp gets one more (testset).
-
-function lpeg.replacer(one,two)
- if type(one) == "table" then
- local no = #one
- if no > 0 then
- local p
- for i=1,no do
- local o = one[i]
- local pp = P(o[1]) / o[2]
- if p then
- p = p + pp
- else
- p = pp
- end
- end
- return Cs((p + 1)^0)
- end
- else
- two = two or ""
- return Cs((P(one)/two + 1)^0)
- end
-end
-
-local splitters_f, splitters_s = { }, { }
-
-function lpeg.firstofsplit(separator) -- always return value
- local splitter = splitters_f[separator]
- if not splitter then
- separator = P(separator)
- splitter = C((1 - separator)^0)
- splitters_f[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.secondofsplit(separator) -- nil if not split
- local splitter = splitters_s[separator]
- if not splitter then
- separator = P(separator)
- splitter = (1 - separator)^0 * separator * C(anything^0)
- splitters_s[separator] = splitter
- end
- return splitter
-end
-
-function lpeg.balancer(left,right)
- left, right = P(left), P(right)
- return P { left * ((1 - left - right) + V(1))^0 * right }
-end
-
---~ print(1,match(lpeg.firstofsplit(":"),"bc:de"))
---~ print(2,match(lpeg.firstofsplit(":"),":de")) -- empty
---~ print(3,match(lpeg.firstofsplit(":"),"bc"))
---~ print(4,match(lpeg.secondofsplit(":"),"bc:de"))
---~ print(5,match(lpeg.secondofsplit(":"),"bc:")) -- empty
---~ print(6,match(lpeg.secondofsplit(":",""),"bc"))
---~ print(7,match(lpeg.secondofsplit(":"),"bc"))
---~ print(9,match(lpeg.secondofsplit(":","123"),"bc"))
-
---~ -- slower:
---~
---~ function lpeg.counter(pattern)
---~ local n, pattern = 0, (lpeg.P(pattern)/function() n = n + 1 end + lpeg.anything)^0
---~ return function(str) n = 0 ; lpegmatch(pattern,str) ; return n end
---~ end
-
-local nany = utf8char/""
-
-function lpeg.counter(pattern)
- pattern = Cs((P(pattern)/" " + nany)^0)
- return function(str)
- return #match(pattern,str)
- end
-end
-
-if utfgmatch then
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local n = 0
- for _ in utfgmatch(str,what) do
- n = n + 1
- end
- return n
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-else
-
- local cache = { }
-
- function lpeg.count(str,what) -- replaces string.count
- if type(what) == "string" then
- local p = cache[what]
- if not p then
- p = Cs((P(what)/" " + nany)^0)
- cache[p] = p
- end
- return #match(p,str)
- else -- 4 times slower but still faster than / function
- return #match(Cs((P(what)/" " + nany)^0),str)
- end
- end
-
-end
-
-local patterns_escapes = { -- also defines in l-string
- ["%"] = "%%",
- ["."] = "%.",
- ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
- ["["] = "%[", ["]"] = "%]",
- ["("] = "%)", [")"] = "%)",
- -- ["{"] = "%{", ["}"] = "%}"
- -- ["^"] = "%^", ["$"] = "%$",
-}
-
-local simple_escapes = { -- also defines in l-string
- ["-"] = "%-",
- ["."] = "%.",
- ["?"] = ".",
- ["*"] = ".*",
-}
-
-local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
-local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
-
-function string.escapedpattern(str,simple)
- return match(simple and s or p,str)
-end
-
--- utf extensies
-
-lpeg.UP = lpeg.P
-
-if utfcharacters then
-
- function lpeg.US(str)
- local p
- for uc in utfcharacters(str) do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-
-elseif utfgmatch then
-
- function lpeg.US(str)
- local p
- for uc in utfgmatch(str,".") do
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- return p
- end
-
-else
-
- function lpeg.US(str)
- local p
- local f = function(uc)
- if p then
- p = p + P(uc)
- else
- p = P(uc)
- end
- end
- match((utf8char/f)^0,str)
- return p
- end
-
-end
-
-local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
-
-local utfchar = unicode and unicode.utf8 and unicode.utf8.char
-
-function lpeg.UR(str,more)
- local first, last
- if type(str) == "number" then
- first = str
- last = more or first
- else
- first, last = match(range,str)
- if not last then
- return P(str)
- end
- end
- if first == last then
- return P(str)
- elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterium
- local p
- for i=first,last do
- if p then
- p = p + P(utfchar(i))
- else
- p = P(utfchar(i))
- end
- end
- return p -- nil when invalid range
- else
- local f = function(b)
- return b >= first and b <= last
- end
- return utf8byte / f -- nil when invalid range
- end
-end
-
---~ lpeg.print(lpeg.R("ab","cd","gh"))
---~ lpeg.print(lpeg.P("a","b","c"))
---~ lpeg.print(lpeg.S("a","b","c"))
-
---~ print(lpeg.count("äáàa",lpeg.P("á") + lpeg.P("à")))
---~ print(lpeg.count("äáàa",lpeg.UP("áà")))
---~ print(lpeg.count("äáàa",lpeg.US("àá")))
---~ print(lpeg.count("äáàa",lpeg.UR("aá")))
---~ print(lpeg.count("äáàa",lpeg.UR("àá")))
---~ print(lpeg.count("äáàa",lpeg.UR(0x0000,0xFFFF)))
-
-function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
- if type(list) ~= "table" then
- list = { list, ... }
- end
- -- sort(list) -- longest match first
- local p = P(list[1])
- for l=2,#list do
- p = p + P(list[l])
- end
- return p
-end
-
-function lpeg.is_lpeg(p)
- return p and lpegtype(p) == "pattern"
-end
-
---~ Cf(Ct("") * (Cg(C(...) * "=" * Cs(...)))^0, rawset)
-
-end -- closure
-
-do -- begin closure to overcome local limits and interference
-
-if not modules then modules = { } end modules ['l-boolean'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local type, tonumber = type, tonumber
-
-boolean = boolean or { }
-local boolean = boolean
-
--- function boolean.tonumber(b)
--- return b and 1 or 0 -- test and test and return or return
--- end
-
-function boolean.tonumber(b)
- if b then return 1 else return 0 end -- test and return or return
-end
-
-function toboolean(str,tolerant)
- if tolerant then
- local tstr = type(str)
- if tstr == "string" then
- return str == "true" or str == "yes" or str == "on" or str == "1" or str == "t"
- elseif tstr == "number" then
- return tonumber(str) ~= 0
- elseif tstr == "nil" then
- return false
- else
- return str
- end
- elseif str == "true" then
- return true
- elseif str == "false" then
- return false
- else
- return str
- end
-end
-
-string.toboolean = toboolean
-
-function string.is_boolean(str,default)
- if type(str) == "string" then
- if str == "true" or str == "yes" or str == "on" or str == "t" then
- return true
- elseif str == "false" or str == "no" or str == "off" or str == "f" then
- return false
- end
- end
- return default
-end
-
-end -- closure
-
-do -- begin closure to overcome local limits and interference
-
-if not modules then modules = { } end modules ['l-math'] = {
- version = 1.001,
- comment = "companion to luat-lib.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local floor, sin, cos, tan = math.floor, math.sin, math.cos, math.tan
-
-if not math.round then
- function math.round(x) return floor(x + 0.5) end
-end
-
-if not math.div then
- function math.div(n,m) return floor(n/m) end
-end
-
-if not math.mod then
- function math.mod(n,m) return n % m end
-end
-
-local pipi = 2*math.pi/360
-
-if not math.sind then
- function math.sind(d) return sin(d*pipi) end
- function math.cosd(d) return cos(d*pipi) end
- function math.tand(d) return tan(d*pipi) end
-end
-
-if not math.odd then
- function math.odd (n) return n % 2 == 0 end
- function math.even(n) return n % 2 ~= 0 end
-end
-
-end -- closure
-
-do -- begin closure to overcome local limits and interference
-
if not modules then modules = { } end modules ['l-table'] = {
version = 1.001,
comment = "companion to luat-lib.mkiv",
@@ -1703,6 +1059,713 @@ function table.has_one_entry(t)
return t and not next(t,next(t))
end
+-- new
+
+function table.loweredkeys(t) -- maybe utf
+ local l = { }
+ for k, v in next, t do
+ l[lower(k)] = v
+ end
+ return l
+end
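+
+-- for example (sketch): table.loweredkeys { Foo = 1, BAR = 2 } -- { foo = 1, bar = 2 }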
+
+end -- closure
+
+do -- begin closure to overcome local limits and interference
+
+if not modules then modules = { } end modules ['l-lpeg'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local lpeg = require("lpeg")
+
+local type = type
+local byte, char = string.byte, string.char
+
+-- Beware, we predefine a bunch of patterns here and one reason for doing so
+-- is that we get consistent behaviour in some of the visualizers.
+
+lpeg.patterns = lpeg.patterns or { } -- so that we can share
+local patterns = lpeg.patterns
+
+local P, R, S, V, match = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.match
+local Ct, C, Cs, Cc = lpeg.Ct, lpeg.C, lpeg.Cs, lpeg.Cc
+local lpegtype = lpeg.type
+
+local utfcharacters = string.utfcharacters
+local utfgmatch = unicode and unicode.utf8.gmatch
+
+local anything = P(1)
+local endofstring = P(-1)
+local alwaysmatched = P(true)
+
+patterns.anything = anything
+patterns.endofstring = endofstring
+patterns.beginofstring = alwaysmatched
+patterns.alwaysmatched = alwaysmatched
+
+local digit, sign = R('09'), S('+-')
+local cr, lf, crlf = P("\r"), P("\n"), P("\r\n")
+local newline = crlf + cr + lf
+local escaped = P("\\") * anything
+local squote = P("'")
+local dquote = P('"')
+local space = P(" ")
+
+local utfbom_32_be = P('\000\000\254\255')
+local utfbom_32_le = P('\255\254\000\000')
+local utfbom_16_be = P('\255\254')
+local utfbom_16_le = P('\254\255')
+local utfbom_8 = P('\239\187\191')
+local utfbom = utfbom_32_be + utfbom_32_le
+ + utfbom_16_be + utfbom_16_le
+ + utfbom_8
+local utftype = utfbom_32_be / "utf-32-be" + utfbom_32_le / "utf-32-le"
+ + utfbom_16_be / "utf-16-be" + utfbom_16_le / "utf-16-le"
+ + utfbom_8 / "utf-8" + alwaysmatched / "unknown"
+
+local utf8next = R("\128\191")
+
+patterns.utf8one = R("\000\127")
+patterns.utf8two = R("\194\223") * utf8next
+patterns.utf8three = R("\224\239") * utf8next * utf8next
+patterns.utf8four = R("\240\244") * utf8next * utf8next * utf8next
+patterns.utfbom = utfbom
+patterns.utftype = utftype
+
+local utf8char = patterns.utf8one + patterns.utf8two + patterns.utf8three + patterns.utf8four
+local validutf8char = utf8char^0 * endofstring * Cc(true) + Cc(false)
+
+patterns.utf8 = utf8char
+patterns.utf8char = utf8char
+patterns.validutf8 = validutf8char
+patterns.validutf8char = validutf8char
+
+patterns.digit = digit
+patterns.sign = sign
+patterns.cardinal = sign^0 * digit^1
+patterns.integer = sign^0 * digit^1
+patterns.float = sign^0 * digit^0 * P('.') * digit^1
+patterns.cfloat = sign^0 * digit^0 * P(',') * digit^1
+patterns.number = patterns.float + patterns.integer
+patterns.cnumber = patterns.cfloat + patterns.integer
+patterns.oct = P("0") * R("07")^1
+patterns.octal = patterns.oct
+patterns.HEX = P("0x") * R("09","AF")^1
+patterns.hex = P("0x") * R("09","af")^1
+patterns.hexadecimal = P("0x") * R("09","AF","af")^1
+patterns.lowercase = R("az")
+patterns.uppercase = R("AZ")
+patterns.letter = patterns.lowercase + patterns.uppercase
+patterns.space = space
+patterns.tab = P("\t")
+patterns.spaceortab = patterns.space + patterns.tab
+patterns.eol = S("\n\r")
+patterns.spacer = S(" \t\f\v") -- + char(0xc2, 0xa0) if we want utf (cf mail roberto)
+patterns.newline = newline
+patterns.emptyline = newline^1
+patterns.nonspacer = 1 - patterns.spacer
+patterns.whitespace = patterns.eol + patterns.spacer
+patterns.nonwhitespace = 1 - patterns.whitespace
+patterns.equal = P("=")
+patterns.comma = P(",")
+patterns.commaspacer = P(",") * patterns.spacer^0
+patterns.period = P(".")
+patterns.colon = P(":")
+patterns.semicolon = P(";")
+patterns.underscore = P("_")
+patterns.escaped = escaped
+patterns.squote = squote
+patterns.dquote = dquote
+patterns.nosquote = (escaped + (1-squote))^0
+patterns.nodquote = (escaped + (1-dquote))^0
+patterns.unsingle = (squote/"") * patterns.nosquote * (squote/"")
+patterns.undouble = (dquote/"") * patterns.nodquote * (dquote/"")
+patterns.unquoted = patterns.undouble + patterns.unsingle -- more often undouble
+patterns.unspacer = ((patterns.spacer^1)/"")^0
+
+patterns.somecontent = (anything - newline - space)^1 -- (utf8char - newline - space)^1
+patterns.beginline = #(1-newline)
+
+local unquoted = Cs(patterns.unquoted * endofstring) -- not C
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+-- more efficient:
+
+local unquoted = (
+    squote * Cs((1 - P(-2))^0) * squote
+  + dquote * Cs((1 - P(-2))^0) * dquote
+)
+
+function string.unquoted(str)
+ return match(unquoted,str) or str
+end
+
+patterns.unquoted = unquoted
+
+--~ print(string.unquoted("test"))
+--~ print(string.unquoted([["t\"est"]]))
+--~ print(string.unquoted([["t\"est"x]]))
+--~ print(string.unquoted("\'test\'"))
+
+function lpeg.anywhere(pattern) --slightly adapted from website
+ return P { P(pattern) + 1 * V(1) } -- why so complex?
+end
+
+function lpeg.splitter(pattern, action)
+ return (((1-P(pattern))^1)/action+1)^0
+end
+
+function lpeg.tsplitter(pattern, action)
+ return Ct((((1-P(pattern))^1)/action+1)^0)
+end
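+
+-- a small sketch (not from the original source):
+--
+--   match(lpeg.tsplitter(",",tonumber),"1,2,3") -- { 1, 2, 3 }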
+
+-- problem: the separator can be an lpeg pattern and that does not hash too well, but
+-- it's quite okay as the key is then not garbage collected
+
+local splitters_s, splitters_m, splitters_t = { }, { }, { }
+
+local function splitat(separator,single)
+ local splitter = (single and splitters_s[separator]) or splitters_m[separator]
+ if not splitter then
+ separator = P(separator)
+ local other = C((1 - separator)^0)
+ if single then
+ local any = anything
+ splitter = other * (separator * C(any^0) + "") -- ?
+ splitters_s[separator] = splitter
+ else
+ splitter = other * (separator * other)^0
+ splitters_m[separator] = splitter
+ end
+ end
+ return splitter
+end
+
+local function tsplitat(separator)
+ local splitter = splitters_t[separator]
+ if not splitter then
+ splitter = Ct(splitat(separator))
+ splitters_t[separator] = splitter
+ end
+ return splitter
+end
+
+lpeg.splitat = splitat
+lpeg.tsplitat = tsplitat
+
+--~ local p = splitat("->",false) print(match(p,"oeps->what->more")) -- oeps what more
+--~ local p = splitat("->",true) print(match(p,"oeps->what->more")) -- oeps what->more
+--~ local p = splitat("->",false) print(match(p,"oeps")) -- oeps
+--~ local p = splitat("->",true) print(match(p,"oeps")) -- oeps
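+--~ a sketch for the table variant:
+--~ local p = tsplitat("->") local t = match(p,"oeps->what->more") -- t = { "oeps", "what", "more" }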
+
+local cache = { }
+
+function lpeg.split(separator,str)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.split(str,separator)
+ local c = cache[separator]
+ if not c then
+ c = tsplitat(separator)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+local spacing = patterns.spacer^0 * newline -- sort of strip
+local empty = spacing * Cc("")
+local nonempty = Cs((1-spacing)^1) * spacing^-1
+local content = (empty + nonempty)^1
+
+patterns.textline = content
+
+--~ local linesplitter = Ct(content^0)
+--~
+--~ function string.splitlines(str)
+--~ return match(linesplitter,str)
+--~ end
+
+local linesplitter = tsplitat(newline)
+
+patterns.linesplitter = linesplitter
+
+function string.splitlines(str)
+ return match(linesplitter,str)
+end
+
+local utflinesplitter = utfbom^-1 * tsplitat(newline)
+
+patterns.utflinesplitter = utflinesplitter
+
+function string.utfsplitlines(str)
+ return match(utflinesplitter,str)
+end
+
+--~ lpeg.splitters = cache -- no longer public
+
+local cache = { }
+
+function lpeg.checkedsplit(separator,str)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+function string.checkedsplit(str,separator)
+ local c = cache[separator]
+ if not c then
+ separator = P(separator)
+ local other = C((1 - separator)^1)
+ c = Ct(separator^0 * other * (separator^1 * other)^0)
+ cache[separator] = c
+ end
+ return match(c,str)
+end
+
+--~ from roberto's site:
+
+local function f2(s) local c1, c2 = byte(s,1,2) return c1 * 64 + c2 - 12416 end
+local function f3(s) local c1, c2, c3 = byte(s,1,3) return (c1 * 64 + c2) * 64 + c3 - 925824 end
+local function f4(s) local c1, c2, c3, c4 = byte(s,1,4) return ((c1 * 64 + c2) * 64 + c3) * 64 + c4 - 63447168 end
+
+local utf8byte = patterns.utf8one/byte + patterns.utf8two/f2 + patterns.utf8three/f3 + patterns.utf8four/f4
+
+patterns.utf8byte = utf8byte
+
+--~ local str = " a b c d "
+
+--~ local s = lpeg.stripper(lpeg.R("az")) print("["..lpeg.match(s,str).."]")
+--~ local s = lpeg.keeper(lpeg.R("az")) print("["..lpeg.match(s,str).."]")
+--~ local s = lpeg.stripper("ab") print("["..lpeg.match(s,str).."]")
+--~ local s = lpeg.keeper("ab") print("["..lpeg.match(s,str).."]")
+
+local cache = { }
+
+function lpeg.stripper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs(((S(str)^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs(((str^1)/"" + 1)^0)
+ end
+end
+
+local cache = { }
+
+function lpeg.keeper(str)
+ if type(str) == "string" then
+ local s = cache[str]
+ if not s then
+ s = Cs((((1-S(str))^1)/"" + 1)^0)
+ cache[str] = s
+ end
+ return s
+ else
+ return Cs((((1-str)^1)/"" + 1)^0)
+ end
+end
+
+function lpeg.frontstripper(str) -- or pattern (yet undocumented)
+ return (P(str) + P(true)) * Cs(P(1)^0)
+end
+
+function lpeg.endstripper(str) -- or pattern (yet undocumented)
+ return Cs((1 - P(str) * P(-1))^0)
+end
+
+-- Just for fun I looked at the used bytecode and
+-- p = (p and p + pp) or pp gets one more (testset).
+
+function lpeg.replacer(one,two)
+ if type(one) == "table" then
+ local no = #one
+ if no > 0 then
+ local p
+ for i=1,no do
+ local o = one[i]
+ local pp = P(o[1]) / o[2]
+ if p then
+ p = p + pp
+ else
+ p = pp
+ end
+ end
+ return Cs((p + 1)^0)
+ end
+ else
+ two = two or ""
+ return Cs((P(one)/two + 1)^0)
+ end
+end
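+
+-- a usage sketch (illustrative; results assumed): a single pair or a table
+-- of pairs both yield a substituting pattern:
+--
+--~ lpeg.match(lpeg.replacer("a","A"),"banana")              -- "bAnAnA"
+--~ lpeg.match(lpeg.replacer{ {"a","1"}, {"b","2"} },"abba") -- "1221"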
+
+local splitters_f, splitters_s = { }, { }
+
+function lpeg.firstofsplit(separator) -- always returns a value
+ local splitter = splitters_f[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = C((1 - separator)^0)
+ splitters_f[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.secondofsplit(separator) -- nil when the separator is absent
+ local splitter = splitters_s[separator]
+ if not splitter then
+ separator = P(separator)
+ splitter = (1 - separator)^0 * separator * C(anything^0)
+ splitters_s[separator] = splitter
+ end
+ return splitter
+end
+
+function lpeg.balancer(left,right)
+ left, right = P(left), P(right)
+ return P { left * ((1 - left - right) + V(1))^0 * right }
+end
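+
+-- a usage sketch (illustrative; result assumed): the recursive pattern
+-- matches one balanced group, so plain matching returns the position right
+-- after it:
+--
+--~ lpeg.match(lpeg.balancer("(",")"),"(a(b)c)d") -- 8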
+
+--~ print(1,match(lpeg.firstofsplit(":"),"bc:de"))
+--~ print(2,match(lpeg.firstofsplit(":"),":de")) -- empty
+--~ print(3,match(lpeg.firstofsplit(":"),"bc"))
+--~ print(4,match(lpeg.secondofsplit(":"),"bc:de"))
+--~ print(5,match(lpeg.secondofsplit(":"),"bc:")) -- empty
+--~ print(6,match(lpeg.secondofsplit(":",""),"bc"))
+--~ print(7,match(lpeg.secondofsplit(":"),"bc"))
+--~ print(9,match(lpeg.secondofsplit(":","123"),"bc"))
+
+--~ -- slower:
+--~
+--~ function lpeg.counter(pattern)
+--~ local n, pattern = 0, (lpeg.P(pattern)/function() n = n + 1 end + lpeg.anything)^0
+--~ return function(str) n = 0 ; lpegmatch(pattern,str) ; return n end
+--~ end
+
+local nany = utf8char/""
+
+function lpeg.counter(pattern)
+ pattern = Cs((P(pattern)/" " + nany)^0)
+ return function(str)
+ return #match(pattern,str)
+ end
+end
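+
+-- a usage sketch (illustrative; result assumed): each hit is mapped onto one
+-- byte and the length of the substituted string is the count:
+--
+--~ local howmany = lpeg.counter("an")
+--~ print(howmany("banana")) -- 2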
+
+if utfgmatch then
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local n = 0
+ for _ in utfgmatch(str,what) do
+ n = n + 1
+ end
+ return n
+ else -- about 4 times slower, but still faster than a / function capture
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+else
+
+ local cache = { }
+
+ function lpeg.count(str,what) -- replaces string.count
+ if type(what) == "string" then
+ local p = cache[what]
+ if not p then
+ p = Cs((P(what)/" " + nany)^0)
+ cache[what] = p
+ end
+ return #match(p,str)
+ else -- about 4 times slower, but still faster than a / function capture
+ return #match(Cs((P(what)/" " + nany)^0),str)
+ end
+ end
+
+end
+
+local patterns_escapes = { -- also defined in l-string
+ ["%"] = "%%",
+ ["."] = "%.",
+ ["+"] = "%+", ["-"] = "%-", ["*"] = "%*",
+ ["["] = "%[", ["]"] = "%]",
+ ["("] = "%)", [")"] = "%)",
+ -- ["{"] = "%{", ["}"] = "%}"
+ -- ["^"] = "%^", ["$"] = "%$",
+}
+
+local simple_escapes = { -- also defined in l-string
+ ["-"] = "%-",
+ ["."] = "%.",
+ ["?"] = ".",
+ ["*"] = ".*",
+}
+
+local p = Cs((S("-.+*%()[]") / patterns_escapes + anything)^0)
+local s = Cs((S("-.+*%()[]") / simple_escapes + anything)^0)
+
+function string.escapedpattern(str,simple)
+ return match(simple and s or p,str)
+end
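+
+-- a usage sketch (illustrative; results assumed): the simple variant keeps
+-- glob-like wildcards as lua pattern equivalents:
+--
+--~ string.escapedpattern("foo.bar")     -- "foo%.bar"
+--~ string.escapedpattern("*.lua",true)  -- ".*%.lua"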
+
+-- utf extensions
+
+lpeg.UP = lpeg.P
+
+if utfcharacters then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfcharacters(str) do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+elseif utfgmatch then
+
+ function lpeg.US(str)
+ local p
+ for uc in utfgmatch(str,".") do
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ return p
+ end
+
+else
+
+ function lpeg.US(str)
+ local p
+ local f = function(uc)
+ if p then
+ p = p + P(uc)
+ else
+ p = P(uc)
+ end
+ end
+ match((utf8char/f)^0,str)
+ return p
+ end
+
+end
+
+local range = Cs(utf8byte) * (Cs(utf8byte) + Cc(false))
+
+local utfchar = unicode and unicode.utf8 and unicode.utf8.char
+
+function lpeg.UR(str,more)
+ local first, last
+ if type(str) == "number" then
+ first = str
+ last = more or first
+ else
+ first, last = match(range,str)
+ if not last then
+ return P(str)
+ end
+ end
+ if first == last then
+ return utfchar and P(utfchar(first)) or P(str) -- match that single character when possible
+ elseif utfchar and last - first < 8 then -- a somewhat arbitrary criterion
+ local p
+ for i=first,last do
+ if p then
+ p = p + P(utfchar(i))
+ else
+ p = P(utfchar(i))
+ end
+ end
+ return p -- nil when invalid range
+ else
+ local f = function(b)
+ return b >= first and b <= last
+ end
+ return utf8byte / f -- nil when invalid range
+ end
+end
+
+--~ lpeg.print(lpeg.R("ab","cd","gh"))
+--~ lpeg.print(lpeg.P("a","b","c"))
+--~ lpeg.print(lpeg.S("a","b","c"))
+
+--~ print(lpeg.count("äáàa",lpeg.P("á") + lpeg.P("à")))
+--~ print(lpeg.count("äáàa",lpeg.UP("áà")))
+--~ print(lpeg.count("äáàa",lpeg.US("àá")))
+--~ print(lpeg.count("äáàa",lpeg.UR("aá")))
+--~ print(lpeg.count("äáàa",lpeg.UR("àá")))
+--~ print(lpeg.count("äáàa",lpeg.UR(0x0000,0xFFFF)))
+
+function lpeg.oneof(list,...) -- lpeg.oneof("elseif","else","if","then")
+ if type(list) ~= "table" then
+ list = { list, ... }
+ end
+ -- sort(list) -- longest match first
+ local p = P(list[1])
+ for l=2,#list do
+ p = p + P(list[l])
+ end
+ return p
+end
+
+function lpeg.is_lpeg(p)
+ return p and lpegtype(p) == "pattern"
+end
+
+-- For the moment here, but it might move to utilities:
+
+local sort, fastcopy, sortedpairs = table.sort, table.fastcopy, table.sortedpairs -- dependency!
+
+function lpeg.append(list,pp)
+ local p = pp
+ if #list > 0 then
+ list = fastcopy(list)
+ sort(list)
+ for l=1,#list do
+ if p then
+ p = P(list[l]) + p
+ else
+ p = P(list[l])
+ end
+ end
+ else
+ for k, v in sortedpairs(list) do
+ if p then
+ p = P(k)/v + p
+ else
+ p = P(k)/v
+ end
+ end
+ end
+ return p
+end
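+
+-- a usage sketch (illustrative; results assumed): an indexed list gives a
+-- plain alternation, a hash gives an alternation with replacements:
+--
+--~ local p = lpeg.append { one = "1", two = "2" }
+--~ print(lpeg.match(lpeg.Cs((p + 1)^0),"one and two")) -- "1 and 2"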
+
+--~ Cf(Ct("") * (Cg(C(...) * "=" * Cs(...)))^0, rawset)
+
+end -- closure
+
+do -- begin closure to overcome local limits and interference
+
+if not modules then modules = { } end modules ['l-boolean'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local type, tonumber = type, tonumber
+
+boolean = boolean or { }
+local boolean = boolean
+
+function boolean.tonumber(b)
+ if b then return 1 else return 0 end -- explicit if/else instead of: return b and 1 or 0
+end
+
+function toboolean(str,tolerant)
+ if tolerant then
+ local tstr = type(str)
+ if tstr == "string" then
+ return str == "true" or str == "yes" or str == "on" or str == "1" or str == "t"
+ elseif tstr == "number" then
+ return tonumber(str) ~= 0
+ elseif tstr == "nil" then
+ return false
+ else
+ return str
+ end
+ elseif str == "true" then
+ return true
+ elseif str == "false" then
+ return false
+ else
+ return str
+ end
+end
+
+string.toboolean = toboolean
+
+function string.is_boolean(str,default)
+ if type(str) == "string" then
+ if str == "true" or str == "yes" or str == "on" or str == "t" then
+ return true
+ elseif str == "false" or str == "no" or str == "off" or str == "f" then
+ return false
+ end
+ end
+ return default
+end
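+
+-- a usage sketch (illustrative; results assumed): tolerant conversion
+-- accepts several notations, is_boolean falls back to the given default:
+--
+--~ toboolean("yes",true)            -- true
+--~ toboolean("0",true)              -- false
+--~ string.is_boolean("off")         -- false
+--~ string.is_boolean("maybe",true)  -- true (the default)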
+
+end -- closure
+
+do -- begin closure to overcome local limits and interference
+
+if not modules then modules = { } end modules ['l-math'] = {
+ version = 1.001,
+ comment = "companion to luat-lib.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local floor, sin, cos, tan = math.floor, math.sin, math.cos, math.tan
+
+if not math.round then
+ function math.round(x) return floor(x + 0.5) end
+end
+
+if not math.div then
+ function math.div(n,m) return floor(n/m) end
+end
+
+if not math.mod then
+ function math.mod(n,m) return n % m end
+end
+
+local pipi = 2*math.pi/360
+
+if not math.sind then
+ function math.sind(d) return sin(d*pipi) end
+ function math.cosd(d) return cos(d*pipi) end
+ function math.tand(d) return tan(d*pipi) end
+end
+
+if not math.odd then
+ function math.odd (n) return n % 2 ~= 0 end
+ function math.even(n) return n % 2 == 0 end
+end
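+
+-- a usage sketch (illustrative; results assumed): the degree variants simply
+-- scale by 2*pi/360:
+--
+--~ print(math.round(2.5))            -- 3
+--~ print(math.div(7,2))              -- 3
+--~ print(math.sind(90))              -- 1
+--~ print(math.odd(3), math.even(3))  -- true  false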
+
end -- closure
do -- begin closure to overcome local limits and interference
@@ -2958,6 +3021,7 @@ local next, tostring, rawget = next, tostring, rawget
local format, match, lower, gsub = string.format, string.match, string.lower, string.gsub
local utfbyte = utf.byte
local sort, insert, concat, sortedkeys, serialize, fastcopy = table.sort, table.insert, table.concat, table.sortedkeys, table.serialize, table.fastcopy
+local derivetable = table.derive
local trace_defining = false trackers.register("fonts.defining", function(v) trace_defining = v end)
local trace_scaling = false trackers.register("fonts.scaling" , function(v) trace_scaling = v end)
@@ -3139,10 +3203,9 @@ function constructors.scale(tfmdata,specification)
local mathparameters = tfmdata.mathparameters or { }
--
local targetcharacters = { }
- local targetdescriptions = table.derive(descriptions)
- local targetparameters = table.derive(parameters)
- -- local targetmathparameters = table.fastcopy(mathparameters) -- happens elsewhere
- local targetproperties = table.derive(properties)
+ local targetdescriptions = derivetable(descriptions)
+ local targetparameters = derivetable(parameters)
+ local targetproperties = derivetable(properties)
local targetgoodies = goodies -- we need to loop so no metatable
target.characters = targetcharacters
target.descriptions = targetdescriptions
@@ -4827,7 +4890,7 @@ local getn = table.getn
local lpegmatch = lpeg.match
local reversed, concat, remove = table.reversed, table.concat, table.remove
local ioflush = io.flush
-local fastcopy, tohash = table.fastcopy, table.tohash
+local fastcopy, tohash, derivetable = table.fastcopy, table.tohash, table.derive
local allocate = utilities.storage.allocate
local registertracker = trackers.register
@@ -6476,9 +6539,9 @@ local function copytotfm(data,cache_id)
if data then
local metadata = data.metadata
local resources = data.resources
- local properties = table.derive(data.properties)
- local descriptions = table.derive(data.descriptions)
- local goodies = table.derive(data.goodies)
+ local properties = derivetable(data.properties)
+ local descriptions = derivetable(data.descriptions)
+ local goodies = derivetable(data.goodies)
local characters = { }
local parameters = { }
local mathparameters = { }
diff --git a/tex/generic/context/luatex-fonts.lua b/tex/generic/context/luatex-fonts.lua
index 23d33f26b..1d844911d 100644
--- a/tex/generic/context/luatex-fonts.lua
+++ b/tex/generic/context/luatex-fonts.lua
@@ -122,10 +122,10 @@ else
-- version 1.0 there will be an official api defined.
loadmodule('l-string.lua')
+ loadmodule('l-table.lua')
loadmodule('l-lpeg.lua')
loadmodule('l-boolean.lua')
loadmodule('l-math.lua')
- loadmodule('l-table.lua')
loadmodule('l-file.lua')
loadmodule('l-io.lua')
diff --git a/tex/generic/context/luatex-plain.tex b/tex/generic/context/luatex-plain.tex
new file mode 100644
index 000000000..e47ad58ad
--- /dev/null
+++ b/tex/generic/context/luatex-plain.tex
@@ -0,0 +1,25 @@
+%D \module
+%D [ file=luatex-plain,
+%D version=2009.12.01,
+%D title=\LUATEX\ Macros,
+%D subtitle=Plain Format,
+%D author=Hans Hagen,
+%D date=\currentdate,
+%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
+
+\input plain
+
+\directlua {tex.enableprimitives('', tex.extraprimitives())}
+
+\pdfoutput=1
+
+\everyjob \expandafter {%
+ \the\everyjob
+ \input luatex-basics\relax
+ \input luatex-fonts\relax
+ \input luatex-mplib\relax
+}
+
+\edef\fmtversion{\fmtversion+luatex}
+
+\dump