-rw-r--r--  context/data/scite/lexers/scite-context-lexer-cld.lua    |  82
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-mps.lua    |  75
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-tex.lua    | 162
-rw-r--r--  context/data/scite/lexers/scite-context-lexer.lua        | 238
-rw-r--r--  context/data/scite/lexers/themes/scite-context-theme.lua | 103
-rw-r--r--  context/data/scite/scite-context-readme.tex              |   1
-rw-r--r--  context/data/scite/scite-context.properties              |   4
-rw-r--r--  metapost/context/base/mp-tool.mp                         |   8
-rw-r--r--  tex/context/base/attr-ini.mkiv                           |   5
-rw-r--r--  tex/context/base/char-def.lua                            |   1
-rw-r--r--  tex/context/base/cont-new.mkii                           |   2
-rw-r--r--  tex/context/base/cont-new.mkiv                           |   2
-rw-r--r--  tex/context/base/context-version.pdf                     | bin 4089 -> 4090 bytes
-rw-r--r--  tex/context/base/context-version.png                     | bin 106228 -> 105346 bytes
-rw-r--r--  tex/context/base/context.mkii                            |   2
-rw-r--r--  tex/context/base/context.mkiv                            |   2
-rw-r--r--  tex/context/base/math-map.lua                            |   9
-rw-r--r--  tex/context/base/mult-def.lua                            |  56
-rw-r--r--  tex/context/base/mult-low.lua                            | 129
-rw-r--r--  tex/context/base/mult-mps.lua                            |  12
-rw-r--r--  tex/context/base/mult-prm.lua                            |   6
-rw-r--r--  tex/context/base/mult-prm.mkiv                           |   7
-rw-r--r--  tex/context/base/node-res.lua                            |   2
-rw-r--r--  tex/context/base/status-files.pdf                        | bin 23942 -> 23926 bytes
-rw-r--r--  tex/context/base/status-lua.pdf                          | bin 162348 -> 162348 bytes
-rw-r--r--  tex/context/base/syst-aux.mkiv                           | 394
-rw-r--r--  tex/context/base/syst-ini.mkiv                           | 163
-rw-r--r--  tex/generic/context/luatex/luatex-fonts-merged.lua       |   2
28 files changed, 831 insertions, 636 deletions
diff --git a/context/data/scite/lexers/scite-context-lexer-cld.lua b/context/data/scite/lexers/scite-context-lexer-cld.lua
index 1e5d8b59c..1abc55f91 100644
--- a/context/data/scite/lexers/scite-context-lexer-cld.lua
+++ b/context/data/scite/lexers/scite-context-lexer-cld.lua
@@ -9,13 +9,15 @@ local info = {
-- Adapted from lua.lua by Mitchell who based it on a lexer by Peter Odding.
local lexer = lexer
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
local P, R, S, C, Cg, Cb, Cs, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt
local match, find = string.match, string.find
local global = _G
module(...)
+local cldlexer = _M
+
local keywords = {
'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for', 'function',
'if', 'in', 'local', 'nil', 'not', 'or', 'repeat', 'return', 'then', 'true',
@@ -77,53 +79,58 @@ local longcomment = Cmt(#('[[' + ('[' * C(P('=')^0) * '[')), function(input,ind
return stop and stop + 1 or #input + 1
end)
-local whitespace = token(lexer.WHITESPACE, lexer.space^1)
-local any_char = lexer.any_char
+local whitespace = cldlexer.WHITESPACE -- triggers states
+
+local space = lexer.space -- S(" \n\r\t\f\v")
+local any = lexer.any
local squote = P("'")
local dquote = P('"')
local escaped = P("\\") * P(1)
local dashes = P('--')
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
local shortcomment = dashes * lexer.nonnewline^0
local longcomment = dashes * longcomment
-local comment = token(lexer.COMMENT, longcomment + shortcomment)
-
-local shortstring = token("quote", squote)
- * token(lexer.STRING, (escaped + (1-squote))^0 )
- * token("quote", squote)
- + token("quote", dquote)
- * token(lexer.STRING, (escaped + (1-dquote))^0 )
- * token("quote", dquote)
-
-local longstring = token("quote", longonestart)
- * token(lexer.STRING, longonestring)
- * token("quote", longonestop)
- + token("quote", longtwostart)
- * token(lexer.STRING, longtwostring)
- * token("quote", longtwostop)
+local comment = token("comment", longcomment + shortcomment)
+
+local shortstring = token("quote", squote)
+ * token("string", (escaped + (1-squote))^0 )
+ * token("quote", squote)
+ + token("quote", dquote)
+ * token("string", (escaped + (1-dquote))^0 )
+ * token("quote", dquote)
+
+local longstring = token("quote", longonestart)
+ * token("string", longonestring)
+ * token("quote", longonestop)
+ + token("quote", longtwostart)
+ * token("string", longtwostring)
+ * token("quote", longtwostop)
local string = shortstring
+ longstring
local integer = P('-')^-1 * (lexer.hex_num + lexer.dec_num)
-local number = token(lexer.NUMBER, lexer.float + integer)
+local number = token("number", lexer.float + integer)
local word = R('AZ','az','__','\127\255') * (lexer.alnum + '_')^0
-local identifier = token(lexer.IDENTIFIER, word)
+local identifier = token("default", word)
-local operator = token(lexer.OPERATOR, P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split of {}[]()
+local operator = token("special", P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split of {}[]()
-local keyword = token(lexer.KEYWORD, word_match(keywords))
-local builtin = token(lexer.FUNCTION, word_match(functions))
-local constant = token(lexer.CONSTANT, word_match(constants))
-local csname = token("user", word_match(csnames)) * (
- whitespace^0 * #S("{(")
- + ( whitespace^0 * token(lexer.OPERATOR, P(".")) * whitespace^0 * token("csname",word) )^1
+local keyword = token("keyword", exact_match(keywords))
+local builtin = token("plain", exact_match(functions))
+local constant = token("data", exact_match(constants))
+local csname = token("user", exact_match(csnames)) * (
+ spacing^0 * #S("{(")
+ + ( spacing^0 * token("special", P(".")) * spacing^0 * token("csname",word) )^1
)
_rules = {
- { 'whitespace', whitespace },
+ { 'whitespace', spacing },
{ 'keyword', keyword },
{ 'function', builtin },
{ 'csname', csname },
@@ -133,24 +140,17 @@ _rules = {
{ 'comment', comment },
{ 'number', number },
{ 'operator', operator },
- { 'any_char', any_char },
+ { 'rest', rest },
}
-_tokenstyles = {
- { "comment", lexer.style_context_comment },
- { "quote", lexer.style_context_quote },
- { "keyword", lexer.style_context_keyword },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
-}
+_tokenstyles = lexer.context.styleset
_foldsymbols = {
_patterns = {
'%l+',
'[%({%)}%[%]]',
},
- [lexer.KEYWORD] = {
+ ['keyword'] = {
['if'] = 1,
['end'] = -1,
['do'] = 1,
@@ -158,13 +158,13 @@ _foldsymbols = {
['repeat'] = 1,
['until'] = -1,
},
- [lexer.COMMENT] = {
+ ['comment'] = {
['['] = 1, [']'] = -1,
},
- ["quote"] = { -- to be tested
+ ['quote'] = { -- to be tested
['['] = 1, [']'] = -1,
},
- [lexer.OPERATOR] = {
+ ['special'] = {
['('] = 1, [')'] = -1,
['{'] = 1, ['}'] = -1,
},
diff --git a/context/data/scite/lexers/scite-context-lexer-mps.lua b/context/data/scite/lexers/scite-context-lexer-mps.lua
index fa7d88c5d..70324f340 100644
--- a/context/data/scite/lexers/scite-context-lexer-mps.lua
+++ b/context/data/scite/lexers/scite-context-lexer-mps.lua
@@ -8,8 +8,7 @@ local info = {
local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
-local exact_match = lexer.context.exact_match
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
local type, next, pcall, loadfile = type, next, pcall, loadfile
@@ -34,13 +33,15 @@ do
end
-local whitespace = lexer.WHITESPACE -- triggers states
-local any_char = lexer.any_char
+local whitespace = metafunlexer.WHITESPACE -- triggers states
local space = lexer.space -- S(" \n\r\t\f\v")
+local any = lexer.any
+
local digit = R("09")
local sign = S("+-")
local period = P(".")
+local dquote = P('"')
local cstoken = R("az","AZ") + P("_")
local number = sign^-1 * ( -- at most one
digit^1 * period * digit^0 -- 10.0 10.
@@ -48,19 +49,20 @@ local number = sign^-1 * ( -- at most one
+ digit^1 -- 10
)
-local spacing = token(whitespace, space^1)
-local comment = token('comment', P('%') * (1-S("\n\r"))^0)
-local metafun = token('command', exact_match(metafuncommands))
-local plain = token('plain', exact_match(plaincommands))
-local quoted = token('specials', P('"'))
- * token('default', P(1-P('"'))^1)
- * token('specials', P('"'))
-local primitive = token('primitive', exact_match(primitivecommands))
-local csname = token('user', cstoken^1)
-local specials = token('specials', S("#()[]<>=:\""))
-local number = token('number', number)
-local extras = token('extras', S("`~%^&_-+/\'|\\"))
-local default = token('default', P(1))
+local spacing = token(whitespace, space^1)
+local rest = token('default', any)
+local comment = token('comment', P('%') * (1-S("\n\r"))^0)
+local metafun = token('command', exact_match(metafuncommands))
+local plain = token('plain', exact_match(plaincommands))
+local quoted = token('quote', dquote)
+ * token('string', P(1-dquote)^1)
+ * token('quote', dquote)
+local primitive = token('primitive', exact_match(primitivecommands))
+----- csname = token('user', cstoken^1)
+local identifier = token('default', cstoken^1)
+local number = token('number', number)
+local special = token('special', S("#()[]<>=:\""))
+local extra = token('extra', S("`~%^&_-+/\'|\\"))
_rules = {
{ 'whitespace', spacing },
@@ -68,22 +70,33 @@ _rules = {
{ 'metafun', metafun },
{ 'plain', plain },
{ 'primitive', primitive },
- { 'csname', csname },
+ { 'identifier', identifier },
{ 'number', number },
{ 'quoted', quoted },
- { 'specials', specials },
- { 'extras', extras },
- { 'any_char', any_char },
+ { 'special', special },
+ { 'extra', extra },
+ { 'rest', rest },
}
-_tokenstyles = {
- { "comment", lexer.style_context_comment },
- { "default", lexer.style_context_default },
- { "number" , lexer.style_context_number },
- { "primitive", lexer.style_context_primitive },
- { "plain", lexer.style_context_plain },
- { "command", lexer.style_context_command },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
+_tokenstyles = lexer.context.styleset
+
+_foldsymbols = {
+ _patterns = {
+ "%l+",
+ },
+ ["primitive"] = {
+ ["beginfig"] = 1,
+ ["endfig"] = -1,
+ ["def"] = 1,
+ ["vardef"] = 1,
+ ["primarydef"] = 1,
+ ["secondarydef" ] = 1,
+ ["tertiarydef"] = 1,
+ ["enddef"] = -1,
+ ["if"] = 1,
+ ["fi"] = -1,
+ ["for"] = 1,
+ ["forever"] = 1,
+ ["endfor"] = -1,
+ }
}
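
Note (not part of the commit): the new _foldsymbols table feeds the folder in scite-context-lexer.lua, patched later in this diff. Words matched by the "%l+" pattern are looked up under the style they received, and the numbers adjust the fold level. A hedged, simplified illustration in plain Lua (the style lookup via get_style_at is left out):

local foldsymbols = {
    ["primitive"] = { ["def"] = 1, ["enddef"] = -1, ["if"] = 1, ["fi"] = -1 },
}

local level = 0
for word in string.gmatch("def x = 1 enddef ;", "%l+") do
    local delta = foldsymbols["primitive"][word]   -- the real folder picks the table by style
    if type(delta) == "number" then
        level = level + delta
    end
end
-- level ends at 0 again: the def ... enddef pair folds as one region
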
diff --git a/context/data/scite/lexers/scite-context-lexer-tex.lua b/context/data/scite/lexers/scite-context-lexer-tex.lua
index 4a1a0a766..caab6fc4b 100644
--- a/context/data/scite/lexers/scite-context-lexer-tex.lua
+++ b/context/data/scite/lexers/scite-context-lexer-tex.lua
@@ -6,7 +6,7 @@ local info = {
license = "see context related readme files",
}
-
+-- maybe: _LINEBYLINE variant for large files (no nesting)
-- maybe: protected_macros
--[[
@@ -24,11 +24,8 @@ local info = {
-- local interface = props["keywordclass.macros.context.en"]
-- local interface = lexer.get_property("keywordclass.macros.context.en","")
- -- the embedded lexers don't backtrack (so they're not that usefull on large
- -- texts) which is probably a scintilla issue (trade off between speed and lexable
- -- area); also there is some weird bleeding back to the parent lexer with respect
- -- to colors (i.e. the \ in \relax can become black) so I might as well use private
- -- color specifications
+ -- it seems that whitespace triggers the lexer when embedding happens, but this
+ -- is quite fragile due to duplicate styles
-- this lexer does not care about other macro packages (one can of course add a fake
-- interface but it's not on the agenda)
@@ -37,21 +34,23 @@ local info = {
local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
-local exact_match = lexer.context.exact_match
-local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
+local P, R, S, V, C, Cmt, Cp, Cc, Ct = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt, lpeg.Cp, lpeg.Cc, lpeg.Ct
local type, next, pcall, loadfile, setmetatable = type, next, pcall, loadfile, setmetatable
+local find, match = string.find, string.match
module(...)
local contextlexer = _M
+
local basepath = lexer.context and lexer.context.path or _LEXERHOME
local commands = { en = { } }
local primitives = { }
local helpers = { }
+local constants = { }
-do
+do -- todo: only once, store in global
local definitions = lexer.context.loaddefinitions("mult-def.lua")
@@ -67,7 +66,13 @@ do
end
end
end
- helpers = definitions.helpers or { }
+ end
+
+ local definitions = lexer.context.loaddefinitions("mult-low.lua")
+
+ if definitions then
+ helpers = definitions.helpers or { }
+ constants = definitions.constants or { }
end
local definitions = lexer.context.loaddefinitions("mult-prm.lua")
@@ -84,43 +89,81 @@ end
local currentcommands = commands.en or { }
-local knowncommand = Cmt(R("az","AZ")^1, function(_,i,s)
+local cstoken = R("az","AZ","\127\255") + S("@!?_")
+
+local knowncommand = Cmt(cstoken^1, function(_,i,s)
return currentcommands[s] and i
end)
-local find, match = string.find, string.match
-
-local knownpreamble = Cmt(P('% '), function(input,i,_)
+local knownpreamble = Cmt(P("% "), function(input,i,_)
if i < 10 then
- local s, e, word = find(input,'^(.+)[\n\r]',i)
+ local s, e, word = find(input,'^(.+)[\n\r]',i) -- combine with match
if word then
local interface = match(word,"interface=(..)")
if interface then
- currentcommands = commands[interface] or commands.en or { }
+ currentcommands = commands[interface] or commands.en or { }
end
end
end
return false
end)
-local whitespace = lexer.WHITESPACE -- triggers states
-local any_char = lexer.any_char
+-- -- the token list contains { "style", endpos } entries
+-- --
+-- -- in principle this is faster but it is also crash sensitive for large files
+
+-- local constants_hash = { } for i=1,#constants do constants_hash [constants [i]] = true end
+-- local helpers_hash = { } for i=1,#helpers do helpers_hash [helpers [i]] = true end
+-- local primitives_hash = { } for i=1,#primitives do primitives_hash[primitives[i]] = true end
+
+-- local specialword = Ct( P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- if currentcommands[s] then
+-- return true, "command", i
+-- elseif constants_hash[s] then
+-- return true, "data", i
+-- elseif helpers_hash[s] then
+-- return true, "plain", i
+-- elseif primitives_hash[s] then
+-- return true, "primitive", i
+-- else -- if starts with if then primitive
+-- return true, "user", i
+-- end
+-- end) )
+
+-- local specialword = P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- if currentcommands[s] then
+-- return true, { "command", i }
+-- elseif constants_hash[s] then
+-- return true, { "data", i }
+-- elseif helpers_hash[s] then
+-- return true, { "plain", i }
+-- elseif primitives_hash[s] then
+-- return true, { "primitive", i }
+-- else -- if starts with if then primitive
+-- return true, { "user", i }
+-- end
+-- end)
+
+local whitespace = contextlexer.WHITESPACE -- triggers states
local space = lexer.space -- S(" \n\r\t\f\v")
-local cstoken = R("az","AZ") + S("@!?_") -- todo: utf8
+local any = lexer.any
local spacing = token(whitespace, space^1)
+local rest = token('default', any)
local preamble = token('preamble', knownpreamble)
local comment = token('comment', P('%') * (1-S("\n\r"))^0)
local command = token('command', P('\\') * knowncommand)
+local constant = token('data', P('\\') * exact_match(constants))
local helper = token('plain', P('\\') * exact_match(helpers))
local primitive = token('primitive', P('\\') * exact_match(primitives))
local ifprimitive = token('primitive', P('\\if') * cstoken^1)
local csname = token('user', P('\\') * (cstoken^1 + P(1)))
-local grouping = token('grouping', S("{$}"))
-local specials = token('specials', S("#()[]<>=\""))
-local extras = token('extras', S("`~%^&_-+/\'|"))
-local default = token('default', P(1))
+local grouping = token('grouping', S("{$}")) -- maybe also \bgroup \egroup \begingroup \endgroup
+local special = token('special', S("#()[]<>=\""))
+local extra = token('extra', S("`~%^&_-+/\'|"))
+
+local text = token('default', cstoken^1 )
----- startluacode = token("grouping", P("\\startluacode"))
----- stopluacode = token("grouping", P("\\stopluacode"))
@@ -128,8 +171,7 @@ local default = token('default', P(1))
local luastatus = nil
local luaenvironment = P("luacode")
-local inlinelua = P("\\ctxlua")
- + P("\\ctxcommand")
+local inlinelua = P("\\ctx") * ( P("lua") + P("command") )
+ P("\\cldcontext")
local startlua = P("\\start") * Cmt(luaenvironment,function(_,i,s) luastatus = s return true end)
@@ -142,13 +184,17 @@ local stoplua = P("\\stop") * Cmt(luaenvironment,function(_,i,s)
local startluacode = token("embedded", startlua)
local stopluacode = token("embedded", stoplua)
-local metafunenvironment = P("MPcode")
- + P("useMPgraphic")
- + P("reusableMPgraphic")
- + P("uniqueMPgraphic")
- + P("MPinclusions")
- + P("MPextensions")
- + P("MPgraphic")
+-- local metafunenvironment = P("useMPgraphic")
+-- + P("reusableMPgraphic")
+-- + P("uniqueMPgraphic")
+-- + P("MPcode")
+-- + P("MPpage")
+-- + P("MPinclusions")
+-- + P("MPextensions")
+-- + P("MPgraphic")
+
+local metafunenvironment = ( P("use") + P("reusable") + P("unique") ) * ("MPgraphic")
+ + P("MP") * ( P("code")+ P("page") + P("inclusions") + P("extensions") + P("graphic") )
-- local metafunstatus = nil -- this does not work, as the status gets lost in an embedded lexer
-- local startmetafun = P("\\start") * Cmt(metafunenvironment,function(_,i,s) metafunstatus = s return true end)
@@ -157,59 +203,50 @@ local metafunenvironment = P("MPcode")
local startmetafun = P("\\start") * metafunenvironment
local stopmetafun = P("\\stop") * metafunenvironment
-local openargument = token("specials",P("{"))
-local closeargument = token("specials",P("}"))
-local argumentcontent = token("any_char",(1-P("}"))^0)
+local openargument = token("special", P("{"))
+local closeargument = token("special", P("}"))
+local argumentcontent = token("default",(1-P("}"))^0)
-local metafunarguments = (token("default",spacing^0) * openargument * argumentcontent * closeargument)^-2
+local metafunarguments = (spacing^0 * openargument * argumentcontent * closeargument)^-2
local startmetafuncode = token("embedded", startmetafun) * metafunarguments
local stopmetafuncode = token("embedded", stopmetafun)
--- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which means that we need to
--- have frozen at the moment we load another lexer. Because spacing is used to revert to a parent lexer we need
--- to make sure that we load children as late as possible in order not to get the wrong whitespace trigger. This
--- took me quite a while to figure out (not being that familiar with the internals). BTW, if performance becomes
--- an issue we can rewrite the main lex function (memorize the grammars and speed up the byline variant).
-
local cldlexer = lexer.load('scite-context-lexer-cld')
local mpslexer = lexer.load('scite-context-lexer-mps')
lexer.embed_lexer(contextlexer, cldlexer, startluacode, stopluacode)
lexer.embed_lexer(contextlexer, mpslexer, startmetafuncode, stopmetafuncode)
+-- Watch the text grabber, after all, we're talking mostly of text (beware,
+-- no punctuation here as it can be special. We might go for utf here.
+
_rules = {
{ "whitespace", spacing },
{ "preamble", preamble },
+
+ { "text", text },
+
{ "comment", comment },
+
+ { "constant", constant },
{ "helper", helper },
{ "command", command },
{ "ifprimitive", ifprimitive },
{ "primitive", primitive },
{ "csname", csname },
+
+ -- { "whatever", specialword }, -- not yet, crashes
+
{ "grouping", grouping },
- { "specials", specials },
- { "extras", extras },
- { 'any_char', any_char },
-}
+ { "special", special },
+ { "extra", extra },
-_tokenstyles = {
- { "preamble", lexer.style_context_preamble },
- { "comment", lexer.style_context_comment },
- { "default", lexer.style_context_default },
- { 'number', lexer.style_context_number },
- { "embedded", lexer.style_context_embedded },
- { "grouping", lexer.style_context_grouping },
- { "primitive", lexer.style_context_primitive },
- { "plain", lexer.style_context_plain },
- { "command", lexer.style_context_command },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
- { "quote", lexer.style_context_quote },
- { "keyword", lexer.style_context_keyword },
+ { "rest", rest },
}
+_tokenstyles = lexer.context.styleset
+
local folds = {
["\\start"] = 1, ["\\stop" ] = -1,
["\\begin"] = 1, ["\\end" ] = -1,
@@ -222,5 +259,6 @@ _foldsymbols = {
},
["helper"] = folds,
["command"] = folds,
+ ["user"] = folds,
["grouping"] = folds,
}
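
Note (not part of the commit): the factored metafunenvironment pattern above is meant to accept exactly the environment names spelled out in the commented-out sum. A small hedged check, runnable outside SciTE with the lpeg module:

local lpeg = require("lpeg")   -- already a global inside the lexer environment
local P = lpeg.P

local metafunenvironment =
      ( P("use") + P("reusable") + P("unique") ) * P("MPgraphic")
    + P("MP") * ( P("code") + P("page") + P("inclusions") + P("extensions") + P("graphic") )

for _, name in ipairs {
    "useMPgraphic", "reusableMPgraphic", "uniqueMPgraphic",
    "MPcode", "MPpage", "MPinclusions", "MPextensions", "MPgraphic",
} do
    assert(lpeg.match(metafunenvironment, name), name)
end
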
diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/lexers/scite-context-lexer.lua
index 688eb5776..04937cbc0 100644
--- a/context/data/scite/lexers/scite-context-lexer.lua
+++ b/context/data/scite/lexers/scite-context-lexer.lua
@@ -16,6 +16,14 @@ local info = {
-- function and optimizing the lex function gained another 2+ seconds. A 6 second load
-- is quite ok for me.
+-- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which
+-- means that we need to have it frozen at the moment we load another lexer. Because spacing
+-- is used to revert to a parent lexer we need to make sure that we load children as late
+-- as possible in order not to get the wrong whitespace trigger. This took me quite a while
+-- to figure out (not being that familiar with the internals). BTW, if performance becomes
+-- an issue we can rewrite the main lex function (memorize the grammars and speed up the
+-- byline variant).
+
local R, P, S, Cp, Cs, Ct, Cmt, Cc = lpeg.R, lpeg.P, lpeg.S, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc
local lpegmatch = lpeg.match
local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub
@@ -27,22 +35,6 @@ dofile(_LEXERHOME .. '/lexer.lua')
lexer.context = lexer.context or { }
--- function lexer.context.loaddefinitions(name)
--- local basepath = lexer.context and lexer.context.path or _LEXERHOME
--- local definitions = loadfile(basepath and (basepath .. "/" .. name) or name)
--- if not definitions then
--- definitions = loadfile(_LEXERHOME .. "/context/" .. name)
--- end
--- if type(definitions) == "function" then
--- definitions = definitions()
--- end
--- if type(definitions) == "table" then
--- return definitions
--- else
--- return nil
--- end
--- end
-
function lexer.context.loaddefinitions(name)
local definitions = loadfile(_LEXERHOME .. "/context/" .. name)
if not definitions and lexer.context and lexer.context.path then
@@ -91,19 +83,34 @@ function lexer.context.word_match(words,word_chars,case_insensitive)
end
end
--- nicer anyway:
+-- nicer (todo: utf):
--- todo: utf
+local defaults = R("az","AZ","\127\255","__")
-function lexer.context.exact_match(words,case_insensitive)
- local pattern = S(concat(words)) + R("az","AZ","\127\255") -- the concat catches _ etc
+function lexer.context.exact_match(words,word_chars,case_insensitive)
+ local characters = concat(words)
+ local pattern -- the concat catches _ etc
+ if word_chars == true or word_chars == false or word_chars == nil then
+ word_chars = ""
+ end
+ if type(word_chars) == "string" then
+ pattern = S(characters) + defaults
+ if case_insensitive then
+ pattern = pattern + S(upper(characters)) + S(lower(characters))
+ end
+ if word_chars ~= "" then
+ pattern = pattern + S(word_chars)
+ end
+ elseif word_chars then
+ pattern = word_chars
+ end
if case_insensitive then
local list = { }
for i=1,#words do
list[lower(words[i])] = true
end
return Cmt(pattern^1, function(_,i,s)
- return list[lower(s)] and i
+ return list[lower(s)] -- and i
end)
else
local list = { }
@@ -111,28 +118,20 @@ function lexer.context.exact_match(words,case_insensitive)
list[words[i]] = true
end
return Cmt(pattern^1, function(_,i,s)
- return list[s] and i
+ return list[s] -- and i
end)
end
end
-function lexer.context.word_match(words,word_chars,case_insensitive) -- word_chars not used (can be omitted)
- if word_chars == true then
- return lexer.context.exact_match(words,true)
- else
- return lexer.context.exact_match(words,case_insensitive)
- end
-end
+-- overloaded functions
--- Overloaded functions.
+local FOLD_BASE = SC_FOLDLEVELBASE
+local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
+local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
-local FOLD_BASE = SC_FOLDLEVELBASE
-local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
-local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
-
-local newline = P("\r\n") + S("\r\n")
-
-local splitlines = Ct( ( Ct ( (Cp() * Cs((1-newline)^1) * newline^-1) + (Cp() * Cc("") * newline) ) )^0)
+local get_style_at = GetStyleAt
+local get_property = GetProperty
+local get_indent_amount = GetIndentAmount
local h_table, b_table, n_table = { }, { }, { }
@@ -140,53 +139,53 @@ setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEAD
setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
-local get_style_at = GetStyleAt
-local get_property = GetProperty
-local get_indent_amount = GetIndentAmount
-
--- local lines = lpegmatch(splitlines,text) -- iterating over lines is faster
--- for i=1, #lines do
--- local li = lines[i]
--- local line = li[2]
--- if line ~= "" then
--- local pos = li[1]
--- for i=1,nofpatterns do
--- for s, m in gmatch(line,patterns[i]) do
--- if hash[m] then
--- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
--- if symbols then
--- local l = symbols[m]
--- if l then
--- local t = type(l)
--- if t == 'number' then
--- current_level = current_level + l
--- elseif t == 'function' then
--- current_level = current_level + l(text, pos, line, s, match)
--- end
--- if current_level < FOLD_BASE then -- integrate in previous
--- current_level = FOLD_BASE
--- end
--- end
+-- local newline = P("\r\n") + S("\r\n")
+-- local splitlines = Ct( ( Ct ( (Cp() * Cs((1-newline)^1) * newline^-1) + (Cp() * Cc("") * newline) ) )^0)
+--
+-- local lines = lpegmatch(splitlines,text) -- iterating over lines is faster
+-- for i=1, #lines do
+-- local li = lines[i]
+-- local line = li[2]
+-- if line ~= "" then
+-- local pos = li[1]
+-- for i=1,nofpatterns do
+-- for s, m in gmatch(line,patterns[i]) do
+-- if hash[m] then
+-- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
+-- if symbols then
+-- local l = symbols[m]
+-- if l then
+-- local t = type(l)
+-- if t == 'number' then
+-- current_level = current_level + l
+-- elseif t == 'function' then
+-- current_level = current_level + l(text, pos, line, s, match)
+-- end
+-- if current_level < FOLD_BASE then -- integrate in previous
+-- current_level = FOLD_BASE
-- end
-- end
-- end
-- end
--- if current_level > prev_level then
--- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
--- else
--- folds[line_num] = n_table[prev_level] -- { prev_level }
--- end
--- prev_level = current_level
--- else
--- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
-- end
--- line_num = line_num + 1
-- end
-
--- not that much faster but less memory:
+-- if current_level > prev_level then
+-- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
+-- else
+-- folds[line_num] = n_table[prev_level] -- { prev_level }
+-- end
+-- prev_level = current_level
+-- else
+-- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
+-- end
+-- line_num = line_num + 1
+-- end
+--
+-- -- not that much faster but less memory:
local action_y, action_n
+local newline = P("\r\n") + S("\r\n")
local splitlines = ( (
(Cp() * Cs((1-newline)^1) * newline^-1) / function(p,l) action_y(p,l) end
+ ( newline ) / function() action_n() end
@@ -194,7 +193,7 @@ local splitlines = ( (
function lexer.context.fold(text, start_pos, start_line, start_level)
if text == '' then
- return folds
+ return { }
end
local lexer = global._LEXER
if lexer._fold then
@@ -220,7 +219,7 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
end
fold_symbols._hash = hash
end
- action_y = function(pos,line)
+ action_y = function(pos,line) -- we can consider moving this one outside the function
for i=1,nofpatterns do
for s, m in gmatch(line,patterns[i]) do
if hash[m] then
@@ -253,12 +252,12 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
prev_level = current_level
line_num = line_num + 1
end
- action_n = function()
+ action_n = function() -- we can consider moving this one outside the function
folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
line_num = line_num + 1
end
local lines = lpegmatch(splitlines,text)
- elseif get_property('fold.by.indentation', 1) == 1 then
+ elseif get_property('fold.by.indentation',1) == 1 then
local current_line = start_line
local prev_level = start_level
for _, line in gmatch(text,'([\t ]*)(.-)\r?\n') do
@@ -303,58 +302,97 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
return folds
end
-function lexer.context.lex(text, init_style)
+function lexer.context.lex(text,init_style)
local lexer = global._LEXER
local grammar = lexer._GRAMMAR
if not grammar then
return { }
- elseif lexer._LEXBYLINE then
+ elseif lexer._LEXBYLINE then -- we could keep token
local tokens = { }
local offset = 0
local noftokens = 0
- for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
- local line_tokens = lpeg_match(grammar, line)
- if line_tokens then
- for i=1,#line_tokens do
- local token = line_tokens[i]
- token[2] = token[2] + offset
+ if true then
+ for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
+ local line_tokens = lpeg_match(grammar,line)
+ if line_tokens then
+ for i=1,#line_tokens do
+ local token = line_tokens[i]
+ token[2] = token[2] + offset
+ noftokens = noftokens + 1
+ tokens[noftokens] = token
+ end
+ end
+ offset = offset + #line
+ if noftokens > 0 and tokens[noftokens][2] ~= offset then
noftokens = noftokens + 1
- tokens[noftokens] = token
+ tokens[noftokens] = { 'default', offset + 1 }
end
end
- offset = offset + #line
- if noftokens > 0 and tokens[noftokens][2] ~= offset then
- noftokens = noftokens + 1
- tokens[noftokens] = { 'default', offset + 1 }
+ else -- alternative
+ local lasttoken, lastoffset
+ for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
+ local line_tokens = lpeg_match(grammar,line)
+ if line_tokens then
+ for i=1,#line_tokens do
+ lasttoken = line_tokens[i]
+ lastoffset = lasttoken[2] + offset
+ lasttoken[2] = lastoffset
+ noftokens = noftokens + 1
+ tokens[noftokens] = lasttoken
+ end
+ end
+ offset = offset + #line
+ if lastoffset ~= offset then
+ lastoffset = offset + 1
+ lasttoken = { 'default', lastoffset }
+ noftokens = noftokens + 1
+ tokens[noftokens] = lasttoken
+ end
end
end
return tokens
elseif lexer._CHILDREN then
+ -- as we cannot print, tracing is not possible ... this might change as we can as well
+ -- generate them all in one go (sharing as much as possible)
local _hash = lexer._HASH
if not hash then
hash = { }
lexer._HASH = hash
end
grammar = hash[init_style]
- if not grammar then
+ if grammar then
+ lexer._GRAMMAR = grammar
+ else
for style, style_num in next, lexer._TOKENS do
if style_num == init_style then
local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
if lexer._INITIALRULE ~= lexer_name then
- build_grammar(lexer, lexer_name)
+ grammar = hash[lexer_name]
+ if not grammar then
+ build_grammar(lexer,lexer_name)
+ grammar = lexer._GRAMMAR
+ hash[lexer_name] = grammar
+ end
end
break
end
end
- grammar = lexer._GRAMMAR
+ grammar = grammar or lexer._GRAMMAR
hash[init_style] = grammar
end
- return lpegmatch(grammar, text)
+ return lpegmatch(grammar,text)
else
- return lpegmatch(grammar, text)
+ return lpegmatch(grammar,text)
end
end
-lexer.fold = lexer.context.fold
-lexer.lex = lexer.context.lex
-lexer.word_match = lexer.context.word_match
+-- todo: keywords: one lookup and multiple matches
+
+-- function lexer.context.token(name, patt)
+-- return Ct(patt * Cc(name) * Cp())
+-- end
+
+lexer.fold = lexer.context.fold
+lexer.lex = lexer.context.lex
+-- lexer.token = lexer.context.token
+lexer.exact_match = lexer.context.exact_match
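
Note (not part of the commit): the reworked exact_match is what the lexers earlier in this diff now import as lexer.exact_match. A hedged usage sketch, with a hypothetical keyword list; the point of the Cmt wrapper is that a whole word is consumed first and then checked against a hash, so a keyword cannot match as a prefix of a longer name:

local exact_match = lexer.context.exact_match
local token       = lexer.token

local keywords = { "begingroup", "endgroup", "def", "enddef" }   -- hypothetical list
local keyword  = token("keyword", exact_match(keywords))

-- exact_match builds (S(concat(words)) + R("az","AZ","\127\255","__"))^1 inside a
-- Cmt: "enddefx" is consumed as one word and fails the hash lookup, whereas a
-- plain P("enddef") would happily match its first seven characters.
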
diff --git a/context/data/scite/lexers/themes/scite-context-theme.lua b/context/data/scite/lexers/themes/scite-context-theme.lua
index 556779ce6..360a5d435 100644
--- a/context/data/scite/lexers/themes/scite-context-theme.lua
+++ b/context/data/scite/lexers/themes/scite-context-theme.lua
@@ -6,10 +6,8 @@ local info = {
license = "see context related readme files",
}
--- we need a proper pipe:
---
--- -- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
--- -- global.trace("OEPS") -- how do we get access to the regular lua extensions
+-- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
+-- global.trace("OEPS") -- how do we get access to the regular lua extensions
local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base
local font_name = 'Dejavu Sans Mono'
@@ -17,7 +15,7 @@ local font_size = 14
local global = _G
-dofile(_LEXERHOME .. '/themes/scite.lua') -- starting point so we miss nothing
+-- dofile(_LEXERHOME .. '/themes/scite.lua') -- starting point so we miss nothing
module('lexer', package.seeall)
@@ -63,44 +61,59 @@ style_nothing = style {
-- empty
}
-style_comment = style { fore = colors.yellow }
-style_string = style { fore = colors.magenta }
-
-style_char = style { fore = colors.magenta }
-style_class = style { fore = colors.black, bold = true }
-style_constant = style { fore = colors.cyan, bold = true }
-style_definition = style { fore = colors.black, bold = true }
-style_error = style { fore = colors.red }
-style_function = style { fore = colors.black, bold = true }
-style_keyword = style { fore = colors.blue, bold = true }
-style_number = style { fore = colors.cyan }
-style_operator = style { fore = colors.blue }
-style_preproc = style { fore = colors.yellow, bold = true }
-style_tag = style { fore = colors.cyan }
-style_type = style { fore = colors.blue }
-style_variable = style { fore = colors.black }
-style_identifier = style_nothing
-
-style_line_number = style { back = colors.linepanel, }
-style_bracelight = style { bold = true, fore = colors.orange }
-style_bracebad = style { bold = true, fore = colors.orange }
-style_indentguide = style { fore = colors.linepanel, back = colors.white }
-style_calltip = style { fore = colors.white, back = colors.tippanel }
-style_controlchar = style_nothing
-
-style_context_preamble = style_comment
-style_context_comment = style_comment
-style_context_string = style_string
-style_context_default = style_nothing
-style_context_number = style_number
-style_context_keyword = style_keyword
-style_context_quote = style { fore = colors.blue, bold = true }
-style_context_primitive = style_keyword
-style_context_plain = style { fore = colors.dark, bold = true }
-style_context_command = style { fore = colors.green, bold = true }
-style_context_embedded = style { fore = colors.black, bold = true }
-style_context_user = style { fore = colors.green }
-style_context_grouping = style { fore = colors.red }
-style_context_specials = style { fore = colors.blue }
-style_context_extras = style { fore = colors.yellow }
+style_number = style { fore = colors.cyan }
+style_comment = style { fore = colors.yellow }
+style_string = style { fore = colors.magenta }
+style_keyword = style { fore = colors.blue, bold = true }
+style_char = style { fore = colors.magenta }
+style_class = style { fore = colors.black, bold = true }
+style_constant = style { fore = colors.cyan, bold = true }
+style_definition = style { fore = colors.black, bold = true }
+style_error = style { fore = colors.red }
+style_function = style { fore = colors.black, bold = true }
+style_operator = style { fore = colors.blue }
+style_preproc = style { fore = colors.yellow, bold = true }
+style_tag = style { fore = colors.cyan }
+style_type = style { fore = colors.blue }
+style_variable = style { fore = colors.black }
+style_identifier = style_nothing
+
+style_line_number = style { back = colors.linepanel }
+style_bracelight = style { fore = colors.orange, bold = true }
+style_bracebad = style { fore = colors.orange, bold = true }
+style_indentguide = style { fore = colors.linepanel, back = colors.white }
+style_calltip = style { fore = colors.white, back = colors.tippanel }
+style_controlchar = style_nothing
+
+lexer.context.styles = {
+
+ -- ["whitespace"] = style_whitespace,
+
+ ["default"] = style_nothing,
+ ["number"] = style_number,
+ ["comment"] = style_comment,
+ ["keyword"] = style_keyword,
+ ["string"] = style_string,
+
+ ["command"] = style { fore = colors.green, bold = true },
+ ["preamble"] = style_comment,
+ ["embedded"] = style { fore = colors.black, bold = true },
+ ["grouping"] = style { fore = colors.red },
+ ["primitive"] = style_keyword,
+ ["plain"] = style { fore = colors.dark, bold = true },
+ ["user"] = style { fore = colors.green },
+ ["data"] = style_constant,
+ ["special"] = style { fore = colors.blue },
+ ["extra"] = style { fore = colors.yellow },
+ ["quote"] = style { fore = colors.blue, bold = true },
+
+}
+
+local styleset = { }
+
+for k, v in next, lexer.context.styles do
+ styleset[#styleset+1] = { k, v }
+end
+
+lexer.context.styleset = styleset
diff --git a/context/data/scite/scite-context-readme.tex b/context/data/scite/scite-context-readme.tex
index 7af38ecd9..32c3cfb10 100644
--- a/context/data/scite/scite-context-readme.tex
+++ b/context/data/scite/scite-context-readme.tex
@@ -15,6 +15,7 @@ lexers/scite-context-lexer-cld.lua
lexers/scite-context-lexer.lua
lexers/context/mult-def.lua
lexers/context/mult-prm.lua
+lexers/context/mult-low.lua
lexers/context/mult-mps.lua
lexers/themes/scite-context-theme.lua
\stoptyping
diff --git a/context/data/scite/scite-context.properties b/context/data/scite/scite-context.properties
index ea48ecc10..14af2182d 100644
--- a/context/data/scite/scite-context.properties
+++ b/context/data/scite/scite-context.properties
@@ -626,6 +626,6 @@ comment.block.at.line.start.lpeg_scite-context-lexer-cld=1
comment.block.lpeg_props=#
comment.block.at.line.start.lpeg_props=1
-style.*.34=bold,fore=#7F0000,back:#CFCFCF
-style.*.35=bold,fore=#7F0000,back:#CFCFCF
+style.*.34=bold
+style.*.35=bold
diff --git a/metapost/context/base/mp-tool.mp b/metapost/context/base/mp-tool.mp
index 480d9d186..35eca4727 100644
--- a/metapost/context/base/mp-tool.mp
+++ b/metapost/context/base/mp-tool.mp
@@ -846,15 +846,15 @@ vardef bottomboundary primary p =
if pair p : p else : (llcorner p -- lrcorner p) fi
enddef ;
-vardef rightboundary primary p =
+vardef rightboundary primary p =
if pair p : p else : (lrcorner p -- urcorner p) fi
enddef ;
-vardef topboundary primary p =
+vardef topboundary primary p =
if pair p : p else : (urcorner p -- ulcorner p) fi
enddef ;
-vardef leftboundary primary p =
+vardef leftboundary primary p =
if pair p : p else : (ulcorner p -- llcorner p) fi
enddef ;
@@ -2250,7 +2250,7 @@ vardef simplified expr p =
(reverse dostraightened(+1,dostraightened(+1,reverse p)))
enddef ;
-vardef unspiked expr p =
+vardef unspiked expr p =
(reverse dostraightened(-1,dostraightened(-1,reverse p)))
enddef ;
diff --git a/tex/context/base/attr-ini.mkiv b/tex/context/base/attr-ini.mkiv
index 578d02c27..38038e743 100644
--- a/tex/context/base/attr-ini.mkiv
+++ b/tex/context/base/attr-ini.mkiv
@@ -43,9 +43,8 @@
\def\dodefineattribute[#1][#2]% alternatively we can let lua do the housekeeping
{\expandafter\newattribute\csname @attr@#1\endcsname
\expandafter\newconstant \csname :attr:#1\endcsname
- \csname :attr:#1\endcsname\lastallocatedattribute
- \ctxcommand{defineattribute("#1",\number\lastallocatedattribute)}%
- %\writestatus\m!system{defining attribute #1 with number \number\lastallocatedattribute}%
+ \csname :attr:#1\endcsname\last_allocated_attribute
+ \ctxcommand{defineattribute("#1",\number\last_allocated_attribute)}%
\doifnotinset\s!global{#2}{\appendetoks\csname @attr@#1\endcsname\attributeunsetvalue\to\attributesresetlist}%
\doifinset \s!public{#2}{\expandafter\let\csname#1attribute\expandafter\endcsname\csname :attr:#1\endcsname}}
diff --git a/tex/context/base/char-def.lua b/tex/context/base/char-def.lua
index 7d16f193e..28d46ee93 100644
--- a/tex/context/base/char-def.lua
+++ b/tex/context/base/char-def.lua
@@ -186695,3 +186695,4 @@ characters.data={
unicodeslot=0xE01EF,
},
}
+
diff --git a/tex/context/base/cont-new.mkii b/tex/context/base/cont-new.mkii
index abd0357f8..9804622e2 100644
--- a/tex/context/base/cont-new.mkii
+++ b/tex/context/base/cont-new.mkii
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2011.09.14 12:21}
+\newcontextversion{2011.09.15 09:08}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv
index d80c38ea8..7495bc69e 100644
--- a/tex/context/base/cont-new.mkiv
+++ b/tex/context/base/cont-new.mkiv
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2011.09.14 12:21}
+\newcontextversion{2011.09.15 09:08}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/context-version.pdf b/tex/context/base/context-version.pdf
index f28a6957c..569c409bf 100644
--- a/tex/context/base/context-version.pdf
+++ b/tex/context/base/context-version.pdf
Binary files differ
diff --git a/tex/context/base/context-version.png b/tex/context/base/context-version.png
index b421b99a0..1c500d943 100644
--- a/tex/context/base/context-version.png
+++ b/tex/context/base/context-version.png
Binary files differ
diff --git a/tex/context/base/context.mkii b/tex/context/base/context.mkii
index ce28c02a6..f01740290 100644
--- a/tex/context/base/context.mkii
+++ b/tex/context/base/context.mkii
@@ -20,7 +20,7 @@
%D your styles an modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2011.09.14 12:21}
+\edef\contextversion{2011.09.15 09:08}
%D For those who want to use this:
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index f52f46334..6ba54096d 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -20,7 +20,7 @@
%D your styles an modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2011.09.14 12:21}
+\edef\contextversion{2011.09.15 09:08}
%D For those who want to use this:
diff --git a/tex/context/base/math-map.lua b/tex/context/base/math-map.lua
index b21bbc027..cd1673611 100644
--- a/tex/context/base/math-map.lua
+++ b/tex/context/base/math-map.lua
@@ -19,6 +19,15 @@ if not modules then modules = { } end modules ['math-map'] = {
-- todo: alphabets namespace
-- maybe: script/scriptscript dynamic,
+-- to be looked into once the fonts are ready (will become font
+-- goodie):
+--
+-- (U+2202,U+1D715) : upright
+-- (U+2202,U+1D715) : italic
+-- (U+2202,U+1D715) : upright
+--
+-- plus add them to the regular vectors below so that they honor \it etc
+
local type, next = type, next
local floor, div = math.floor, math.div
local merged = table.merged
diff --git a/tex/context/base/mult-def.lua b/tex/context/base/mult-def.lua
index 7e0ed0bf1..1144dba22 100644
--- a/tex/context/base/mult-def.lua
+++ b/tex/context/base/mult-def.lua
@@ -7,62 +7,6 @@ if not modules then modules = { } end modules ['mult-def'] = {
}
return {
- ["helpers"]={ -- for syntax highlighters, only the ones that are for users (boring to collect them)
- --
- "doif", "doifnot", "doifelse",
- "doifinset", "doifnotinset", "doifinsetelse",
- "doifnextcharelse", "doifnextoptionalelse", "doifnextparenthesiselse", "doiffastoptionalcheckelse",
- "doifundefinedelse", "doifdefinedelse", "doifundefined", "doifdefined",
- "doifelsevalue", "doifvalue", "doifnotvalue",
- "doifnothing", "doifsomething", "doifelsenothing", "doifsomethingelse",
- "doifvaluenothing", "doifvaluesomething", "doifelsevaluenothing",
- "doifdimensionelse",
- --
- "tracingall", "tracingnone", "loggingall",
- --
- "appendtoks", "prependtoks", "appendtotoks", "prependtotoks",
- --
- "endgraf", "empty", "null", "space", "obeyspaces", "obeylines",
- --
- "executeifdefined",
- --
- "dontleavehmode",
- --
- "setmeasure", "setemeasure", "setgmeasure", "setxmeasure", "definemeasure", "measure",
- --
- "getvalue", "setvalue", "setevalue", "setgvalue", "setxvalue", "letvalue", "letgvalue",
- "resetvalue", "undefinevalue", "ignorevalue",
- "setuvalue", "setuevalue", "setugvalue", "setuxvalue",
- "globallet", "glet",
- "getparameters", "geteparameters",
- --
- "processcommalist", "processcommacommand", "quitcommalist",
- "processaction", "processallactions",
- --
- "startsetups", "stopsetups",
- "startxmlsetups", "stopxmlsetups",
- "starttexdefinition", "stoptexdefinition",
- --
- "unexpanded", "expanded", "startexpanded", "stopexpanded", "protected", "protect", "unprotect",
- --
- "firstofoneargument",
- "firstoftwoarguments", "secondoftwoarguments",
- "firstofthreearguments", "secondofthreearguments", "thirdofthreearguments",
- "firstoffourarguments", "secondoffourarguments", "thirdoffourarguments", "fourthoffourarguments",
- "firstoffivearguments", "secondoffivearguments", "thirdoffivearguments", "fourthoffivearguments", "fifthoffivearguments",
- "firstofsixarguments", "secondofsixarguments", "thirdofsixarguments", "fourthofsixarguments", "fifthofsixarguments", "sixthofsixarguments",
- --
- "gobbleoneargument", "gobbletwoarguments", "gobblethreearguments", "gobblefourarguments", "gobblefivearguments", "gobblesixarguments", "gobblesevenarguments", "gobbleeightarguments", "gobbleninearguments", "gobbletenarguments",
- "gobbleoneoptional", "gobbletwooptionals", "gobblethreeoptionals", "gobblefouroptionals", "gobblefiveoptionals",
- --
- "dorecurse", "doloop", "exitloop", "dostepwiserecurse", "recurselevel", "recursedepth",
- --
- "newconstant", "setnewconstant", "newconditional", "settrue", "setfalse",
- --
- "dosingleempty", "dodoubleempty", "dotripleempty", "doquadrupleempty", "doquintupleempty", "dosixtupleempty", "doseventupleempty",
- "dosinglegroupempty", "dodoublegroupempty", "dotriplegroupempty", "doquadruplegroupempty", "doquintuplegroupempty",
- --
- },
["commands"]={
["CAPPED"]={
["cs"]="KAP",
diff --git a/tex/context/base/mult-low.lua b/tex/context/base/mult-low.lua
new file mode 100644
index 000000000..2084b9ad7
--- /dev/null
+++ b/tex/context/base/mult-low.lua
@@ -0,0 +1,129 @@
+if not modules then modules = { } end modules ['mult-low'] = {
+ version = 1.001,
+ comment = "companion to mult-ini.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- for syntax highlighters, only the ones that are for users (boring to collect them)
+
+return {
+ ["constants"] = {
+ --
+ "zerocount", "minusone", "minustwo", "plusone", "plustwo", "plusthree", "plusfour", "plusfive",
+ "plussix", "plusseven", "pluseight", "plusnine", "plusten", "plussixteen", "plushundred",
+ "plusthousand", "plustenthousand", "plustwentythousand", "medcard", "maxcard",
+ "zeropoint", "onepoint", "onebasepoint", "maxdimen", "scaledpoint", "thousandpoint", "points",
+ "zeroskip",
+ "pluscxxvii", "pluscxxviii", "pluscclv", "pluscclvi",
+ --
+ "endoflinetoken", "outputnewlinechar",
+ --
+ "emptytoks", "empty", "undefined",
+ --
+ "voidbox", "emptybox", "emptyvbox", "emptyhbox",
+ --
+ "bigskipamount", "medskipamount", "smallskipamount",
+ --
+ "fmtname", "fmtversion", "texengine", "texenginename", "texengineversion",
+ "luatexengine", "pdftexengine", "xetexengine", "unknownengine",
+ "etexversion", "pdftexversion", "xetexversion", "xetexrevision",
+ --
+ "activecatcode",
+ --
+ "bgroup", "egroup",
+ "endline",
+ --
+ "attributeunsetvalue",
+ --
+ "uprotationangle", "rightrotatioangle", "downrotatioangle", "leftrotatioangle",
+ --
+ },
+ ["helpers"] = {
+ --
+ "newcount", "newdimen", "newskip", "newmuskip", "newbox", "newtoks", "newread", "newwrite", "newmarks", "newinsert", "newattribute", "newif",
+ "newlanguage", "newfamily", "newfam", "newhelp", -- not used
+ --
+ "htdp",
+ "unvoidbox",
+ --
+ "scratchcounter", "globalscratchcounter",
+ "scratchdimen", "globalscratchdimen",
+ "scratchskip", "globalscratchskip",
+ "scratchmuskip", "globalscratchmuskip",
+ "scratchtoks", "globalscratchtoks",
+ "scratchbox", "globalscratchbox",
+ --
+ "scratchwidth", "scratchheight", "scratchdepth",
+ --
+ "scratchcounterone", "scratchcountertwo", "scratchcounterthree",
+ "scratchdimenone", "scratchdimentwo", "scratchdimenthree",
+ "scratchskipone", "scratchskiptwo", "scratchskipthree",
+ "scratchmuskipone", "scratchmuskiptwo", "scratchmuskipthree",
+ "scratchtoksone", "scratchtokstwo", "scratchtoksthree",
+ "scratchboxone", "scratchboxtwo", "scratchboxthree",
+ --
+ "doif", "doifnot", "doifelse",
+ "doifinset", "doifnotinset", "doifinsetelse",
+ "doifnextcharelse", "doifnextoptionalelse", "doifnextparenthesiselse", "doiffastoptionalcheckelse",
+ "doifundefinedelse", "doifdefinedelse", "doifundefined", "doifdefined",
+ "doifelsevalue", "doifvalue", "doifnotvalue",
+ "doifnothing", "doifsomething", "doifelsenothing", "doifsomethingelse",
+ "doifvaluenothing", "doifvaluesomething", "doifelsevaluenothing",
+ "doifdimensionelse", "doifnumberelse",
+ "doifcommonelse", "doifcommon", "doifnotcommon",
+ "doifinstring", "doifnotinstring", "doifinstringelse",
+ --
+ "tracingall", "tracingnone", "loggingall",
+ --
+ "appendtoks", "prependtoks", "appendtotoks", "prependtotoks",
+ --
+ "endgraf", "empty", "null", "space", "obeyspaces", "obeylines", "normalspace",
+ --
+ "executeifdefined",
+ --
+ "dontleavehmode",
+ --
+ "wait", "writestatus", "define", "redefine",
+ --
+ "setmeasure", "setemeasure", "setgmeasure", "setxmeasure", "definemeasure", "measure",
+ --
+ "getvalue", "setvalue", "setevalue", "setgvalue", "setxvalue", "letvalue", "letgvalue",
+ "resetvalue", "undefinevalue", "ignorevalue",
+ "setuvalue", "setuevalue", "setugvalue", "setuxvalue",
+ "globallet", "glet",
+ "getparameters", "geteparameters", "getgparameters", "getxparameters", "forgetparameters",
+ --
+ "processcommalist", "processcommacommand", "quitcommalist", "quitprevcommalist",
+ "processaction", "processallactions", "processfirstactioninset", "processallactionsinset",
+ --
+ "startsetups", "stopsetups",
+ "startxmlsetups", "stopxmlsetups",
+ "starttexdefinition", "stoptexdefinition",
+ "starttexcode", "stoptexcode",
+ --
+ "unexpanded", "expanded", "startexpanded", "stopexpanded", "protected", "protect", "unprotect",
+ --
+ "firstofoneargument",
+ "firstoftwoarguments", "secondoftwoarguments",
+ "firstofthreearguments", "secondofthreearguments", "thirdofthreearguments",
+ "firstoffourarguments", "secondoffourarguments", "thirdoffourarguments", "fourthoffourarguments",
+ "firstoffivearguments", "secondoffivearguments", "thirdoffivearguments", "fourthoffivearguments", "fifthoffivearguments",
+ "firstofsixarguments", "secondofsixarguments", "thirdofsixarguments", "fourthofsixarguments", "fifthofsixarguments", "sixthofsixarguments",
+ --
+ "gobbleoneargument", "gobbletwoarguments", "gobblethreearguments", "gobblefourarguments", "gobblefivearguments", "gobblesixarguments", "gobblesevenarguments", "gobbleeightarguments", "gobbleninearguments", "gobbletenarguments",
+ "gobbleoneoptional", "gobbletwooptionals", "gobblethreeoptionals", "gobblefouroptionals", "gobblefiveoptionals",
+ --
+ "dorecurse", "doloop", "exitloop", "dostepwiserecurse", "recurselevel", "recursedepth",
+ --
+ "newconstant", "setnewconstant", "newconditional", "settrue", "setfalse",
+ --
+ "dosingleempty", "dodoubleempty", "dotripleempty", "doquadrupleempty", "doquintupleempty", "dosixtupleempty", "doseventupleempty",
+ "dosinglegroupempty", "dodoublegroupempty", "dotriplegroupempty", "doquadruplegroupempty", "doquintuplegroupempty",
+ --
+ "nopdfcompression", "maximumpdfcompression", "normalpdfcompression",
+ --
+ "modulonumber", "dividenumber",
+ }
+}
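
Note (not part of the commit): this new file is consumed by scite-context-lexer-tex.lua as shown earlier in the diff; roughly, as a sketch assuming the lexer environment:

local definitions = lexer.context.loaddefinitions("mult-low.lua")

local helpers   = definitions and definitions.helpers   or { }
local constants = definitions and definitions.constants or { }

local helper   = token("plain", P("\\") * exact_match(helpers))    -- e.g. \doifelse
local constant = token("data",  P("\\") * exact_match(constants))  -- e.g. \zerocount
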
diff --git a/tex/context/base/mult-mps.lua b/tex/context/base/mult-mps.lua
index f382433de..d596fd518 100644
--- a/tex/context/base/mult-mps.lua
+++ b/tex/context/base/mult-mps.lua
@@ -20,6 +20,7 @@ return {
"addto", "clip", "input", "interim", "let", "newinternal", "save", "setbounds",
"shipout", "show", "showdependencies", "showtoken", "showvariable",
"special",
+ "rgbcolor", "cmykcolor", "graycolor",
"begingroup", "endgroup", "of", "curl", "tension", "and", "controls",
"reflectedabout", "rotatedaround", "interpath", "on", "off", "beginfig",
"endfig", "def", "vardef", "enddef", "expr", "suffix", "text", "primary", "secondary",
@@ -62,23 +63,30 @@ return {
"define_whole_blacker_pixels", "define_whole_pixels",
"define_whole_vertical_blacker_pixels",
"define_whole_vertical_pixels", "endchar", "extra_beginchar",
- "extra_endchar", "extra_setup", "font_coding_scheme",
+ "extra_endchar", "extra_setup", "font_coding_scheme", "clearxy",
"font_extra_space",
},
metafun = {
+ "sqr", "log", "ln", "exp", "inv", "pow", "pi", "radian",
+ "tand", "cotd", "sin", "cos", "tan", "cot", "atan", "asin", "acos",
+ "invsin", "invcos", "acosh", "asinh", "sinh", "cosh",
+ "paired", "tripled",
"unitcircle", "fulldiamond", "unitdiamond",
"halfcircle", "quartercircle",
"llcircle", "lrcircle", "urcircle", "ulcircle",
"tcircle", "bcircle", "lcircle", "rcircle",
"lltriangle", "lrtriangle", "urtriangle", "ultriangle",
- "smoothed", "cornered", "superellipsed", "randomized", "squeezed",
+ "smoothed", "cornered", "superellipsed", "randomized", "squeezed", "enlonged", "shortened",
"punked", "curved", "unspiked", "simplified", "blownup", "stretched",
"enlarged", "leftenlarged", "topenlarged", "rightenlarged", "bottomenlarged",
"llenlarged", "lrenlarged", "urenlarged", "ulenlarged",
"llmoved", "lrmoved", "urmoved", "ulmoved",
+ "crossed", "laddered", "randomshifted", "interpolated", "paralleled", "cutends",
+ "rightarrow", "leftarrow", "centerarrow",
"boundingbox", "innerboundingbox", "outerboundingbox",
"bottomboundary", "leftboundary", "topboundary", "rightboundary",
"xsized", "ysized", "xysized",
+ "bbwidth", "bbheight",
"cmyk", "transparent", "withshade", "spotcolor",
"drawfill", "undrawfill",
"inverted", "uncolored", "softened", "grayed",
diff --git a/tex/context/base/mult-prm.lua b/tex/context/base/mult-prm.lua
index 61ba38199..feeeb3332 100644
--- a/tex/context/base/mult-prm.lua
+++ b/tex/context/base/mult-prm.lua
@@ -153,7 +153,6 @@ return {
"badness",
"baselineskip",
"batchmode",
- "bodydir",
"botmark",
"botmarks",
"box",
@@ -256,6 +255,7 @@ return {
"hfil",
"hfill",
"hfilneg",
+ "hoffset",
"holdinginserts",
"hrule",
"hsize",
@@ -316,7 +316,6 @@ return {
"lineskiplimit",
"localbrokenpenalty",
"localinterlinepenalty",
- "localleftbox",
"localrightbox",
"long",
"lowercase",
@@ -364,6 +363,7 @@ return {
"nonscript",
"nullfont",
"number",
+ "numexpr",
"odelcode",
"odelimiter",
"omathaccent",
@@ -506,6 +506,7 @@ return {
"rpcode",
"savecatcodetable",
"savinghyphcodes",
+ "savingvdiscards",
"scantextokens",
"scriptfont",
"scriptscriptfont",
@@ -584,6 +585,7 @@ return {
"vfill",
"vfilneg",
"vfuzz",
+ "voffset",
"vrule",
"vsize",
"vss",
diff --git a/tex/context/base/mult-prm.mkiv b/tex/context/base/mult-prm.mkiv
index ab0c91b8e..af9773b6a 100644
--- a/tex/context/base/mult-prm.mkiv
+++ b/tex/context/base/mult-prm.mkiv
@@ -10,7 +10,12 @@
tex.extraprimitives('luatex'),
tex.extraprimitives('aleph'),
tex.extraprimitives('omega'),
- { "def", "catcode", "futurelet", "chardef", }
+ {
+ "def", "catcode", "futurelet", "chardef",
+ "voffset", "hoffset", "savingvdiscards",
+ "numexpr", "dimexpr",
+ "write", "dump", "skipdef,"
+ }
)
)
table.sort(primitives)
diff --git a/tex/context/base/node-res.lua b/tex/context/base/node-res.lua
index 9e17155f2..3247e153a 100644
--- a/tex/context/base/node-res.lua
+++ b/tex/context/base/node-res.lua
@@ -372,7 +372,7 @@ function pool.usertokens(id,tokens)
end
statistics.register("cleaned up reserved nodes", function()
- return format("%s nodes, %s lists of %s", pool.cleanup(tex.count["lastallocatedbox"]))
+ return format("%s nodes, %s lists of %s", pool.cleanup(tex.count["last_allocated_box"]))
end) -- \topofboxstack
statistics.register("node memory usage", function() -- comes after cleanup !
diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf
index 2abcef338..6c93547e2 100644
--- a/tex/context/base/status-files.pdf
+++ b/tex/context/base/status-files.pdf
Binary files differ
diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf
index e89a371d1..6c9d6852c 100644
--- a/tex/context/base/status-lua.pdf
+++ b/tex/context/base/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/syst-aux.mkiv b/tex/context/base/syst-aux.mkiv
index 796f3321e..4b26388b2 100644
--- a/tex/context/base/syst-aux.mkiv
+++ b/tex/context/base/syst-aux.mkiv
@@ -226,16 +226,16 @@
{\let\charactertoken=#1% = needed here
\def\!!stringa{#2}%
\def\!!stringb{#3}%
- \futurelet\nexttoken\inspectnextcharacter}
+ \futurelet\nexttoken\inspect_next_character}
-\def\inspectnextcharacter
+\def\inspect_next_character
{\ifx\nexttoken\blankspace
- \@EA\reinspectnextcharacter
+ \@EA\reinspect_next_character
\else
- \@EA\inspectnextcharacterindeed
+ \@EA\inspect_next_character_indeed
\fi}
-\def\inspectnextcharacterindeed
+\def\inspect_next_character_indeed
{\ifx\nexttoken\charactertoken
\@EA\!!stringa
\else
@@ -248,87 +248,87 @@
%D test in a run. Of course it also is more convenient to read a
%D trace then.
-\newif\ifnextblankspacetoken
+\newif\if_next_blank_space_token
-\let\nextoptionalcharactertoken=[
+\let\next_optional_character_token=[
\long\def\doifnextoptionalelse#1#2%
- {\def\nextoptionalcommandyes{#1}%
- \def\nextoptionalcommandnop{#2}%
- \let\ifnextblankspacetoken\iffalse
- \futurelet\nexttoken\inspectnextoptionalcharacter}
+ {\def\next_optional_command_yes{#1}%
+ \def\next_optional_command_nop{#2}%
+ \let\if_next_blank_space_token\iffalse
+ \futurelet\nexttoken\inspect_next_optional_character}
-\def\inspectnextoptionalcharacter
+\def\inspect_next_optional_character
{\ifx\nexttoken\blankspace
- \@EA\reinspectnextoptionalcharacter
+ \@EA\reinspect_next_optional_character
\else
- \@EA\inspectnextoptionalcharacterindeed
+ \@EA\inspect_next_optional_character_indeed
\fi}
-\def\inspectnextoptionalcharacterindeed
- {\ifx\nexttoken\nextoptionalcharactertoken
- \@EA\nextoptionalcommandyes
+\def\inspect_next_optional_character_indeed
+ {\ifx\nexttoken\next_optional_character_token
+ \@EA\next_optional_command_yes
\else
- \@EA\nextoptionalcommandnop
+ \@EA\next_optional_command_nop
\fi}
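For readers following the lowline renames above: the public interface of \doifnextoptionalelse is unchanged. A minimal usage sketch; the \MyCommand names are hypothetical and only serve as illustration:

  \def\MyCommand       {\doifnextoptionalelse\MyCommandYes\MyCommandNop}
  \def\MyCommandYes[#1]{got optional argument: #1}
  \def\MyCommandNop    {no optional argument}

  \MyCommand[test]   % the next character is [, so \MyCommandYes grabs "test"
  \MyCommand text    % no bracket follows, so \MyCommandNop is expanded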
-\let\nextbgroupcharactertoken\bgroup
+\let\next_bgroup_character_token\bgroup
\long\def\doifnextbgroupelse#1#2%
- {\def\nextbgroupcommandyes{#1}%
- \def\nextbgroupcommandnop{#2}%
- \let\ifnextblankspacetoken\iffalse
- \futurelet\nexttoken\inspectnextbgroupcharacter}
+ {\def\next_bgroup_command_yes{#1}%
+ \def\next_bgroup_command_nop{#2}%
+ \let\if_next_blank_space_token\iffalse
+ \futurelet\nexttoken\inspect_next_bgroup_character}
-\def\inspectnextbgroupcharacter
+\def\inspect_next_bgroup_character
{\ifx\nexttoken\blankspace
- \@EA\reinspectnextbgroupcharacter
+ \@EA\reinspect_next_bgroup_character
\else
- \@EA\inspectnextbgroupcharacterindeed
+ \@EA\inspect_next_bgroup_character_indeed
\fi}
-\def\inspectnextbgroupcharacterindeed
- {\ifx\nexttoken\nextbgroupcharactertoken
- \@EA\nextbgroupcommandyes
+\def\inspect_next_bgroup_character_indeed
+ {\ifx\nexttoken\next_bgroup_character_token
+ \@EA\next_bgroup_command_yes
\else
- \@EA\nextbgroupcommandnop
+ \@EA\next_bgroup_command_nop
\fi}
-\let\nextparenthesischaractertoken(
+\let\next_parenthesis_character_token(
\long\def\doifnextparenthesiselse#1#2%
- {\def\nextparenthesiscommandyes{#1}%
- \def\nextparenthesiscommandnop{#2}%
- \let\ifnextblankspacetoken\iffalse
- \futurelet\nexttoken\inspectnextparenthesischaracter}
+ {\def\next_parenthesis_command_yes{#1}%
+ \def\next_parenthesis_command_nop{#2}%
+ \let\if_next_blank_space_token\iffalse
+ \futurelet\nexttoken\inspect_next_parenthesis_character}
-\def\inspectnextparenthesischaracter
+\def\inspect_next_parenthesis_character
{\ifx\nexttoken\blankspace
- \@EA\reinspectnextparenthesischaracter
+ \@EA\reinspect_next_parenthesis_character
\else
- \@EA\inspectnextparenthesischaracterindeed
+ \@EA\inspect_next_parenthesis_character_indeed
\fi}
-\def\inspectnextparenthesischaracterindeed
- {\ifx\nexttoken\nextparenthesischaractertoken
- \@EA\nextparenthesiscommandyes
+\def\inspect_next_parenthesis_character_indeed
+ {\ifx\nexttoken\next_parenthesis_character_token
+ \@EA\next_parenthesis_command_yes
\else
- \@EA\nextparenthesiscommandnop
+ \@EA\next_parenthesis_command_nop
\fi}
%D The next one is handy in predictable situations:
\long\def\doiffastoptionalcheckelse#1#2%
- {\def\nextoptionalcommandyes{#1}%
- \def\nextoptionalcommandnop{#2}%
- \let\ifnextblankspacetoken\iffalse % not needed
- \futurelet\nexttoken\dodoiffastoptionalcheckelse}
+ {\def\next_optional_command_yes{#1}%
+ \def\next_optional_command_nop{#2}%
+ \let\if_next_blank_space_token\iffalse % not needed
+ \futurelet\nexttoken\do_if_fast_optional_check_else}
-\def\dodoiffastoptionalcheckelse
- {\ifx\nexttoken\nextoptionalcharactertoken
- \expandafter\nextoptionalcommandyes
+\def\do_if_fast_optional_check_else
+ {\ifx\nexttoken\next_optional_character_token
+ \expandafter\next_optional_command_yes
\else
- \expandafter\nextoptionalcommandnop
+ \expandafter\next_optional_command_nop
\fi}
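The fast variant just shown skips the blank-space bookkeeping, so it is only meant for predictable spots, typically deep inside other macros where a bracket, if present, follows immediately. A short sketch with hypothetical names:

  \def\MyInternalSetup{\doiffastoptionalcheckelse\MyInternalYes\MyInternalNop}
  \def\MyInternalYes[#1]{setup using: #1}
  \def\MyInternalNop    {setup using the defaults}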
%D This macro uses some auxiliary macros. Although we were able
@@ -350,14 +350,14 @@
\def\:{\let\blankspace= } \:
-\def\:{\reinspectnextcharacter}
-\expandafter\def\: {\let\ifnextblankspacetoken\iftrue\futurelet\nexttoken\inspectnextcharacter}
+\def\:{\reinspect_next_character}
+\expandafter\def\: {\let\if_next_blank_space_token\iftrue\futurelet\nexttoken\inspect_next_character}
-\def\:{\reinspectnextoptionalcharacter}
-\expandafter\def\: {\let\ifnextblankspacetoken\iftrue\futurelet\nexttoken\inspectnextoptionalcharacter}
+\def\:{\reinspect_next_optional_character}
+\expandafter\def\: {\let\if_next_blank_space_token\iftrue\futurelet\nexttoken\inspect_next_optional_character}
-\def\:{\reinspectnextbgroupcharacter}
-\expandafter\def\: {\let\ifnextblankspacetoken\iftrue\futurelet\nexttoken\inspectnextbgroupcharacter}
+\def\:{\reinspect_next_bgroup_character}
+\expandafter\def\: {\let\if_next_blank_space_token\iftrue\futurelet\nexttoken\inspect_next_bgroup_character}
\let\:\next
@@ -525,21 +525,22 @@
%D was due to these grouping subtleties. We therefore decided
%D to use \type{\begingroup} instead of \type{\bgroup}.
-\def\docheckonedefined#1%
- {\ifcsname#1\endcsname\else
- \donefalse
- \expandafter\quitcommalist % added
- \fi}
-
\def\doifalldefinedelse#1%
{\begingroup
- \donetrue \processcommalist[#1]\docheckonedefined
+ \donetrue
+ \processcommalist[#1]\do_if_all_defined_else
\ifdone
\endgroup\expandafter\firstoftwoarguments
\else
\endgroup\expandafter\secondoftwoarguments
\fi}
+\def\do_if_all_defined_else#1%
+ {\ifcsname#1\endcsname\else
+ \donefalse
+ \expandafter\quitcommalist % added
+ \fi}
+
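The relocated helper above keeps \doifalldefinedelse working as before: it takes a comma list of macro names (without backslashes) and two branches. A small sketch; the listed names are just convenient examples that happen to be defined in this file:

  \doifalldefinedelse{firstoftwoarguments,secondoftwoarguments}
    {all names in the list are defined}
    {at least one name is undefined}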
%D \macros
%D {doif,doifelse,doifnot}
%D
@@ -560,7 +561,8 @@
%D \stoptyping
\long\def\doif#1#2%
- {\edef\!!stringa{#1}\edef\!!stringb{#2}%
+ {\edef\!!stringa{#1}%
+ \edef\!!stringb{#2}%
\ifx\!!stringa\!!stringb
\expandafter\firstofoneargument
\else
@@ -568,7 +570,8 @@
\fi}
\long\def\doifnot#1#2%
- {\edef\!!stringa{#1}\edef\!!stringb{#2}%
+ {\edef\!!stringa{#1}%
+ \edef\!!stringb{#2}%
\ifx\!!stringa\!!stringb
\expandafter\gobbleoneargument
\else
@@ -576,7 +579,8 @@
\fi}
\long\def\doifelse#1#2%
- {\edef\!!stringa{#1}\edef\!!stringb{#2}%
+ {\edef\!!stringa{#1}%
+ \edef\!!stringb{#2}%
\ifx\!!stringa\!!stringb
\expandafter\firstoftwoarguments
\else
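The hunks above only split the double \edef over two lines; the behaviour of \doif, \doifnot and \doifelse is untouched: both arguments are fully expanded and compared with \ifx. A usage sketch, where \MyOption is a hypothetical macro:

  \def\MyOption{yes}

  \doifelse{\MyOption}{yes}
    {the option equals yes}
    {the option is something else}

  \doifnot{\MyOption}{no}
    {executed, because the expanded strings differ}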
@@ -662,28 +666,28 @@
\def\rightoptionalbracket{]}
-\long\def\doquitifiteminsetelse#1],\relax{\firstoftwoarguments}
-\long\def\doquitifiteminset #1],\relax{\firstofoneargument}
-\long\def\doquitifitemnotinset #1],\relax{\gobbleoneargument}
+\long\def\do_quit_if_item_in_set_else#1],\relax{\firstoftwoarguments}
+\long\def\do_quit_if_item_in_set #1],\relax{\firstofoneargument}
+\long\def\do_quit_if_item_not_in_set #1],\relax{\gobbleoneargument}
-\long\def\redoifinsetelse{\expandafter\docheckifiteminsetelse\!!stringb,],\relax}
-\long\def\redoifinset {\expandafter\docheckifiteminset \!!stringb,],\relax}
-\long\def\redoifnotinset {\expandafter\docheckifitemnotinset \!!stringb,],\relax}
+\long\def\redo_if_in_set_else{\expandafter\do_check_if_item_in_set_else\!!stringb,],\relax}
+\long\def\redo_if_in_set {\expandafter\do_check_if_item_in_set \!!stringb,],\relax}
+\long\def\redo_if_not_in_set {\expandafter\do_check_if_item_not_in_set \!!stringb,],\relax}
\long\def\doifinsetelse#1% make this two step too
{\edef\!!stringa{#1}%
\ifx\!!stringa\empty
\expandafter\thirdofthreearguments
\else
- \expandafter\dodoifinsetelse
+ \expandafter\do_if_in_set_else
\fi}
-\long\def\dodoifinsetelse#1%
+\long\def\do_if_in_set_else#1%
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
\expandafter\secondoftwoarguments
\else
- \expandafter\redoifinsetelse
+ \expandafter\redo_if_in_set_else
\fi}
\long\def\doifinset#1%
@@ -691,15 +695,15 @@
\ifx\!!stringa\empty
\expandafter\gobbletwoarguments
\else
- \expandafter\dodoifinset
+ \expandafter\do_if_in_set
\fi}
-\long\def\dodoifinset#1%
+\long\def\do_if_in_set#1%
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
\expandafter\gobbleoneargument
\else
- \expandafter\redoifinset
+ \expandafter\redo_if_in_set
\fi}
\long\def\doifnotinset#1%
@@ -707,81 +711,81 @@
\ifx\!!stringa\empty
\expandafter\secondoftwoarguments
\else
- \expandafter\dodoifnotinset
+ \expandafter\do_if_not_in_set
\fi}
-\long\def\dodoifnotinset#1%
+\long\def\do_if_not_in_set#1%
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
\expandafter\firstofoneargument
\else
- \expandafter\redoifnotinset % ...]{true}
+ \expandafter\redo_if_not_in_set % ...]{true}
\fi}
-\def\docheckifiteminsetelse#1,#2% #2 eats up preceding space
+\def\do_check_if_item_in_set_else#1,#2% #2 eats up preceding space
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
- \expandafter\docheckifiteminsetelse
+ \expandafter\do_check_if_item_in_set_else
\else
- \expandafter\dodocheckifiteminsetelse
+ \expandafter\do_do_check_if_item_in_set_else
\fi#2}
-\def\dodocheckifiteminsetelse
+\def\do_do_check_if_item_in_set_else
{\ifx\!!stringb\rightoptionalbracket
\expandafter\thirdofthreearguments
\else
- \expandafter\dododocheckifiteminsetelse
+ \expandafter\do_do_do_check_if_item_in_set_else
\fi}
-\def\dododocheckifiteminsetelse
+\def\do_do_do_check_if_item_in_set_else
{\ifx\!!stringa\!!stringb
- \expandafter\doquitifiteminsetelse
+ \expandafter\do_quit_if_item_in_set_else
\else
- \expandafter\docheckifiteminsetelse
+ \expandafter\do_check_if_item_in_set_else
\fi}
-\def\docheckifiteminset#1,#2% #2 eats up preceding space
+\def\do_check_if_item_in_set#1,#2% #2 eats up preceding space
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
- \expandafter\docheckifiteminset
+ \expandafter\do_check_if_item_in_set
\else
- \expandafter\dodocheckifiteminset
+ \expandafter\do_do_check_if_item_in_set
\fi#2}
-\def\dodocheckifiteminset
+\def\do_do_check_if_item_in_set
{\ifx\!!stringb\rightoptionalbracket
\expandafter\gobbletwoarguments
\else
- \expandafter\dododocheckifiteminset
+ \expandafter\do_do_do_check_if_item_in_set
\fi}
-\def\dododocheckifiteminset
+\def\do_do_do_check_if_item_in_set
{\ifx\!!stringa\!!stringb
- \expandafter\doquitifiteminset
+ \expandafter\do_quit_if_item_in_set
\else
- \expandafter\docheckifiteminset
+ \expandafter\do_check_if_item_in_set
\fi}
-\def\docheckifitemnotinset#1,#2% #2 eats up preceding space
+\def\do_check_if_item_not_in_set#1,#2% #2 eats up preceding space
{\edef\!!stringb{#1}%
\ifx\!!stringb\empty
- \expandafter\docheckifitemnotinset
+ \expandafter\do_check_if_item_not_in_set
\else
- \expandafter\dodocheckifitemnotinset
+ \expandafter\do_do_check_if_item_not_in_set
\fi#2}
-\def\dodocheckifitemnotinset
+\def\do_do_check_if_item_not_in_set
{\ifx\!!stringb\rightoptionalbracket
\expandafter\secondoftwoarguments
\else
- \expandafter\dododocheckifitemnotinset
+ \expandafter\do_do_do_check_if_item_not_in_set
\fi}
-\def\dododocheckifitemnotinset
+\def\do_do_do_check_if_item_not_in_set
{\ifx\!!stringa\!!stringb
- \expandafter\doquitifitemnotinset
+ \expandafter\do_quit_if_item_not_in_set
\else
- \expandafter\docheckifitemnotinset
+ \expandafter\do_check_if_item_not_in_set
\fi}
%D \macros
@@ -812,43 +816,41 @@
% !9yes=\doifcommonelse{,a,}{,,,a,}{yes}{nop}
% !9yes=\doifcommonelse{,,a,}{,,,a,}{yes}{nop}
-\long\def\doquitifcommonelse#1],\relax#2],\relax{\firstoftwoarguments}
+\long\def\do_quit_if_common_else#1],\relax#2],\relax{\firstoftwoarguments}
-\long\def\doquitifcommonelsenop{\secondoftwoarguments}
-
-\def\docheckifcommonelseone#1,#2%
+\def\do_check_if_common_else_one#1,#2%
{\edef\!!stringc{#1}%
\ifx\!!stringc\rightoptionalbracket
\expandafter\thirdofthreearguments
\else
- \expandafter\p!docommoncheck
+ \expandafter\do_common_check
\fi#2}
-\def\docheckifcommonelsetwo#1,#2% we can do an empty #1 check too
+\def\do_check_if_common_else_two#1,#2% we can do an empty #1 check too
{\edef\commalistelement{#1}%
\ifx\commalistelement\rightoptionalbracket
- \expandafter\redocheckifcommonelseone
+ \expandafter\re_do_check_if_common_else_one
\else
- \expandafter\dodocheckifcommonelsetwo
+ \expandafter\do_do_check_if_common_else_two
\fi#2}
-\def\dodocheckifcommonelsetwo
+\def\do_do_check_if_common_else_two
{\ifx\commalistelement\empty
- \expandafter\docheckifcommonelsetwo
+ \expandafter\do_check_if_common_else_two
\else
- \expandafter\dododocheckifcommonelsetwo
+ \expandafter\do_do_do_check_if_common_else_two
\fi}
-\def\dododocheckifcommonelsetwo
+\def\do_do_do_check_if_common_else_two
{\ifx\!!stringc\commalistelement
- \expandafter\doquitifcommonelse
+ \expandafter\do_quit_if_common_else
\else
- \expandafter\docheckifcommonelsetwo
+ \expandafter\do_check_if_common_else_two
\fi}
-\def\redocheckifcommonelseone#1{\docheckifcommonelseone}
+\def\re_do_check_if_common_else_one#1{\do_check_if_common_else_one}
-\def\p!doifcommonelse#1#2#3#4%
+\def\do_do_if_common_else#1#2#3#4%
{\edef\!!stringa{#3}%
\edef\!!stringb{#4}%
\ifx\!!stringa\empty
@@ -856,17 +858,17 @@
\else\ifx\!!stringb\empty
\expandafter\expandafter\expandafter\secondoftwoarguments
\else
- \expandafter\expandafter\expandafter\pp!doifcommonelse
+ \expandafter\expandafter\expandafter\do_do_do_if_common_else
\fi\fi
#1#2}
-\def\pp!doifcommonelse
- {\def\p!docommoncheck{\expandafter\docheckifcommonelsetwo\!!stringb,],\relax}%
- \expandafter\docheckifcommonelseone\!!stringa,],\relax}
+\def\do_do_do_if_common_else
+ {\def\do_common_check{\expandafter\do_check_if_common_else_two\!!stringb,],\relax}%
+ \expandafter\do_check_if_common_else_one\!!stringa,],\relax}
-\def\doifcommonelse{\p!doifcommonelse\firstoftwoarguments\secondoftwoarguments}
-\def\doifcommon {\p!doifcommonelse\firstofoneargument \gobbleoneargument }
-\def\doifnotcommon {\p!doifcommonelse\gobbleoneargument \firstofoneargument }
+\def\doifcommonelse{\do_do_if_common_else\firstoftwoarguments\secondoftwoarguments}
+\def\doifcommon {\do_do_if_common_else\firstofoneargument \gobbleoneargument }
+\def\doifnotcommon {\do_do_if_common_else\gobbleoneargument \firstofoneargument }
%D \macros
%D {processcommalist,processcommacommand,quitcommalist,
@@ -2034,7 +2036,7 @@
% {\let\charactertoken=#1%
% \def\!!stringa{\noshowargumenterror#3\dodogetargument}%
% \def\!!stringb{\doshowargumenterror#4\dodogetargument#1#2}%
-% \futurelet\nexttoken\inspectnextcharacter}
+% \futurelet\nexttoken\inspect_next_character}
% \def\getsingleempty#1#2#3%
% {\def\dodogetargument%
@@ -2244,7 +2246,7 @@
\def\dodoubleemptyNOPtwo
{\secondargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dodoubleemptyonespaced
\else
\expandafter\dodoubleemptyonenormal
@@ -2282,7 +2284,7 @@
\def\dotripleemptyNOPtwo
{\secondargumentfalse
\thirdargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dotripleemptytwospaced
\else
\expandafter\dotripleemptytwonormal
@@ -2290,7 +2292,7 @@
\def\dotripleemptyNOPthree
{\thirdargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dotripleemptythreespaced
\else
\expandafter\dotripleemptythreenormal
@@ -2338,7 +2340,7 @@
{\secondargumentfalse
\thirdargumentfalse
\fourthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquadrupleemptytwospaced
\else
\expandafter\doquadrupleemptytwonormal
@@ -2347,7 +2349,7 @@
\def\doquadrupleemptyNOPthree
{\thirdargumentfalse
\fourthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquadrupleemptythreespaced
\else
\expandafter\doquadrupleemptythreenormal
@@ -2355,7 +2357,7 @@
\def\doquadrupleemptyNOPfour
{\fourthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquadrupleemptyfourspaced
\else
\expandafter\doquadrupleemptyfournormal
@@ -2413,7 +2415,7 @@
\thirdargumentfalse
\fourthargumentfalse
\fifthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquintupleemptytwospaced
\else
\expandafter\doquintupleemptytwonormal
@@ -2423,7 +2425,7 @@
{\thirdargumentfalse
\fourthargumentfalse
\fifthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquintupleemptythreespaced
\else
\expandafter\doquintupleemptythreenormal
@@ -2432,7 +2434,7 @@
\def\doquintupleemptyNOPfour
{\fourthargumentfalse
\fifthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquintupleemptyfourspaced
\else
\expandafter\doquintupleemptyfournormal
@@ -2440,7 +2442,7 @@
\def\doquintupleemptyNOPfive
{\fifthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\doquintupleemptyfivespaced
\else
\expandafter\doquintupleemptyfivenormal
@@ -2508,7 +2510,7 @@
\fourthargumentfalse
\fifthargumentfalse
\sixthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosixemptytwospaced
\else
\expandafter\dosixemptytwonormal
@@ -2519,7 +2521,7 @@
\fourthargumentfalse
\fifthargumentfalse
\sixthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosixemptythreespaced
\else
\expandafter\dosixemptythreenormal
@@ -2529,7 +2531,7 @@
{\fourthargumentfalse
\fifthargumentfalse
\sixthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosixemptyfourspaced
\else
\expandafter\dosixemptyfournormal
@@ -2538,7 +2540,7 @@
\def\dosixtupleemptyNOPfive
{\fifthargumentfalse
\sixthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosixemptyfivespaced
\else
\expandafter\dosixemptyfivenormal
@@ -2546,7 +2548,7 @@
\def\dosixtupleemptyNOPsix
{\sixthargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosixemptysixspaced
\else
\expandafter\dosixemptysixnormal
@@ -2624,7 +2626,7 @@
\fifthargumentfalse
\sixthargumentfalse
\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptytwospaced
\else
\expandafter\dosevenemptytwonormal
@@ -2636,7 +2638,7 @@
\fifthargumentfalse
\sixthargumentfalse
\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptythreespaced
\else
\expandafter\dosevenemptythreenormal
@@ -2647,7 +2649,7 @@
\fifthargumentfalse
\sixthargumentfalse
\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptyfourspaced
\else
\expandafter\dosevenemptyfournormal
@@ -2657,7 +2659,7 @@
{\fifthargumentfalse
\sixthargumentfalse
\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptyfivespaced
\else
\expandafter\dosevenemptyfivenormal
@@ -2666,7 +2668,7 @@
\def\doseventupleemptyNOPsix
{\sixthargumentfalse
\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptysixspaced
\else
\expandafter\dosevenemptysixnormal
@@ -2674,7 +2676,7 @@
\def\doseventupleemptyNOPseven
{\seventhargumentfalse
- \ifnextblankspacetoken
+ \if_next_blank_space_token
\expandafter\dosevenemptysevenspaced
\else
\expandafter\dosevenemptysevennormal
@@ -3297,26 +3299,20 @@
%D Nowadays we don't mind a few more tokens if we can gain a
%D bit of speed.
-\def\doincrement#1%
- {\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+\plusone \relax}}
-\def\dodecrement#1%
- {\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+\minusone\relax}}
+\def\do_increment#1{\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+\plusone \relax}}
+\def\do_decrement#1{\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+\minusone\relax}}
-\def\dododoincrement#1,#2)%
- {\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+#2\relax}}
-\def\dodododecrement#1,#2)%
- {\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi-#2\relax}}
+\def\do_do_do_increment#1,#2){\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi+#2\relax}}
+\def\do_do_do_decrement#1,#2){\dodoglobal\edef#1{\the\numexpr\ifdefined#1\ifx#1\relax\else#1\fi\fi-#2\relax}}
-\def\dodoincrement(#1%
- {\doifnextcharelse,{\dododoincrement#1}{\dododoincrement#1,\plusone}}
-\def\dododecrement(#1%
- {\doifnextcharelse,{\dodododecrement#1}{\dodododecrement#1,\plusone}}
+\def\do_do_increment(#1{\doifnextcharelse,{\do_do_do_increment#1}{\do_do_do_increment#1,\plusone}}
+\def\do_do_decrement(#1{\doifnextcharelse,{\do_do_do_decrement#1}{\do_do_do_decrement#1,\plusone}}
\def\fastincrement#1{\dodoglobal\edef#1{\the\numexpr#1+\plusone \relax}}
\def\fastdecrement#1{\dodoglobal\edef#1{\the\numexpr#1+\minusone\relax}}
-\def\increment{\doifnextcharelse(\dodoincrement\doincrement}
-\def\decrement{\doifnextcharelse(\dododecrement\dodecrement}
+\def\increment{\doifnextcharelse(\do_do_increment\do_increment}
+\def\decrement{\doifnextcharelse(\do_do_decrement\do_decrement}
\def\incrementvalue#1{\expandafter\increment\csname#1\endcsname}
\def\decrementvalue#1{\expandafter\decrement\csname#1\endcsname}
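These incrementers operate on macros that expand to a number, not on count registers, and the parenthesis form takes an optional step. A sketch with a hypothetical \MyCounter:

  \def\MyCounter{5}

  \increment\MyCounter        % \MyCounter now expands to 6
  \increment(\MyCounter,10)   % ... to 16
  \decrement(\MyCounter)      % ... to 15, the step defaults to \plusone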
@@ -5497,22 +5493,17 @@
%D \type {\doifallcommonelse}, where the first two
%D arguments are sets.
-\def\@@doifallcommonelse#1#2#3#4% slow
- {\def\p!docommoncheck##1%
+\def\do_if_all_common_else#1#2#3#4% slow
+ {\def\do_common_check##1%
{\doifnotinset{##1}{#4}\donefalse
\ifdone\else\expandafter\quitcommalist\fi}%
\donetrue
- \processcommalist[#3]\p!docommoncheck
+ \processcommalist[#3]\do_common_check
\ifdone\expandafter#1\else\expandafter#2\fi}
-\def\doifallcommonelse
- {\@@doifallcommonelse\firstoftwoarguments\secondoftwoarguments}
-
-\def\doifallcommon
- {\@@doifallcommonelse\firstofonearguments\gobbleoneargument}
-
-\def\doifnotallcommon
- {\@@doifallcommonelse\gobbleoneargument\firstofonearguments}
+\def\doifallcommonelse{\do_if_all_common_else\firstoftwoarguments\secondoftwoarguments}
+\def\doifallcommon    {\do_if_all_common_else\firstofoneargument  \gobbleoneargument   }
+\def\doifnotallcommon {\do_if_all_common_else\gobbleoneargument   \firstofoneargument  }
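\doifallcommonelse checks whether every element of the first comma separated set also occurs in the second one. A minimal sketch with example sets:

  \doifallcommonelse{a,b}{a,b,c}
    {all elements of the first set occur in the second}
    {at least one element is missing}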
%D \macros
%D {DOIF,DOIFELSE,DOIFNOT}
@@ -5531,38 +5522,38 @@
%D We have to use a two||step implementation, because the
%D expansion has to take place outside \type{\uppercase}.
-\unexpanded\def\DOIF#1#2%
+\unexpanded\def\DO_IF#1#2%
{\uppercase{{$#1$}{$#2$}}%
\expandafter\firstofoneargument
\else
\expandafter\gobbleoneargument
\fi}
-\unexpanded\def\DOIFNOT#1#2%
+\unexpanded\def\DO_IF_NOT#1#2%
{\uppercase{{$#1$}{$#2$}}%
\expandafter\gobbleoneargument
\else
\expandafter\firstofoneargument
\fi}
-\unexpanded\def\DOIFELSE#1#2%
+\unexpanded\def\DO_IF_ELSE#1#2%
{\uppercase{{$#1$}{$#2$}}%
\expandafter\firstoftwoarguments
\else
\expandafter\secondoftwoarguments
\fi}
-\unexpanded\def\DOIFINSTRINGELSE#1#2%
+\unexpanded\def\DO_IF_INSTRING_ELSE#1#2%
{\uppercase{{$#1$}{$#2$}}%
\expandafter\firstoftwoarguments
\else
\expandafter\secondoftwoarguments
\fi}
-\def\DOIF #1#2{\normalexpanded{\p!DOIF {#1}{#2}}}
-\def\DOIFNOT #1#2{\normalexpanded{\p!DOIFNOT {#1}{#2}}}
-\def\DOIFELSE #1#2{\normalexpanded{\p!DOIFELSE {#1}{#2}}}
-\def\DOIFINSTRINGELSE #1#2{\normalexpanded{\p!DOIFINSTRINGELSE{#1}{#2}}}
+\unexpanded\def\DOIF #1#2{\normalexpanded{\DO_IF {#1}{#2}}}
+\unexpanded\def\DOIFNOT #1#2{\normalexpanded{\DO_IF_NOT {#1}{#2}}}
+\unexpanded\def\DOIFELSE #1#2{\normalexpanded{\DO_IF_ELSE {#1}{#2}}}
+\unexpanded\def\DOIFINSTRINGELSE #1#2{\normalexpanded{\DO_IF_INSTRING_ELSE{#1}{#2}}}
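As the comment above explains, these uppercase variants push the comparison through \uppercase, so they are intended for case-insensitive string tests. A hedged sketch of the expected use:

  \DOIFELSE{Yes}{YES}
    {the strings match when case is ignored}
    {the strings differ}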
%D \macros
%D {dosingleargumentwithset,
@@ -6224,19 +6215,24 @@
%D macro rounds a real number to two digits. It takes one
%D argument and only works in \ETEX.
-\def\dointegerrounding #1.#2\relax {#1}
-\def\doonedigitrounding #1.#2#3\relax {\ifx#2*#1\else#1.#2\fi}
-\def\dotwodigitrounding #1.#2#3#4\relax {\ifx#2*#1\else#1.#2#3\fi}
-\def\dothreedigitrounding#1.#2#3#4#5\relax{\ifx#2*#1\else#1.#2#3#4\fi}
+% \def\dointegerrounding #1.#2\relax {#1}
+% \def\doonedigitrounding #1.#2#3\relax {\ifx#2*#1\else#1.#2\fi}
+% \def\dotwodigitrounding #1.#2#3#4\relax {\ifx#2*#1\else#1.#2#3\fi}
+% \def\dothreedigitrounding#1.#2#3#4#5\relax{\ifx#2*#1\else#1.#2#3#4\fi}
+%
+% \def\integerrounding#1%
+% {\@EA\@EA\@EA\dointegerrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.5\points \relax .\relax}
+% \def\onedigitrounding#1%
+% {\@EA\@EA\@EA\doonedigitrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.05\points \relax 00.*0\relax}
+% \def\twodigitrounding#1%
+% {\@EA\@EA\@EA\dotwodigitrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.005\points \relax 000.*00\relax}
+% \def\threedigitrounding#1%
+% {\@EA\@EA\@EA\dothreedigitrounding\@EA\WITHOUTPT\the\dimexpr#1\points+.0005\points\relax0000.*00\relax}
-\def\integerrounding#1%
- {\@EA\@EA\@EA\dointegerrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.5\points \relax .\relax}
-\def\onedigitrounding#1%
- {\@EA\@EA\@EA\doonedigitrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.05\points \relax 00.*0\relax}
-\def\twodigitrounding#1%
- {\@EA\@EA\@EA\dotwodigitrounding \@EA\WITHOUTPT\the\dimexpr#1\points+.005\points \relax 000.*00\relax}
-\def\threedigitrounding#1%
- {\@EA\@EA\@EA\dothreedigitrounding\@EA\WITHOUTPT\the\dimexpr#1\points+.0005\points\relax0000.*00\relax}
+\def\integerrounding #1{\cldcontext{"\letterpercent 0.0f",#1}}
+\def\onedigitrounding #1{\cldcontext{"\letterpercent 0.1f",#1}}
+\def\twodigitrounding #1{\cldcontext{"\letterpercent 0.2f",#1}}
+\def\threedigitrounding#1{\cldcontext{"\letterpercent 0.3f",#1}}
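With the \cldcontext based definitions above, rounding is delegated to Lua string formatting, so plain numbers work as well as dimensions. Expected results, assuming these definitions:

  \twodigitrounding{12.3456}   % typesets 12.35
  \onedigitrounding{2.449}     % typesets 2.4
  \integerrounding {7.5001}    % typesets 8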
%D \macros
%D {processcontent}
@@ -6422,9 +6418,9 @@
%D
%D This one is for Taco's bibliography module:
-\let\normalinspectnextcharacter\inspectnextcharacter
+\let\normal_inspect_next_character\inspect_next_character
-\def\strictinspectnextcharacter% no user macro !
+\def\strict_inspect_next_character% no user macro !
{\ifx\nexttoken\charactertoken
\expandafter\!!stringa
\else
@@ -6434,10 +6430,10 @@
% better: push/pop
\unexpanded\def\startstrictinspectnextcharacter
- {\let\inspectnextcharacter\strictinspectnextcharacter}
+ {\let\inspect_next_character\strict_inspect_next_character}
\unexpanded\def\stopstrictinspectnextcharacter
- {\let\inspectnextcharacter\normalinspectnextcharacter}
+ {\let\inspect_next_character\normal_inspect_next_character}
\def\strictdoifnextoptionalelse#1#2%
{\startstrictinspectnextcharacter
diff --git a/tex/context/base/syst-ini.mkiv b/tex/context/base/syst-ini.mkiv
index 7ff0c5575..46f08854c 100644
--- a/tex/context/base/syst-ini.mkiv
+++ b/tex/context/base/syst-ini.mkiv
@@ -126,14 +126,13 @@
\long\def\gobbleoneargument#1{} % will be defined later on anyway
-\mathchardef\etexversion =
- \numexpr\eTeXversion*100+\expandafter\gobbleoneargument\eTeXrevision\relax
+\mathchardef\etexversion = \numexpr\eTeXversion*100+\expandafter\gobbleoneargument\eTeXrevision\relax
%D First we define a simplified version of the \CONTEXT\
%D protection mechanism.
-\def\unprotect{\catcode`@=11 }
-\def\protect {\catcode`@=12 }
+\def\protect {\catcode`@=\the\catcode`@ \catcode`_=\the\catcode`_}
+\def\unprotect{\catcode`@=11 \catcode`_=11 }
\unprotect
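The simplified protection now also covers the lowline: between \unprotect and \protect both @ and _ are letters, so the low level names introduced in this commit can be defined and used. A sketch of the usual pattern in a normal ConTeXt run; \my_helper and \MyUserMacro are hypothetical names:

  \unprotect

  \def\my_helper#1{[#1]}           % only reachable while _ is a letter
  \def\MyUserMacro{\my_helper{oeps}}

  \protect

  \MyUserMacro                     % still works: the name was tokenized earlier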
@@ -163,34 +162,34 @@
% 255 : page
% 256 - : user
-\countdef \minallocatedregister = 52 \minallocatedregister = 256 % can change
-\countdef \maxallocatedregister = 53 \maxallocatedregister = 32767
-\countdef \minallocatediochannel = 54 \minallocatediochannel = -1
-\countdef \maxallocatediochannel = 55 \maxallocatediochannel = 16
-\countdef \minallocatedlanguage = 56 \minallocatedlanguage = 0
-\countdef \maxallocatedlanguage = 57 \maxallocatedlanguage = 255
-\countdef \maxallocatedinsert = 58 \maxallocatedinsert = 254
-\countdef \minallocatedinsert = 59 \minallocatedinsert = 128
-\countdef \minallocatedfamily = 60 \minallocatedfamily = 128
-\countdef \maxallocatedfamily = 61 \maxallocatedfamily = 255
-\countdef \minallocatedattribute = 62 \minallocatedattribute = 1024 % 127-1023 : private
-
-\countdef \lastallocatedcount = 32 \lastallocatedcount = \minallocatedregister
-\countdef \lastallocateddimen = 33 \lastallocateddimen = \minallocatedregister
-\countdef \lastallocatedskip = 34 \lastallocatedskip = \minallocatedregister
-\countdef \lastallocatedmuskip = 35 \lastallocatedmuskip = \minallocatedregister
-\countdef \lastallocatedbox = 36 \lastallocatedbox = \minallocatedregister
-\countdef \lastallocatedtoks = 37 \lastallocatedtoks = \minallocatedregister
-\countdef \lastallocatedread = 38 \lastallocatedread = \minallocatediochannel
-\countdef \lastallocatedwrite = 39 \lastallocatedwrite = \minallocatediochannel
-\countdef \lastallocatedmarks = 40 \lastallocatedmarks = \minallocatedregister
-\countdef \lastallocatedlanguage = 41 \lastallocatedlanguage = \minallocatedlanguage % not used in context
-\countdef \lastallocatedinsertion = 42 \lastallocatedinsertion = \minallocatedinsert
-\countdef \lastallocatedfamily = 43 \lastallocatedfamily = \minallocatedfamily % not used in context
-\countdef \lastallocatedattribute = 44 \lastallocatedattribute = \minallocatedattribute
-
-\countdef \mincountervalue = 125 \mincountervalue = -"7FFFFFFF % beware, we use index 125 at the lua end
-\countdef \maxcountervalue = 126 \maxcountervalue = "7FFFFFFF % beware, we use index 126 at the lua end
+\countdef \min_allocated_register = 52 \min_allocated_register = 256 % can change
+\countdef \max_allocated_register = 53 \max_allocated_register = 32767
+\countdef \min_allocated_iochannel = 54 \min_allocated_iochannel = -1
+\countdef \max_allocated_iochannel = 55 \max_allocated_iochannel = 16
+\countdef \min_allocated_language = 56 \min_allocated_language = 0
+\countdef \max_allocated_language = 57 \max_allocated_language = 255
+\countdef \max_allocated_insert = 58 \max_allocated_insert = 254
+\countdef \min_allocated_insert = 59 \min_allocated_insert = 128
+\countdef \min_allocated_family = 60 \min_allocated_family = 128
+\countdef \max_allocated_family = 61 \max_allocated_family = 255
+\countdef \min_allocated_attribute = 62 \min_allocated_attribute = 1024 % 127-1023 : private
+
+\countdef \last_allocated_count = 32 \last_allocated_count = \min_allocated_register
+\countdef \last_allocated_dimen = 33 \last_allocated_dimen = \min_allocated_register
+\countdef \last_allocated_skip = 34 \last_allocated_skip = \min_allocated_register
+\countdef \last_allocated_muskip = 35 \last_allocated_muskip = \min_allocated_register
+\countdef \last_allocated_box = 36 \last_allocated_box = \min_allocated_register
+\countdef \last_allocated_toks = 37 \last_allocated_toks = \min_allocated_register
+\countdef \last_allocated_read = 38 \last_allocated_read = \min_allocated_iochannel
+\countdef \last_allocated_write = 39 \last_allocated_write = \min_allocated_iochannel
+\countdef \last_allocated_marks = 40 \last_allocated_marks = \min_allocated_register
+\countdef \last_allocated_language = 41 \last_allocated_language = \min_allocated_language % not used in context
+\countdef \last_allocated_insertion = 42 \last_allocated_insertion = \min_allocated_insert
+\countdef \last_allocated_family = 43 \last_allocated_family = \min_allocated_family % not used in context
+\countdef \last_allocated_attribute = 44 \last_allocated_attribute = \min_allocated_attribute
+
+\countdef \min_counter_value = 125 \min_counter_value = -"7FFFFFFF % beware, we use index 125 at the lua end
+\countdef \max_counter_value = 126 \max_counter_value = "7FFFFFFF % beware, we use index 126 at the lua end
%countdef \minusone = 127 \minusone = -1
%chardef \zerocount = 0
@@ -220,29 +219,29 @@
%D The allocators share a common helper macro.
-\def\newcount {\allocateregister\lastallocatedcount \count \countdef \maxallocatedregister}
-\def\newdimen {\allocateregister\lastallocateddimen \dimen \dimendef \maxallocatedregister}
-\def\newskip {\allocateregister\lastallocatedskip \skip \skipdef \maxallocatedregister}
-\def\newmuskip {\allocateregister\lastallocatedmuskip \muskip \muskipdef \maxallocatedregister}
-\def\newbox {\allocateregister\lastallocatedbox \box \mathchardef\maxallocatedregister}
-\def\newtoks {\allocateregister\lastallocatedtoks \toks \toksdef \maxallocatedregister}
-\def\newread {\allocateregister\lastallocatedread \read \chardef \maxallocatediochannel}
-\def\newwrite {\allocateregister\lastallocatedwrite \write \chardef \maxallocatediochannel}
-\def\newmarks {\allocateregister\lastallocatedmarks \marks \mathchardef\maxallocatedregister}
-\def\newinsert {\allocateregister\lastallocatedinsertion\insert \chardef \maxallocatedinsert}
+\normalprotected\def\newcount {\allocate_register\last_allocated_count \count \countdef \max_allocated_register}
+\normalprotected\def\newdimen {\allocate_register\last_allocated_dimen \dimen \dimendef \max_allocated_register}
+\normalprotected\def\newskip {\allocate_register\last_allocated_skip \skip \skipdef \max_allocated_register}
+\normalprotected\def\newmuskip {\allocate_register\last_allocated_muskip \muskip \muskipdef \max_allocated_register}
+\normalprotected\def\newbox {\allocate_register\last_allocated_box \box \mathchardef\max_allocated_register}
+\normalprotected\def\newtoks {\allocate_register\last_allocated_toks \toks \toksdef \max_allocated_register}
+\normalprotected\def\newread {\allocate_register\last_allocated_read \read \chardef \max_allocated_iochannel}
+\normalprotected\def\newwrite {\allocate_register\last_allocated_write \write \chardef \max_allocated_iochannel}
+\normalprotected\def\newmarks {\allocate_register\last_allocated_marks \marks \mathchardef\max_allocated_register}
+\normalprotected\def\newinsert {\allocate_register\last_allocated_insertion\insert \chardef \max_allocated_insert}
%D We don't need these in \CONTEXT:
-\def\newlanguage{\allocateregister\lastallocatedlanguage \language\chardef \maxallocatedlanguage}
-\def\newfamily {\allocateregister\lastallocatedfamily \fam \chardef \maxallocatedfamily}
+\normalprotected\def\newlanguage{\allocate_register\last_allocated_language \language\chardef \max_allocated_language}
+\normalprotected\def\newfamily {\allocate_register\last_allocated_family \fam \chardef \max_allocated_family}
\let\newfam\newfamily
% Watch out, for the moment we disable the check for already being defined
% later we will revert this but first all chardefs must be replaced.
-\def\newconstant #1{\ifdefined#1\let#1\undefined\fi\newcount#1}
-\def\setnewconstant#1{\ifdefined#1\let#1\undefined\fi\newcount#1#1} % just a number
+\normalprotected\def\newconstant #1{\ifdefined#1\let#1\undefined\fi\newcount#1}
+\normalprotected\def\setnewconstant#1{\ifdefined#1\let#1\undefined\fi\newcount#1#1} % just a number
% maybe setconstant with check
@@ -259,10 +258,10 @@
%D now provide many registers we removed all traces.
\ifdefined\writestatus \else
- \def\writestatus#1#2{\immediate\write16{#1: #2}}
+ \normalprotected\def\writestatus#1#2{\immediate\write16{#1: #2}}
\fi
-\def\allocateregisteryes#1#2#3#4#5% last class method max name
+\def\allocate_register_yes#1#2#3#4#5% last class method max name
{\ifnum#1<#4\relax
\global\advance#1\plusone
\global#3#5=#1\relax
@@ -270,16 +269,16 @@
\writestatus{warning}{no room for \string#2\space \string#5\space (max: \number#4)}%
\fi}
-\def\allocateregisternop#1#2#3#4#5% last class method max name
+\def\allocate_register_nop#1#2#3#4#5% last class method max name
{\writestatus{warning}{\string#2 \string#5 is already defined (\string\relax\space it first)}}
-\def\allocateregister#1#2#3#4#5% last class method max name
+\def\allocate_register#1#2#3#4#5% last class method max name
{\ifx#5\undefined
- \expandafter\allocateregisteryes
+ \expandafter\allocate_register_yes
\else\ifx#5\relax
- \expandafter\expandafter\expandafter\allocateregisteryes
+ \expandafter\expandafter\expandafter\allocate_register_yes
\else
- \expandafter\expandafter\expandafter\allocateregisternop
+ \expandafter\expandafter\expandafter\allocate_register_nop
\fi\fi
#1#2#3#4#5}
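All the \new... commands above funnel into this helper: a register is only allocated when the given name is undefined (or \relax), otherwise a warning is written to the terminal. A sketch with a hypothetical \MyCount:

  \newcount\MyCount          % allocates the next free count register
  \MyCount\plusone
  \advance\MyCount by 10

  \newcount\MyCount          % second call: a warning that \MyCount is already defined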
@@ -287,8 +286,8 @@
%D {\chardef} instead of the more limited \type {\mathchardef}.
\ifnum\texengine>\pdftexengine
- \def\newbox {\allocateregister\lastallocatedbox \box \chardef\maxallocatedregister}
- \def\newmarks{\allocateregister\lastallocatedmarks\marks\chardef\maxallocatedregister}
+ \normalprotected\def\newbox {\allocate_register\last_allocated_box \box \chardef\max_allocated_register}
+ \normalprotected\def\newmarks{\allocate_register\last_allocated_marks\marks\chardef\max_allocated_register}
\fi
%D Attributes are something very \LUATEX. In \CONTEXT\ you are not
@@ -298,15 +297,15 @@
%D and should not be touched.
\ifnum\texengine=\luatexengine
- \let\attributeunsetvalue\mincountervalue % used to be \minusone
- \def\newattribute{\allocateregister\minallocatedattribute\attribute\attributedef\maxallocatedregister}
+ \let\attributeunsetvalue\min_counter_value % used to be \minusone
+ \normalprotected\def\newattribute{\allocate_register\min_allocated_attribute\attribute\attributedef\max_allocated_register}
\fi
%D Not used by \CONTEXT\ but for instance \PICTEX\ needs it. It's a
%D trick to force strings instead of tokens that take more memory.
-\def\newhelp#1#2{\newtoks#1#1\expandafter{\csname#2\endcsname}}
+\normalprotected\def\newhelp#1#2{\newtoks#1#1\expandafter{\csname#2\endcsname}}
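\newhelp stores the help text as the name of an otherwise unused control sequence inside a token register, which is cheaper than keeping the tokens themselves. Usage is as in plain TeX; the names below are hypothetical:

  \newhelp\MyHelp{Check the argument, it should be a comma separated list.}

  \def\MyComplain
    {\errhelp\MyHelp
     \errmessage{Something is wrong with the argument}}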
%D \macros
%D {scratchcounter,
@@ -317,12 +316,12 @@
%D We now define a few scratch registers, so that successive
%D loads at least have some available.
-\newcount \scratchcounter \newcount \globalscratchcounter
-\newdimen \scratchdimen \newdimen \globalscratchdimen
-\newskip \scratchskip \newskip \globalscratchskip
-\newmuskip \scratchmuskip \newmuskip \globalscratchmuskip
-\newtoks \scratchtoks \newtoks \globalscratchtoks
-\newbox \scratchbox \newbox \globalscratchbox
+\newcount \scratchcounter \newcount \globalscratchcounter
+\newdimen \scratchdimen \newdimen \globalscratchdimen
+\newskip \scratchskip \newskip \globalscratchskip
+\newmuskip\scratchmuskip \newmuskip\globalscratchmuskip
+\newtoks \scratchtoks \newtoks \globalscratchtoks
+\newbox \scratchbox \newbox \globalscratchbox
\newcount\scratchcounterone \newcount\scratchcountertwo \newcount\scratchcounterthree
\newdimen \scratchdimenone \newdimen \scratchdimentwo \newdimen \scratchdimenthree
@@ -415,7 +414,7 @@
\let \@ne \plusone
\let \tw@ \plustwo
\let \thr@@ \plusthree
-\let \sixt@@n \sixteen
+\let \sixt@@n \plussixteen
\let \@cclv \pluscclv
\let \@cclvi \pluscclvi
\newbox \voidb@x
@@ -435,7 +434,7 @@
%D with \type {\iffoo}.
%D \stopnarrower
-\def\newif#1%
+\normalprotected\def\newif#1%
{\count@\escapechar
\escapechar\minusone
\expandafter\expandafter\expandafter\def\@if #1{true}{\let#1\iftrue }%
@@ -653,8 +652,8 @@
\normaleveryjob{\the\everyjob}
-\def\appendtotoks #1{\def\temp{#1}\afterassignment\doappendtotoks \scratchtoks=}
-\def\prependtotoks#1{\def\temp{#1}\afterassignment\doprependtotoks\scratchtoks=}
+\normalprotected\def\appendtotoks #1{\def\temp{#1}\afterassignment\doappendtotoks \scratchtoks=}
+\normalprotected\def\prependtotoks#1{\def\temp{#1}\afterassignment\doprependtotoks\scratchtoks=}
\def\doappendtotoks {\expandafter\expandafter\expandafter{\expandafter\the\expandafter\temp\the\scratchtoks}}
\def\doprependtotoks{\expandafter\expandafter\expandafter{\expandafter\the\expandafter\scratchtoks\the\temp}}
@@ -749,7 +748,7 @@
\lineskip = 1pt
\lineskiplimit = 0pt
-%D Again a few kind-of-extensions the core:
+%D Again a few kind-of-extensions to the core: (might go away)
\newskip \hideskip \hideskip = -1000pt plus 1fill
\newskip \centering \centering = 0pt plus 1000pt minus 1000pt
@@ -855,9 +854,9 @@
\ifdefined\pdfgentounicode \else \newcount\pdfgentounicode \fi \pdfgentounicode \plusone
\ifdefined\pdfinclusioncopyfonts\else \newcount\pdfinclusioncopyfonts \fi \pdfinclusioncopyfonts\plusone
- \def\nopdfcompression {\pdfobjcompresslevel\zerocount \pdfcompresslevel\zerocount}
- \def\maximumpdfcompression{\pdfobjcompresslevel\plusnine \pdfcompresslevel\plusnine }
- \def\normalpdfcompression {\pdfobjcompresslevel\plusthree \pdfcompresslevel\plusthree}
+ \normalprotected\def\nopdfcompression {\pdfobjcompresslevel\zerocount \pdfcompresslevel\zerocount}
+ \normalprotected\def\maximumpdfcompression{\pdfobjcompresslevel\plusnine \pdfcompresslevel\plusnine }
+ \normalprotected\def\normalpdfcompression {\pdfobjcompresslevel\plusthree \pdfcompresslevel\plusthree}
\normalpdfcompression
@@ -888,12 +887,12 @@
\ifx\fmtname \undefined \def\fmtname {ConTeXt Minimized Plain TeX} \fi
\ifx\fmtversion\undefined \def\fmtversion{3.1415926} \fi
-\let\normalfmtversion\fmtversion
+\let\normalfmtversion\fmtversion % still needed ?
%D A few bonus macros:
\def\modulonumber#1#2{\the\numexpr#2-((((#2+(#1/2))/#1)-1)*#1)\relax}
-\def\dividonumber#1#2{\the\numexpr(#2-(#1/2))/#1\relax}
+\def\dividenumber#1#2{\the\numexpr(#2-(#1/2))/#1\relax}
\ifnum\texengine=\xetexengine
\edef\xetexversion {\numexpr\XeTeXversion*100+(\expandafter\gobbleoneargument\XeTeXrevision-5)/10\relax}
@@ -901,19 +900,19 @@
\fi
\ifcase\texengine
- \def\texenginename {impossible}
+ \def \texenginename {impossible}
\edef\texengineversion{0}
\or
- \def\texenginename {pdfTeX}
- \edef\texengineversion{\dividonumber{100}\pdftexversion.\modulonumber{100}\pdftexversion.\pdftexrevision}
+ \def \texenginename {pdfTeX}
+ \edef\texengineversion{\dividenumber{100}\pdftexversion.\modulonumber{100}\pdftexversion.\pdftexrevision}
\or
- \def\texenginename {XeTeX}
- \edef\texengineversion{\dividonumber{100}\xetexversion .\modulonumber{100}\xetexversion .\xetexrevision}
+ \def \texenginename {XeTeX}
+ \edef\texengineversion{\dividenumber{100}\xetexversion .\modulonumber{100}\xetexversion .\xetexrevision}
\or
- \def\texenginename {LuaTeX}
- \edef\texengineversion{\dividonumber{100}\luatexversion.\modulonumber{100}\luatexversion.\luatexrevision}
+ \def \texenginename {LuaTeX}
+ \edef\texengineversion{\dividenumber{100}\luatexversion.\modulonumber{100}\luatexversion.\luatexrevision}
\else
- \def\texenginename {impossible}
+ \def \texenginename {impossible}
\edef\texengineversion{0}
\fi
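The renamed \dividenumber and its companion \modulonumber compensate for the fact that \numexpr rounds its divisions. A worked example, assuming a pdfTeX run where \pdftexversion is 140 (pdfTeX 1.40):

  %   \dividenumber{100}{140} -> (140 - 50)/100 = 90/100, rounded by \numexpr to 1
  %   \modulonumber{100}{140} -> 140 - (((140 + 50)/100 - 1) * 100)
  %                            = 140 - ((2 - 1) * 100) = 40    (190/100 rounds to 2)
  %
  %   so \texengineversion becomes 1.40.<revision>

  \edef\MyMajorVersion{\dividenumber{100}\pdftexversion}   % 1
  \edef\MyMinorVersion{\modulonumber{100}\pdftexversion}   % 40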
diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua
index 18d88e815..04bf9675b 100644
--- a/tex/generic/context/luatex/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : luatex-fonts-merged.lua
-- parent file : luatex-fonts.lua
--- merge date : 09/14/11 12:21:17
+-- merge date : 09/15/11 09:08:43
do -- begin closure to overcome local limits and interference