31 files changed, 1318 insertions, 359 deletions
diff --git a/context/data/scite/lexers/scite-context-lexer-cld.lua b/context/data/scite/lexers/scite-context-lexer-cld.lua new file mode 100644 index 000000000..1e5d8b59c --- /dev/null +++ b/context/data/scite/lexers/scite-context-lexer-cld.lua @@ -0,0 +1,171 @@ +local info = { + version = 1.002, + comment = "scintilla lpeg lexer for cld/lua", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files", +} + +-- Adapted from lua.lua by Mitchell who based it on a lexer by Peter Odding. + +local lexer = lexer +local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing +local P, R, S, C, Cg, Cb, Cs, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt +local match, find = string.match, string.find +local global = _G + +module(...) + +local keywords = { + 'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for', 'function', + 'if', 'in', 'local', 'nil', 'not', 'or', 'repeat', 'return', 'then', 'true', + 'until', 'while', +} + +local functions = { + 'assert', 'collectgarbage', 'dofile', 'error', 'getfenv', 'getmetatable', + 'ipairs', 'load', 'loadfile', 'loadstring', 'module', 'next', 'pairs', + 'pcall', 'print', 'rawequal', 'rawget', 'rawset', 'require', 'setfenv', + 'setmetatable', 'tonumber', 'tostring', 'type', 'unpack', 'xpcall', +} + +local constants = { + '_G', '_VERSION', +} + +local csnames = { + "context", + "metafun", +} + +local level = nil +local setlevel = function(_,i,s) level = s return i end + +local equals = P("=")^0 + +local longonestart = P("[[") +local longonestop = P("]]") +local longonestring = (1-longonestop)^0 + +local longtwostart = P('[') * Cmt(equals,setlevel) * P('[') +local longtwostop = P(']') * equals * P(']') + +local longtwostring = P(function(input,index) + if level then + local sentinel = ']' .. level .. ']' + local _, stop = find(input,sentinel,index,true) + return stop and stop + 1 - #sentinel or #input + 1 + end +end) + +-- local longtwostart = P("[") * Cg(equals, "init") * P("[") +-- local longtwostop = P("]") * C(equals) * P("]") +-- local longtwocheck = Cmt(longtwostop * Cb("init"), function(s,i,a,b) return a == b end) +-- local longtwostring = (P(1) - longtwocheck)^0 + +local longcomment = Cmt(#('[[' + ('[' * P('=')^0 * '[')), function(input,index) + local level = match(input,'^%[(=*)%[',index) + level = "==" + if level then + local _, stop = find(input,']' .. level .. ']',index,true) + return stop and stop + 1 or #input + 1 + end +end) + +local longcomment = Cmt(#('[[' + ('[' * C(P('=')^0) * '[')), function(input,index,level) + local _, stop = find(input,']' .. level .. 
']',index,true) + return stop and stop + 1 or #input + 1 +end) + +local whitespace = token(lexer.WHITESPACE, lexer.space^1) +local any_char = lexer.any_char + +local squote = P("'") +local dquote = P('"') +local escaped = P("\\") * P(1) +local dashes = P('--') + +local shortcomment = dashes * lexer.nonnewline^0 +local longcomment = dashes * longcomment +local comment = token(lexer.COMMENT, longcomment + shortcomment) + +local shortstring = token("quote", squote) + * token(lexer.STRING, (escaped + (1-squote))^0 ) + * token("quote", squote) + + token("quote", dquote) + * token(lexer.STRING, (escaped + (1-dquote))^0 ) + * token("quote", dquote) + +local longstring = token("quote", longonestart) + * token(lexer.STRING, longonestring) + * token("quote", longonestop) + + token("quote", longtwostart) + * token(lexer.STRING, longtwostring) + * token("quote", longtwostop) + +local string = shortstring + + longstring + +local integer = P('-')^-1 * (lexer.hex_num + lexer.dec_num) +local number = token(lexer.NUMBER, lexer.float + integer) + +local word = R('AZ','az','__','\127\255') * (lexer.alnum + '_')^0 +local identifier = token(lexer.IDENTIFIER, word) + +local operator = token(lexer.OPERATOR, P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split of {}[]() + +local keyword = token(lexer.KEYWORD, word_match(keywords)) +local builtin = token(lexer.FUNCTION, word_match(functions)) +local constant = token(lexer.CONSTANT, word_match(constants)) +local csname = token("user", word_match(csnames)) * ( + whitespace^0 * #S("{(") + + ( whitespace^0 * token(lexer.OPERATOR, P(".")) * whitespace^0 * token("csname",word) )^1 + ) + +_rules = { + { 'whitespace', whitespace }, + { 'keyword', keyword }, + { 'function', builtin }, + { 'csname', csname }, + { 'constant', constant }, + { 'identifier', identifier }, + { 'string', string }, + { 'comment', comment }, + { 'number', number }, + { 'operator', operator }, + { 'any_char', any_char }, +} + +_tokenstyles = { + { "comment", lexer.style_context_comment }, + { "quote", lexer.style_context_quote }, + { "keyword", lexer.style_context_keyword }, + { "user", lexer.style_context_user }, + { "specials", lexer.style_context_specials }, + { "extras", lexer.style_context_extras }, +} + +_foldsymbols = { + _patterns = { + '%l+', + '[%({%)}%[%]]', + }, + [lexer.KEYWORD] = { + ['if'] = 1, + ['end'] = -1, + ['do'] = 1, + ['function'] = 1, + ['repeat'] = 1, + ['until'] = -1, + }, + [lexer.COMMENT] = { + ['['] = 1, [']'] = -1, + }, + ["quote"] = { -- to be tested + ['['] = 1, [']'] = -1, + }, + [lexer.OPERATOR] = { + ['('] = 1, [')'] = -1, + ['{'] = 1, ['}'] = -1, + }, +} diff --git a/context/data/scite/lexers/scite-context-lexer-mps.lua b/context/data/scite/lexers/scite-context-lexer-mps.lua new file mode 100644 index 000000000..fa7d88c5d --- /dev/null +++ b/context/data/scite/lexers/scite-context-lexer-mps.lua @@ -0,0 +1,89 @@ +local info = { + version = 1.002, + comment = "scintilla lpeg lexer for metafun", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files", +} + +local lexer = lexer +local global, string, table, lpeg = _G, string, table, lpeg +local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing +local exact_match = lexer.context.exact_match +local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt +local type, next, pcall, loadfile = type, next, pcall, loadfile + +module(...) 
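+
+-- A brief gloss (not part of the original patch): module(...) is the stock
+-- Lua 5.1 module idiom; it creates a module table named after this file and
+-- switches the environment to it, and _M is that table, which is what the
+-- metafunlexer local below captures.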
+ +local metafunlexer = _M +local basepath = lexer.context and lexer.context.path or _LEXERHOME + +local metafuncommands = { } +local plaincommands = { } +local primitivecommands = { } + +do + + local definitions = lexer.context.loaddefinitions("mult-mps.lua") + + if definitions then + metafuncommands = definitions.metafun or { } + plaincommands = definitions.plain or { } + primitivecommands = definitions.primitives or { } + end + +end + +local whitespace = lexer.WHITESPACE -- triggers states +local any_char = lexer.any_char + +local space = lexer.space -- S(" \n\r\t\f\v") +local digit = R("09") +local sign = S("+-") +local period = P(".") +local cstoken = R("az","AZ") + P("_") +local number = sign^-1 * ( -- at most one + digit^1 * period * digit^0 -- 10.0 10. + + digit^0 * period * digit^1 -- 0.10 .10 + + digit^1 -- 10 + ) + +local spacing = token(whitespace, space^1) +local comment = token('comment', P('%') * (1-S("\n\r"))^0) +local metafun = token('command', exact_match(metafuncommands)) +local plain = token('plain', exact_match(plaincommands)) +local quoted = token('specials', P('"')) + * token('default', P(1-P('"'))^1) + * token('specials', P('"')) +local primitive = token('primitive', exact_match(primitivecommands)) +local csname = token('user', cstoken^1) +local specials = token('specials', S("#()[]<>=:\"")) +local number = token('number', number) +local extras = token('extras', S("`~%^&_-+/\'|\\")) +local default = token('default', P(1)) + +_rules = { + { 'whitespace', spacing }, + { 'comment', comment }, + { 'metafun', metafun }, + { 'plain', plain }, + { 'primitive', primitive }, + { 'csname', csname }, + { 'number', number }, + { 'quoted', quoted }, + { 'specials', specials }, + { 'extras', extras }, + { 'any_char', any_char }, +} + +_tokenstyles = { + { "comment", lexer.style_context_comment }, + { "default", lexer.style_context_default }, + { "number" , lexer.style_context_number }, + { "primitive", lexer.style_context_primitive }, + { "plain", lexer.style_context_plain }, + { "command", lexer.style_context_command }, + { "user", lexer.style_context_user }, + { "specials", lexer.style_context_specials }, + { "extras", lexer.style_context_extras }, +} diff --git a/context/data/scite/lexers/scite-context-lexer-tex.lua b/context/data/scite/lexers/scite-context-lexer-tex.lua new file mode 100644 index 000000000..4a1a0a766 --- /dev/null +++ b/context/data/scite/lexers/scite-context-lexer-tex.lua @@ -0,0 +1,226 @@ +local info = { + version = 1.002, + comment = "scintilla lpeg lexer for context", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files", +} + + +-- maybe: protected_macros + +--[[ + + experiment dd 2009/10/28 .. 
todo:
+
+    -- figure out if tabs instead of splits are possible
+    -- locate an option to enter name in file dialogue (like windows permits)
+    -- figure out why loading a file fails
+    -- we cannot print to the log pane
+    -- we cannot access props["keywordclass.macros.context.en"]
+    -- lexer.get_property only handles integers
+    -- we cannot run a command to get the location of mult-def.lua
+
+    -- local interface = props["keywordclass.macros.context.en"]
+    -- local interface = lexer.get_property("keywordclass.macros.context.en","")
+
+    -- the embedded lexers don't backtrack (so they're not that useful on large
+    -- texts) which is probably a scintilla issue (trade-off between speed and lexable
+    -- area); also there is some weird bleeding back to the parent lexer with respect
+    -- to colors (i.e. the \ in \relax can become black) so I might as well use private
+    -- color specifications
+
+    -- this lexer does not care about other macro packages (one can of course add a fake
+    -- interface but it's not on the agenda)
+
+]]--
+
+local lexer = lexer
+local global, string, table, lpeg = _G, string, table, lpeg
+local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
+local exact_match = lexer.context.exact_match
+local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
+local type, next, pcall, loadfile, setmetatable = type, next, pcall, loadfile, setmetatable
+
+module(...)
+
+local contextlexer = _M
+local basepath = lexer.context and lexer.context.path or _LEXERHOME
+
+local commands = { en = { } }
+local primitives = { }
+local helpers = { }
+
+do
+
+    local definitions = lexer.context.loaddefinitions("mult-def.lua")
+
+    if definitions then
+        for command, languages in next, definitions.commands do
+            commands.en[languages.en or command] = true
+            for language, command in next, languages do
+                local c = commands[language]
+                if c then
+                    c[command] = true
+                else
+                    commands[language] = { [command] = true }
+                end
+            end
+        end
+        helpers = definitions.helpers or { }
+    end
+
+    local definitions = lexer.context.loaddefinitions("mult-prm.lua")
+
+    if definitions then
+        primitives = definitions.primitives or { }
+        for i=1,#primitives do
+            primitives[#primitives+1] = "normal" ..
primitives[i] + end + table.sort(primitives) + end + +end + +local currentcommands = commands.en or { } + +local knowncommand = Cmt(R("az","AZ")^1, function(_,i,s) + return currentcommands[s] and i +end) + +local find, match = string.find, string.match + +local knownpreamble = Cmt(P('% '), function(input,i,_) + if i < 10 then + local s, e, word = find(input,'^(.+)[\n\r]',i) + if word then + local interface = match(word,"interface=(..)") + if interface then + currentcommands = commands[interface] or commands.en or { } + end + end + end + return false +end) + +local whitespace = lexer.WHITESPACE -- triggers states +local any_char = lexer.any_char + +local space = lexer.space -- S(" \n\r\t\f\v") +local cstoken = R("az","AZ") + S("@!?_") -- todo: utf8 + +local spacing = token(whitespace, space^1) +local preamble = token('preamble', knownpreamble) +local comment = token('comment', P('%') * (1-S("\n\r"))^0) +local command = token('command', P('\\') * knowncommand) +local helper = token('plain', P('\\') * exact_match(helpers)) +local primitive = token('primitive', P('\\') * exact_match(primitives)) +local ifprimitive = token('primitive', P('\\if') * cstoken^1) +local csname = token('user', P('\\') * (cstoken^1 + P(1))) +local grouping = token('grouping', S("{$}")) +local specials = token('specials', S("#()[]<>=\"")) +local extras = token('extras', S("`~%^&_-+/\'|")) +local default = token('default', P(1)) + +----- startluacode = token("grouping", P("\\startluacode")) +----- stopluacode = token("grouping", P("\\stopluacode")) + +local luastatus = nil +local luaenvironment = P("luacode") + +local inlinelua = P("\\ctxlua") + + P("\\ctxcommand") + + P("\\cldcontext") + +local startlua = P("\\start") * Cmt(luaenvironment,function(_,i,s) luastatus = s return true end) + + inlinelua + * space^0 + * Cmt(P("{"),function(_,i,s) luastatus = "}" return true end) +local stoplua = P("\\stop") * Cmt(luaenvironment,function(_,i,s) return luastatus == s end) + + Cmt(P("}"),function(_,i,s) return luastatus == "}" end) + +local startluacode = token("embedded", startlua) +local stopluacode = token("embedded", stoplua) + +local metafunenvironment = P("MPcode") + + P("useMPgraphic") + + P("reusableMPgraphic") + + P("uniqueMPgraphic") + + P("MPinclusions") + + P("MPextensions") + + P("MPgraphic") + +-- local metafunstatus = nil -- this does not work, as the status gets lost in an embedded lexer +-- local startmetafun = P("\\start") * Cmt(metafunenvironment,function(_,i,s) metafunstatus = s return true end) +-- local stopmetafun = P("\\stop") * Cmt(metafunenvironment,function(_,i,s) return metafunstatus == s end) + +local startmetafun = P("\\start") * metafunenvironment +local stopmetafun = P("\\stop") * metafunenvironment + +local openargument = token("specials",P("{")) +local closeargument = token("specials",P("}")) +local argumentcontent = token("any_char",(1-P("}"))^0) + +local metafunarguments = (token("default",spacing^0) * openargument * argumentcontent * closeargument)^-2 + +local startmetafuncode = token("embedded", startmetafun) * metafunarguments +local stopmetafuncode = token("embedded", stopmetafun) + +-- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which means that we need to +-- have frozen at the moment we load another lexer. Because spacing is used to revert to a parent lexer we need +-- to make sure that we load children as late as possible in order not to get the wrong whitespace trigger. 
This
+-- took me quite a while to figure out (not being that familiar with the internals). BTW, if performance becomes
+-- an issue we can rewrite the main lex function (memorize the grammars and speed up the byline variant).
+
+local cldlexer = lexer.load('scite-context-lexer-cld')
+local mpslexer = lexer.load('scite-context-lexer-mps')
+
+lexer.embed_lexer(contextlexer, cldlexer, startluacode, stopluacode)
+lexer.embed_lexer(contextlexer, mpslexer, startmetafuncode, stopmetafuncode)
+
+_rules = {
+    { "whitespace", spacing },
+    { "preamble", preamble },
+    { "comment", comment },
+    { "helper", helper },
+    { "command", command },
+    { "ifprimitive", ifprimitive },
+    { "primitive", primitive },
+    { "csname", csname },
+    { "grouping", grouping },
+    { "specials", specials },
+    { "extras", extras },
+    { 'any_char', any_char },
+}
+
+_tokenstyles = {
+    { "preamble", lexer.style_context_preamble },
+    { "comment", lexer.style_context_comment },
+    { "default", lexer.style_context_default },
+    { 'number', lexer.style_context_number },
+    { "embedded", lexer.style_context_embedded },
+    { "grouping", lexer.style_context_grouping },
+    { "primitive", lexer.style_context_primitive },
+    { "plain", lexer.style_context_plain },
+    { "command", lexer.style_context_command },
+    { "user", lexer.style_context_user },
+    { "specials", lexer.style_context_specials },
+    { "extras", lexer.style_context_extras },
+    { "quote", lexer.style_context_quote },
+    { "keyword", lexer.style_context_keyword },
+}
+
+local folds = {
+    ["\\start"] = 1, ["\\stop" ] = -1,
+    ["\\begin"] = 1, ["\\end" ] = -1,
+}
+
+_foldsymbols = {
+    _patterns = {
+        "\\start", "\\stop", -- regular environments
+        "\\begin", "\\end", -- (moveable) blocks
+    },
+    ["helper"] = folds,
+    ["command"] = folds,
+    ["grouping"] = folds,
+}
diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/lexers/scite-context-lexer.lua
new file mode 100644
index 000000000..688eb5776
--- /dev/null
+++ b/context/data/scite/lexers/scite-context-lexer.lua
@@ -0,0 +1,360 @@
+local info = {
+    version = 1.002,
+    comment = "basics for scintilla lpeg lexer for context/metafun",
+    author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+    copyright = "PRAGMA ADE / ConTeXt Development Team",
+    license = "see context related readme files",
+}
+
+-- The fold and lex functions are copied and patched from original code by Mitchell (see
+-- lexer.lua). All errors are mine.
+--
+-- For huge files folding can be pretty slow and I do have some large ones that I keep
+-- open all the time. Loading is normally no issue, unless one has remembered the status
+-- and the cursor is at the last line of a 200K line file. Optimizing the fold function
+-- brought down loading of char-def.lua from 14 sec => 8 sec. Replacing the word_match
+-- function and optimizing the lex function gained another 2+ seconds. A 6 second load
+-- is quite ok for me.
+
+local R, P, S, Cp, Cs, Ct, Cmt, Cc = lpeg.R, lpeg.P, lpeg.S, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc
+local lpegmatch = lpeg.match
+local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub
+local concat = table.concat
+local global = _G
+local type, next, setmetatable = type, next, setmetatable
+
+dofile(_LEXERHOME .. '/lexer.lua')
+
+lexer.context = lexer.context or { }
+
+-- function lexer.context.loaddefinitions(name)
+--     local basepath = lexer.context and lexer.context.path or _LEXERHOME
+--     local definitions = loadfile(basepath and (basepath .. "/" ..
name) or name) +-- if not definitions then +-- definitions = loadfile(_LEXERHOME .. "/context/" .. name) +-- end +-- if type(definitions) == "function" then +-- definitions = definitions() +-- end +-- if type(definitions) == "table" then +-- return definitions +-- else +-- return nil +-- end +-- end + +function lexer.context.loaddefinitions(name) + local definitions = loadfile(_LEXERHOME .. "/context/" .. name) + if not definitions and lexer.context and lexer.context.path then + definitions = loadfile(lexer.context.path .. "/" .. name) + end + if not definitions and lexer.context and lexer.context.path then + definitions = loadfile(name) + end + if type(definitions) == "function" then + definitions = definitions() + end + if type(definitions) == "table" then + return definitions + else + return nil + end +end + +-- maybe more efficient: + +function lexer.context.word_match(words,word_chars,case_insensitive) + local chars = '%w_' -- maybe just "" when word_chars + if word_chars then + chars = '^([' .. chars .. gsub(word_chars,'([%^%]%-])', '%%%1') ..']+)' + else + chars = '^([' .. chars ..']+)' + end + if case_insensitive then + local word_list = { } + for i=1,#words do + word_list[lower(words[i])] = true + end + return P(function(input, index) + local s, e, word = find(input,chars,index) + return word and word_list[lower(word)] and e + 1 or nil + end) + else + local word_list = { } + for i=1,#words do + word_list[words[i]] = true + end + return P(function(input, index) + local s, e, word = find(input,chars,index) + return word and word_list[word] and e + 1 or nil + end) + end +end + +-- nicer anyway: + +-- todo: utf + +function lexer.context.exact_match(words,case_insensitive) + local pattern = S(concat(words)) + R("az","AZ","\127\255") -- the concat catches _ etc + if case_insensitive then + local list = { } + for i=1,#words do + list[lower(words[i])] = true + end + return Cmt(pattern^1, function(_,i,s) + return list[lower(s)] and i + end) + else + local list = { } + for i=1,#words do + list[words[i]] = true + end + return Cmt(pattern^1, function(_,i,s) + return list[s] and i + end) + end +end + +function lexer.context.word_match(words,word_chars,case_insensitive) -- word_chars not used (can be omitted) + if word_chars == true then + return lexer.context.exact_match(words,true) + else + return lexer.context.exact_match(words,case_insensitive) + end +end + +-- Overloaded functions. 
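+
+-- A quick sketch of how the matcher above behaves (illustration only, the
+-- word list is made up): lpeg.match on the pattern returned by exact_match
+-- yields the position just past a listed word, or nil:
+--
+--   local iskeyword = lexer.context.exact_match { "begin", "end" }
+--   lpeg.match(iskeyword, "begin")  -- 6, matched up to the end
+--   lpeg.match(iskeyword, "beginx") -- nil, not in the list
+--
+-- The overloaded fold function below must hand back a table mapping line
+-- numbers to { level } or { level, flag } pairs (FOLD_HEADER for a line that
+-- opens a fold, FOLD_BLANK for an empty one); the h/b/n tables cache those
+-- pairs per level.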
+
+local FOLD_BASE = SC_FOLDLEVELBASE
+local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
+local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
+
+local newline = P("\r\n") + S("\r\n")
+
+local splitlines = Ct( ( Ct ( (Cp() * Cs((1-newline)^1) * newline^-1) + (Cp() * Cc("") * newline) ) )^0)
+
+local h_table, b_table, n_table = { }, { }, { }
+
+setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end })
+setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
+setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
+
+local get_style_at = GetStyleAt
+local get_property = GetProperty
+local get_indent_amount = GetIndentAmount
+
+-- local lines = lpegmatch(splitlines,text) -- iterating over lines is faster
+-- for i=1, #lines do
+--     local li = lines[i]
+--     local line = li[2]
+--     if line ~= "" then
+--         local pos = li[1]
+--         for i=1,nofpatterns do
+--             for s, m in gmatch(line,patterns[i]) do
+--                 if hash[m] then
+--                     local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
+--                     if symbols then
+--                         local l = symbols[m]
+--                         if l then
+--                             local t = type(l)
+--                             if t == 'number' then
+--                                 current_level = current_level + l
+--                             elseif t == 'function' then
+--                                 current_level = current_level + l(text, pos, line, s, match)
+--                             end
+--                             if current_level < FOLD_BASE then -- integrate in previous
+--                                 current_level = FOLD_BASE
+--                             end
+--                         end
+--                     end
+--                 end
+--             end
+--         end
+--         if current_level > prev_level then
+--             folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
+--         else
+--             folds[line_num] = n_table[prev_level] -- { prev_level }
+--         end
+--         prev_level = current_level
+--     else
+--         folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
+--     end
+--     line_num = line_num + 1
+-- end
+
+-- not that much faster but less memory:
+
+local action_y, action_n
+
+local splitlines = ( (
+    (Cp() * Cs((1-newline)^1) * newline^-1) / function(p,l) action_y(p,l) end
+  + ( newline ) / function() action_n() end
+) )^0
+
+function lexer.context.fold(text, start_pos, start_line, start_level)
+    if text == '' then
+        return { } -- nothing to fold
+    end
+    local lexer = global._LEXER
+    if lexer._fold then
+        return lexer._fold(text, start_pos, start_line, start_level)
+    end
+    local folds = { }
+    if lexer._foldsymbols then
+        local fold_symbols = lexer._foldsymbols
+        local line_num = start_line
+        local prev_level = start_level
+        local current_level = prev_level
+        local patterns = fold_symbols._patterns
+        local nofpatterns = #patterns
+        local hash = fold_symbols._hash
+        if not hash then
+            hash = { }
+            for symbol, matches in next, fold_symbols do
+                if not find(symbol,"^_") then
+                    for s, _ in next, matches do
+                        hash[s] = true
+                    end
+                end
+            end
+            fold_symbols._hash = hash
+        end
+        action_y = function(pos,line)
+            for i=1,nofpatterns do
+                for s, m in gmatch(line,patterns[i]) do
+                    if hash[m] then
+                        local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
+                        if symbols then
+                            local l = symbols[m]
+                            if l then
+                                local t = type(l)
+                                if t == 'number' then
+                                    current_level = current_level + l
+                                    if current_level < FOLD_BASE then -- can this happen?
+                                        current_level = FOLD_BASE
+                                    end
+                                elseif t == 'function' then
+                                    current_level = current_level + l(text, pos, line, s, match)
+                                    if current_level < FOLD_BASE then
+                                        current_level = FOLD_BASE
+                                    end
+                                end
+                            end
+                        end
+                    end
+                end
+            end
+            if current_level > prev_level then
+                folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
+            else
+                folds[line_num] = n_table[prev_level] -- { prev_level }
+            end
+            prev_level = current_level
+            line_num = line_num + 1
+        end
+        action_n = function()
+            folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
+            line_num = line_num + 1
+        end
+        local lines = lpegmatch(splitlines,text) -- matched for the side effects only: the actions fill 'folds'
+    elseif get_property('fold.by.indentation', 1) == 1 then
+        local current_line = start_line
+        local prev_level = start_level
+        for _, line in gmatch(text,'([\t ]*)(.-)\r?\n') do
+            if line ~= "" then
+                local current_level = FOLD_BASE + get_indent_amount(current_line)
+                if current_level > prev_level then -- next level
+                    local i = current_line - 1
+                    while true do
+                        local f = folds[i]
+                        if f and f[2] == FOLD_BLANK then
+                            i = i - 1
+                        else
+                            break
+                        end
+                    end
+                    local f = folds[i]
+                    if f then
+                        f[2] = FOLD_HEADER
+                    end -- low indent
+                    folds[current_line] = n_table[current_level] -- { current_level } -- high indent
+                elseif current_level < prev_level then -- prev level
+                    local f = folds[current_line - 1]
+                    if f then
+                        f[1] = prev_level -- high indent
+                    end
+                    folds[current_line] = n_table[current_level] -- { current_level } -- low indent
+                else -- same level
+                    folds[current_line] = n_table[prev_level] -- { prev_level }
+                end
+                prev_level = current_level
+            else
+                folds[current_line] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
+            end
+            current_line = current_line + 1
+        end
+    else
+        for _ in gmatch(text,".-\r?\n") do
+            folds[start_line] = n_table[start_level] -- { start_level }
+            start_line = start_line + 1
+        end
+    end
+    return folds
+end
+
+function lexer.context.lex(text, init_style)
+    local lexer = global._LEXER
+    local grammar = lexer._GRAMMAR
+    if not grammar then
+        return { }
+    elseif lexer._LEXBYLINE then
+        local tokens = { }
+        local offset = 0
+        local noftokens = 0
+        for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
+            local line_tokens = lpegmatch(grammar, line)
+            if line_tokens then
+                for i=1,#line_tokens do
+                    local token = line_tokens[i]
+                    token[2] = token[2] + offset
+                    noftokens = noftokens + 1
+                    tokens[noftokens] = token
+                end
+            end
+            offset = offset + #line
+            if noftokens > 0 and tokens[noftokens][2] ~= offset then
+                noftokens = noftokens + 1
+                tokens[noftokens] = { 'default', offset + 1 }
+            end
+        end
+        return tokens
+    elseif lexer._CHILDREN then
+        local hash = lexer._HASH
+        if not hash then
+            hash = { }
+            lexer._HASH = hash
+        end
+        grammar = hash[init_style]
+        if not grammar then
+            for style, style_num in next, lexer._TOKENS do
+                if style_num == init_style then
+                    local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
+                    if lexer._INITIALRULE ~= lexer_name then
+                        build_grammar(lexer, lexer_name)
+                    end
+                    break
+                end
+            end
+            grammar = lexer._GRAMMAR
+            hash[init_style] = grammar
+        end
+        return lpegmatch(grammar, text)
+    else
+        return lpegmatch(grammar, text)
+    end
+end
+
+lexer.fold = lexer.context.fold
+lexer.lex = lexer.context.lex
+lexer.word_match = lexer.context.word_match
diff --git a/context/data/scite/lexers/themes/scite-context-theme.lua b/context/data/scite/lexers/themes/scite-context-theme.lua
index 9dc859c34..556779ce6 100644
--- a/context/data/scite/lexers/themes/scite-context-theme.lua
+++ 
b/context/data/scite/lexers/themes/scite-context-theme.lua @@ -9,60 +9,13 @@ local info = { -- we need a proper pipe: -- -- -- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or "" +-- -- global.trace("OEPS") -- how do we get access to the regular lua extensions local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base local font_name = 'Dejavu Sans Mono' local font_size = 14 --- The following files are needed: mult-def.lua, mult-prm.lua and mult-def.lua. They can be --- put in the _LEXERHOME/context path of needed. Currently we have: --- --- _LEXERHOME/themes/scite-context-theme.lua --- _LEXERHOME/scite-context-lexer.lua --- _LEXERHOME/context/mult-def.lua --- _LEXERHOME/context/mult-prm.lua --- _LEXERHOME/context/mult-mps.lua --- _LEXERHOME/context.lua --- _LEXERHOME/metafun.lua --- --- However, when you set the context_path variable and omit the files in the --- _LEXERHOME/context path then the files will be picked up from the context --- distribution which keeps them up to date automatically. --- --- This (plus a bit more) is what goes in context.properties: --- --- lexer.lpeg.home=$(SciteDefaultHome)/lexers --- lexer.lpeg.script=$(lexer.lpeg.home)/scite-context-lexer.lua --- lexer.lpeg.color.theme=$(lexer.lpeg.home)/themes/scite-context-theme.lua --- --- fold.by.indentation=0 --- --- if PLAT_WIN --- lexerpath.*.lpeg=$(lexer.lpeg.home)/LexLPeg.dll --- --- if PLAT_GTK --- lexerpath.*.lpeg=$(lexer.lpeg.home)/liblexlpeg.so --- --- lexer.*.lpeg=lpeg --- --- lexer.$(file.patterns.metapost)=lpeg_metafun --- lexer.$(file.patterns.metafun)=lpeg_metafun --- lexer.$(file.patterns.context)=lpeg_context --- lexer.$(file.patterns.tex)=lpeg_context --- lexer.$(file.patterns.lua)=lpeg_lua --- lexer.$(file.patterns.xml)=lpeg_xml --- --- comment.block.lpeg_context=% --- comment.block.at.line.start.lpeg_context=1 --- --- comment.block.lpeg_metafun=% --- comment.block.at.line.start.lpeg_metafun=1 --- --- comment.block.lpeg_lua=-- --- comment.block.at.line.start.lpeg_lua=1 --- --- comment.block.lpeg_props=# --- comment.block.at.line.start.lpeg_props=1 +local global = _G dofile(_LEXERHOME .. 
'/themes/scite.lua') -- starting point so we miss nothing @@ -71,20 +24,19 @@ module('lexer', package.seeall) lexer.context = lexer.context or { } lexer.context.path = context_path -lexer.colors = { +colors = { red = color('7F', '00', '00'), green = color('00', '7F', '00'), blue = color('00', '00', '7F'), cyan = color('00', '7F', '7F'), magenta = color('7F', '00', '7F'), yellow = color('7F', '7F', '00'), - -- - teal = color('00', '7F', '7F'), -- cyan - purple = color('7F', '00', '7F'), -- magenta orange = color('B0', '7F', '00'), -- white = color('FF', 'FF', 'FF'), + light = color('CF', 'CF', 'CF'), grey = color('80', '80', '80'), + dark = color('4F', '4F', '4F'), black = color('00', '00', '00'), -- selection = color('F7', 'F7', 'F7'), @@ -95,9 +47,11 @@ lexer.colors = { -- right = color('00', '00', 'FF'), wrong = color('FF', '00', '00'), - } +colors.teal = colors.cyan +colors.purple = colors.magenta + style_default = style { font = font_name, size = font_size, @@ -109,26 +63,44 @@ style_nothing = style { -- empty } -style_char = style { fore = colors.purple } -style_class = style { fore = colors.black, bold = true } -style_comment = style { fore = colors.green } -style_constant = style { fore = colors.cyan, bold = true } -style_definition = style { fore = colors.black, bold = true } -style_error = style { fore = colors.red } -style_function = style { fore = colors.black, bold = true } -style_keyword = style { fore = colors.blue, bold = true } -style_number = style { fore = colors.cyan } -style_operator = style { fore = colors.black, bold = true } -style_string = style { fore = colors.magenta } -style_preproc = style { fore = colors.yellow } -style_tag = style { fore = colors.cyan } -style_type = style { fore = colors.blue } -style_variable = style { fore = colors.black } -style_identifier = style_nothing +style_comment = style { fore = colors.yellow } +style_string = style { fore = colors.magenta } + +style_char = style { fore = colors.magenta } +style_class = style { fore = colors.black, bold = true } +style_constant = style { fore = colors.cyan, bold = true } +style_definition = style { fore = colors.black, bold = true } +style_error = style { fore = colors.red } +style_function = style { fore = colors.black, bold = true } +style_keyword = style { fore = colors.blue, bold = true } +style_number = style { fore = colors.cyan } +style_operator = style { fore = colors.blue } +style_preproc = style { fore = colors.yellow, bold = true } +style_tag = style { fore = colors.cyan } +style_type = style { fore = colors.blue } +style_variable = style { fore = colors.black } +style_identifier = style_nothing + +style_line_number = style { back = colors.linepanel, } +style_bracelight = style { bold = true, fore = colors.orange } +style_bracebad = style { bold = true, fore = colors.orange } +style_indentguide = style { fore = colors.linepanel, back = colors.white } +style_calltip = style { fore = colors.white, back = colors.tippanel } +style_controlchar = style_nothing + +style_context_preamble = style_comment +style_context_comment = style_comment +style_context_string = style_string +style_context_default = style_nothing +style_context_number = style_number +style_context_keyword = style_keyword +style_context_quote = style { fore = colors.blue, bold = true } +style_context_primitive = style_keyword +style_context_plain = style { fore = colors.dark, bold = true } +style_context_command = style { fore = colors.green, bold = true } +style_context_embedded = style { fore = colors.black, bold = true } 
+style_context_user = style { fore = colors.green } +style_context_grouping = style { fore = colors.red } +style_context_specials = style { fore = colors.blue } +style_context_extras = style { fore = colors.yellow } -style_line_number = style { back = colors.linepanel } -style_bracelight = style { fore = colors.right, bold = true } -style_bracebad = style { fore = colors.wrong, bold = true } -style_controlchar = style_nothing -style_indentguide = style { fore = colors.linepanel, back = colors.white } -style_calltip = style { fore = colors.white, back = colors.tippanel } diff --git a/context/data/scite/scite-context-readme.tex b/context/data/scite/scite-context-readme.tex index 58c15da20..7af38ecd9 100644 --- a/context/data/scite/scite-context-readme.tex +++ b/context/data/scite/scite-context-readme.tex @@ -6,11 +6,13 @@ The following files are needed for the lpeg based lexer: scite-ctx.lua scite-context.properties scite-pragma.properties +scite-ctx.properties scite-ctx-context.properties scite-ctx-example.properties -lezers/context.lua -lezers/metafun.lua -lezers/scite-context-lexer.lua +lexers/scite-context-lexer-tex.lua +lexers/scite-context-lexer-mps.lua +lexers/scite-context-lexer-cld.lua +lexers/scite-context-lexer.lua lexers/context/mult-def.lua lexers/context/mult-prm.lua lexers/context/mult-mps.lua @@ -33,6 +35,7 @@ the following files: \starttyping scite-context.properties scite-pragma.properties +scite-ctx.properties scite-ctx-context.properties scite-ctx-example.properties \stoptyping diff --git a/context/data/scite/scite-context.properties b/context/data/scite/scite-context.properties index 4c7565a51..ea48ecc10 100644 --- a/context/data/scite/scite-context.properties +++ b/context/data/scite/scite-context.properties @@ -580,7 +580,7 @@ style.metapost.4=fore:#007F00 style.metapost.5=fore:#000000 # Extra style.metapost.6=fore:#007F00,italics - +# Bracematch style.metapost.34=fore:#00007F style.metapost.35=fore:#7F007F @@ -607,21 +607,25 @@ if PLAT_GTK lexer.*.lpeg=lpeg -lexer.$(file.patterns.metapost)=lpeg_metafun -lexer.$(file.patterns.metafun)=lpeg_metafun -lexer.$(file.patterns.context)=lpeg_context -lexer.$(file.patterns.tex)=lpeg_context -lexer.$(file.patterns.lua)=lpeg_lua +lexer.$(file.patterns.metapost)=lpeg_scite-context-lexer-mps +lexer.$(file.patterns.metafun)=lpeg_scite-context-lexer-mps +lexer.$(file.patterns.context)=lpeg_scite-context-lexer-tex +lexer.$(file.patterns.tex)=lpeg_scite-context-lexer-tex +lexer.$(file.patterns.lua)=lpeg_scite-context-lexer-cld lexer.$(file.patterns.xml)=lpeg_xml -comment.block.lpeg_context=% -comment.block.at.line.start.lpeg_context=1 +comment.block.lpeg_scite-context-lexer-tex=% +comment.block.at.line.start.lpeg_scite-context-lexer-tex=1 -comment.block.lpeg_metafun=% -comment.block.at.line.start.lpeg_metafun=1 +comment.block.lpeg_scite-context-lexer-mps=% +comment.block.at.line.start.lpeg_scite-context-lexer-mps=1 -comment.block.lpeg_lua=-- -comment.block.at.line.start.lpeg_lua=1 +comment.block.lpeg_scite-context-lexer-cld=-- +comment.block.at.line.start.lpeg_scite-context-lexer-cld=1 comment.block.lpeg_props=# comment.block.at.line.start.lpeg_props=1 + +style.*.34=bold,fore=#7F0000,back:#CFCFCF +style.*.35=bold,fore=#7F0000,back:#CFCFCF + diff --git a/context/data/scite/scite-ctx.lua b/context/data/scite/scite-ctx.lua index fb10ce87d..72fcb967c 100644 --- a/context/data/scite/scite-ctx.lua +++ b/context/data/scite/scite-ctx.lua @@ -68,7 +68,7 @@ -- generic functions -props = props or { } setmetatable(props,{ __index = function(k,v) 
props[k] = "unknown" return "unknown" end } ) +props = props or { } -- setmetatable(props,{ __index = function(k,v) props[k] = "unknown" return "unknown" end } ) local byte, lower, upper, gsub, sub, find, rep, match, gmatch = string.byte, string.lower, string.upper, string.gsub, string.sub, string.find, string.rep, string.match, string.gmatch local sort, concat = table.sort, table.concat @@ -80,20 +80,20 @@ function traceln(str) io.flush() end -function string:grab(delimiter) - local list = {} - for snippet in self:gmatch(delimiter) do +function string.grab(str,delimiter) + local list = { } + for snippet in gmatch(str,delimiter) do list[#list+1] = snippet end return list end -function string:expand() - return (self:gsub("ENV%((%w+)%)", os.envvar)) +function string.expand(str) + return (gsub(str,"ENV%((%w+)%)", os.envvar)) end -function string:strip() - return (self:gsub("^%s*(.-)%s*$", "%1")) +function string.strip(str) + return (gsub(str,"^%s*(.-)%s*$", "%1")) end function table.alphasort(list,i) diff --git a/context/data/scite/scite-ctx.properties b/context/data/scite/scite-ctx.properties new file mode 100644 index 000000000..94a51aeb7 --- /dev/null +++ b/context/data/scite/scite-ctx.properties @@ -0,0 +1,154 @@ +# author +# +# Hans Hagen - PRAGMA ADE - www.pragma-ade.com +# +# environment variable +# +# CTXSPELLPATH=t:/spell +# +# auto language detection +# +# % version =1.0 language=uk +# <?xml version='1.0' language='uk' ?> + +ext.lua.auto.reload=1 +ext.lua.startup.script=$(SciteDefaultHome)/scite-ctx.lua + +#~ extension.$(file.patterns.context)=scite-ctx.lua +#~ extension.$(file.patterns.example)=scite-ctx.lua + +#~ ext.lua.reset=1 +#~ ext.lua.auto.reload=1 +#~ ext.lua.startup.script=t:/lua/scite-ctx.lua + +ctx.menulist.default=\ + wrap=wrap_text|\ + unwrap=unwrap_text|\ + sort=sort_text|\ + check=check_text|\ + reset=reset_text + +ctx.menulist.context=\ + wrap=wrap_text|\ + unwrap=unwrap_text|\ + sort=sort_text|\ + document=document_text|\ + quote=quote_text|\ + compound=compound_text|\ + check=check_text|\ + reset=reset_text + +ctx.menulist.example=\ + wrap=wrap_text|\ + unwrap=unwrap_text|\ + sort=sort_text|\ + uncomment=uncomment_xml|\ + document=document_text|\ + quote=quote_text|\ + compound=compound_text|\ + check=check_text|\ + reset=reset_text + +ctx.wraptext.length=65 + +ctx.spellcheck.language=auto +ctx.spellcheck.wordsize=4 +ctx.spellcheck.wordpath=ENV(CTXSPELLPATH) + +ctx.spellcheck.wordfile.all=spell-uk.txt,spell-nl.txt + +ctx.spellcheck.wordfile.uk=spell-uk.txt +ctx.spellcheck.wordfile.nl=spell-nl.txt +ctx.spellcheck.wordsize.uk=4 +ctx.spellcheck.wordsize.nl=4 + +ctx.helpinfo=\ + Shift + F11 pop up menu with ctx options|\ + |\ + Ctrl + B check spelling|\ + Ctrl + M wrap text (auto indent)|\ + Ctrl + R reset spelling results|\ + Ctrl + I insert template|\ + Ctrl + E open log file + +command.name.21.$(file.patterns.context)=CTX Action List +command.subsystem.21.$(file.patterns.context)=3 +command.21.$(file.patterns.context)=show_menu $(ctx.menulist.context) +command.groupundo.21.$(file.patterns.context)=yes +command.save.before.21.$(file.patterns.context)=2 +command.shortcut.21.$(file.patterns.context)=Shift+F11 + +command.name.21.$(file.patterns.example)=CTX Action List +command.subsystem.21.$(file.patterns.example)=3 +command.21.$(file.patterns.example)=show_menu $(ctx.menulist.example) +command.groupundo.21.$(file.patterns.example)=yes +command.save.before.21.$(file.patterns.example)=2 +command.shortcut.21.$(file.patterns.example)=Shift+F11 + +#~ command.name.21.*=CTX 
Action List +#~ command.subsystem.21.*=3 +#~ command.21.*=show_menu $(ctx.menulist.default) +#~ command.groupundo.21.*=yes +#~ command.save.before.21.*=2 +#~ command.shortcut.21.*=Shift+F11 + +command.name.22.*=CTX Check Text +command.subsystem.22.*=3 +command.22.*=check_text +command.groupundo.22.*=yes +command.save.before.22.*=2 +command.shortcut.22.*=Ctrl+B + +command.name.23.*=CTX Wrap Text +command.subsystem.23.*=3 +command.23.*=wrap_text +command.groupundo.23.*=yes +command.save.before.23.*=2 +command.shortcut.23.*=Ctrl+M + +command.name.24.*=CTX Reset Text +command.subsystem.24.*=3 +command.24.*=reset_text +command.groupundo.24.*=yes +command.save.before.24.*=2 +command.shortcut.24.*=Ctrl+R + +command.name.25.*=CTX Template +command.subsystem.25.*=3 +command.save.before.25.*=2 +command.groupundo.25.*=yes +command.shortcut.25.*=Ctrl+I + +# command.25.$(file.patterns.context)=insert_template $(ctx.template.list.context) +# command.25.$(file.patterns.example)=insert_template $(ctx.template.list.example) +# +# ctx.template.list.example=\ +# foo=mathadore.foo|\ +# bar=mathadore.bar +# +# ctx.template.mathadore.foo.file=./ctx-templates/foo.xml +# ctx.template.mathadore.bar.data=bar bar bar +# +# paths: ./ctx-templates, ../ctx-templates, ../../ctx-templates + +command.name.26.*=Open Logfile +command.subsystem.26.*=3 +command.26.*=open_log +command.save.before.26.*=2 +command.groupundo.26.*=yes +command.shortcut.26.*=Ctrl+E + +import scite-ctx-context +import scite-ctx-example + +ctx.template.scan=yes +ctx.template.rescan=no + +ctx.template.suffix.tex=tex +ctx.template.suffix.xml=xml + +command.name.27.*=XML Uncomment +command.subsystem.27.*=3 +command.27.*=uncomment_xml +command.save.before.27.*=2 +command.groupundo.27.*=yes diff --git a/context/data/scite/scite-pragma.properties b/context/data/scite/scite-pragma.properties index abc149f12..9e06a586a 100644 --- a/context/data/scite/scite-pragma.properties +++ b/context/data/scite/scite-pragma.properties @@ -8,10 +8,11 @@ # Editor: screen -position.left=25 -position.top=25 -position.width=1550 -position.height=1100 +position.left=0 +position.top=0 +position.width=1920 +position.height=1160 + output.horizontal.size=250 output.vertical.size=100 diff --git a/scripts/context/lua/mtxrun.lua b/scripts/context/lua/mtxrun.lua index 91491c3d6..7061c54d5 100644 --- a/scripts/context/lua/mtxrun.lua +++ b/scripts/context/lua/mtxrun.lua @@ -1670,6 +1670,29 @@ function lpeg.append(list,pp,delayed) return p end +-- function lpeg.exact_match(words,case_insensitive) +-- local pattern = concat(words) +-- if case_insensitive then +-- local pattern = S(upper(characters)) + S(lower(characters)) +-- local list = { } +-- for i=1,#words do +-- list[lower(words[i])] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[lower(s)] and i +-- end) +-- else +-- local pattern = S(concat(words)) +-- local list = { } +-- for i=1,#words do +-- list[words[i]] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[s] and i +-- end) +-- end +-- end + end -- of closure diff --git a/scripts/context/stubs/mswin/mtxrun.lua b/scripts/context/stubs/mswin/mtxrun.lua index 91491c3d6..7061c54d5 100644 --- a/scripts/context/stubs/mswin/mtxrun.lua +++ b/scripts/context/stubs/mswin/mtxrun.lua @@ -1670,6 +1670,29 @@ function lpeg.append(list,pp,delayed) return p end +-- function lpeg.exact_match(words,case_insensitive) +-- local pattern = concat(words) +-- if case_insensitive then +-- local pattern = S(upper(characters)) + S(lower(characters)) +-- local 
list = { } +-- for i=1,#words do +-- list[lower(words[i])] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[lower(s)] and i +-- end) +-- else +-- local pattern = S(concat(words)) +-- local list = { } +-- for i=1,#words do +-- list[words[i]] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[s] and i +-- end) +-- end +-- end + end -- of closure diff --git a/scripts/context/stubs/unix/mtxrun b/scripts/context/stubs/unix/mtxrun index 91491c3d6..7061c54d5 100755 --- a/scripts/context/stubs/unix/mtxrun +++ b/scripts/context/stubs/unix/mtxrun @@ -1670,6 +1670,29 @@ function lpeg.append(list,pp,delayed) return p end +-- function lpeg.exact_match(words,case_insensitive) +-- local pattern = concat(words) +-- if case_insensitive then +-- local pattern = S(upper(characters)) + S(lower(characters)) +-- local list = { } +-- for i=1,#words do +-- list[lower(words[i])] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[lower(s)] and i +-- end) +-- else +-- local pattern = S(concat(words)) +-- local list = { } +-- for i=1,#words do +-- list[words[i]] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[s] and i +-- end) +-- end +-- end + end -- of closure diff --git a/tex/context/base/buff-imp-lua.lua b/tex/context/base/buff-imp-lua.lua index 3a3bb3d2c..1147666cc 100644 --- a/tex/context/base/buff-imp-lua.lua +++ b/tex/context/base/buff-imp-lua.lua @@ -146,7 +146,7 @@ local special = S("-+/*^%=#") + P("..") local equals = P("=")^0 local open = P("[") * Cg(equals, "init") * P("[") * P("\n")^-1 -- maybe better: patterns.newline^-1 local close = P("]") * C(equals) * P("]") -local closeeq = Cmt(close * Cb("init"), function(s,i,a,b) return a == b end) +local closeeq = Cmt(close * Cb("init"), function(s,i,a,b) return a == b end) -- wrong return value local longstring = open * Cs((P(1) - closeeq)^0) * close * Carg(1) local function long(content,equals,settings) diff --git a/tex/context/base/buff-imp-mp.lua b/tex/context/base/buff-imp-mp.lua index 6b8a26536..24ca597e7 100644 --- a/tex/context/base/buff-imp-mp.lua +++ b/tex/context/base/buff-imp-mp.lua @@ -6,98 +6,21 @@ if not modules then modules = { } end modules ['buff-imp-mp'] = { license = "see context related readme files" } +-- Now that we also use lpeg lexers in scite, we can share the keywords +-- so we have moved the keyword lists to mult-mps.lua. Don't confuse the +-- scite lexers with the ones we use here. Of course all those lexers +-- boil down to doing similar things, but here we need more control over +-- the rendering and have a different way of nesting. It is no coincidence +-- that the coloring looks similar: both are derived from earlier lexing (in +-- texedit, mkii and the c++ scite lexer). 
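+
+-- As a small illustration of the sharing (shapes assumed here, not the real
+-- lists): mult-mps.lua returns plain arrays of names, and table.tohash turns
+-- each into a set for constant-time lookup while highlighting:
+--
+--   local mps = { primitives = { "def", "enddef", "vardef" } }
+--   local primitives = table.tohash(mps.primitives)
+--   -- primitives.def == true, primitives.vardef == true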
+ local P, S, V, patterns = lpeg.P, lpeg.S, lpeg.V, lpeg.patterns -local primitives = table.tohash { - 'charcode', 'day', 'linecap', 'linejoin', 'miterlimit', 'month', 'pausing', - 'prologues', 'showstopping', 'time', 'tracingcapsules', 'tracingchoices', - 'tracingcommands', 'tracingequations', 'tracinglostchars', - 'tracingmacros', 'tracingonline', 'tracingoutput', 'tracingrestores', - 'tracingspecs', 'tracingstats', 'tracingtitles', 'truecorners', - 'warningcheck', 'year', 'mpprocset', - 'false', 'nullpicture', 'pencircle', 'true', - 'and', 'angle', 'arclength', 'arctime', 'ASCII', 'bluepart', 'boolean', 'bot', - 'char', 'color', 'cosd', 'cycle', 'decimal', 'directiontime', 'floor', 'fontsize', - 'greenpart', 'hex', 'infont', 'intersectiontimes', 'known', 'length', 'llcorner', - 'lrcorner', 'makepath', 'makepen', 'mexp', 'mlog', 'normaldeviate', 'not', - 'numeric', 'oct', 'odd', 'or', 'path', 'pair', 'pen', 'penoffset', 'picture', 'point', - 'postcontrol', 'precontrol', 'redpart', 'reverse', 'rotated', 'scaled', - 'shifted', 'sind', 'slanted', 'sqrt', 'str', 'string', 'subpath', 'substring', - 'transform', 'transformed', 'ulcorner', 'uniformdeviate', 'unknown', - 'urcorner', 'xpart', 'xscaled', 'xxpart', 'xypart', 'ypart', 'yscaled', 'yxpart', - 'yypart', 'zscaled', - 'addto', 'clip', 'input', 'interim', 'let', 'newinternal', 'save', 'setbounds', - 'shipout', 'show', 'showdependencies', 'showtoken', 'showvariable', - 'special', - 'begingroup', 'endgroup', 'of', 'curl', 'tension', 'and', 'controls', - 'reflectedabout', 'rotatedaround', 'interpath', 'on', 'off', 'beginfig', - 'endfig', 'def', 'vardef', 'enddef', 'epxr', 'suffix', 'text', 'primary', 'secondary', - 'tertiary', 'primarydef', 'secondarydef', 'tertiarydef', 'top', 'bottom', - 'ulft', 'urt', 'llft', 'lrt', 'randomseed', 'also', 'contour', 'doublepath', - 'withcolor', 'withpen', 'dashed', 'if', 'else', 'elseif', 'fi', 'for', 'endfor', 'forever', 'exitif', - 'forsuffixes', 'downto', 'upto', 'step', 'until', - 'charlist', 'extensible', 'fontdimen', 'headerbyte', 'kern', 'ligtable', - 'boundarychar', 'chardp', 'charext', 'charht', 'charic', 'charwd', 'designsize', - 'fontmaking', 'charexists', - 'cullit', 'currenttransform', 'gfcorners', 'grayfont', 'hround', - 'imagerules', 'lowres_fix', 'nodisplays', 'notransforms', 'openit', - 'displaying', 'currentwindow', 'screen_rows', 'screen_cols', - 'pixels_per_inch', 'cull', 'display', 'openwindow', 'numspecial', - 'totalweight', 'autorounding', 'fillin', 'proofing', 'tracingpens', - 'xoffset', 'chardx', 'granularity', 'smoothing', 'turningcheck', 'yoffset', - 'chardy', 'hppp', 'tracingedges', 'vppp', - 'extra_beginfig', 'extra_endfig', 'mpxbreak', - 'end', 'btex', 'etex', 'verbatimtex' -} +local mps = dofile(resolvers.findfile("mult-mps.lua","tex")) -local plain = table.tohash { - 'ahangle', 'ahlength', 'bboxmargin', 'defaultpen', 'defaultscale', - 'labeloffset', 'background', 'currentpen', 'currentpicture', 'cuttings', - 'defaultfont', 'extra_beginfig', 'extra_endfig', - 'beveled', 'black', 'blue', 'bp', 'butt', 'cc', 'cm', 'dd', 'ditto', 'down', 'epsilon', - 'evenly', 'fullcircle', 'green', 'halfcircle', 'identity', 'in', 'infinity', 'left', - 'mitered', 'mm', 'origin', 'pensquare', 'pt', 'quartercircle', 'red', 'right', - 'rounded', 'squared', 'unitsquare', 'up', 'white', 'withdots', - 'abs', 'bbox', 'ceiling', 'center', 'cutafter', 'cutbefore', 'dir', - 'directionpoint', 'div', 'dotprod', 'intersectionpoint', 'inverse', 'mod', 'lft', - 'round', 'rt', 'unitvector', 'whatever', - 
'cutdraw', 'draw', 'drawarrow', 'drawdblarrow', 'fill', 'filldraw', 'drawdot', - 'loggingall', 'pickup', 'tracingall', 'tracingnone', 'undraw', 'unfill', - 'unfilldraw', - 'buildcycle', 'dashpattern', 'decr', 'dotlabel', 'dotlabels', 'drawoptions', - 'incr', 'label', 'labels', 'max', 'min', 'thelabel', 'z', - 'beginchar', 'blacker', 'capsule_end', 'change_width', - 'define_blacker_pixels', 'define_corrected_pixels', - 'define_good_x_pixels', 'define_good_y_pixels', - 'define_horizontal_corrected_pixels', 'define_pixels', - 'define_whole_blacker_pixels', 'define_whole_pixels', - 'define_whole_vertical_blacker_pixels', - 'define_whole_vertical_pixels', 'endchar', 'extra_beginchar', - 'extra_endchar', 'extra_setup', 'font_coding_scheme', - 'font_extra_space' -} - -local metafun = table.tohash { - 'unitcircle', 'fulldiamond', 'unitdiamond', - 'halfcircle', 'quartercircle', - 'llcircle', 'lrcircle', 'urcircle', 'ulcircle', - 'tcircle', 'bcircle', 'lcircle', 'rcircle', - 'lltriangle', 'lrtriangle', 'urtriangle', 'ultriangle', - 'smoothed', 'cornered', 'superellipsed', 'randomized', 'squeezed', - 'punked', 'curved', 'unspiked', 'simplified', 'blownup', 'stretched', - 'paralled', 'enlonged', 'shortened', - 'enlarged', 'leftenlarged', 'topenlarged', 'rightenlarged', 'bottomenlarged', - 'llenlarged', 'lrenlarged', 'urenlarged', 'ulenlarged', - 'llmoved', 'lrmoved', 'urmoved', 'ulmoved', - 'boundingbox', 'innerboundingbox', 'outerboundingbox', - 'bottomboundary', 'leftboundary', 'topboundary', 'rightboundary', - 'xsized', 'ysized', 'xysized', - 'cmyk', 'transparent', 'withshade', 'spotcolor', - 'drawfill', 'undrawfill', - 'inverted', 'uncolored', 'softened', 'grayed', - 'textext', 'graphictext', - 'loadfigure', 'externalfigure' -} +local primitives = table.tohash(mps.primitives) +local plain = table.tohash(mps.plain) +local metafun = table.tohash(mps.metafun) local context = context local verbatim = context.verbatim diff --git a/tex/context/base/cont-new.mkii b/tex/context/base/cont-new.mkii index ba26b7226..abd0357f8 100644 --- a/tex/context/base/cont-new.mkii +++ b/tex/context/base/cont-new.mkii @@ -11,7 +11,7 @@ %C therefore copyrighted by \PRAGMA. See mreadme.pdf for %C details. -\newcontextversion{2011.09.12 22:49} +\newcontextversion{2011.09.14 12:21} %D This file is loaded at runtime, thereby providing an %D excellent place for hacks, patches, extensions and new diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv index 1c31ebf94..d80c38ea8 100644 --- a/tex/context/base/cont-new.mkiv +++ b/tex/context/base/cont-new.mkiv @@ -11,7 +11,7 @@ %C therefore copyrighted by \PRAGMA. See mreadme.pdf for %C details. -\newcontextversion{2011.09.12 22:49} +\newcontextversion{2011.09.14 12:21} %D This file is loaded at runtime, thereby providing an %D excellent place for hacks, patches, extensions and new diff --git a/tex/context/base/context-version.pdf b/tex/context/base/context-version.pdf Binary files differindex ef175af2a..f28a6957c 100644 --- a/tex/context/base/context-version.pdf +++ b/tex/context/base/context-version.pdf diff --git a/tex/context/base/context-version.png b/tex/context/base/context-version.png Binary files differindex 1c047a3b6..b421b99a0 100644 --- a/tex/context/base/context-version.png +++ b/tex/context/base/context-version.png diff --git a/tex/context/base/context.mkii b/tex/context/base/context.mkii index a0ada15e9..ce28c02a6 100644 --- a/tex/context/base/context.mkii +++ b/tex/context/base/context.mkii @@ -20,7 +20,7 @@ %D your styles an modules. 
\edef\contextformat {\jobname} -\edef\contextversion{2011.09.12 22:49} +\edef\contextversion{2011.09.14 12:21} %D For those who want to use this: diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv index 88217fc54..f52f46334 100644 --- a/tex/context/base/context.mkiv +++ b/tex/context/base/context.mkiv @@ -20,7 +20,7 @@ %D your styles an modules. \edef\contextformat {\jobname} -\edef\contextversion{2011.09.12 22:49} +\edef\contextversion{2011.09.14 12:21} %D For those who want to use this: diff --git a/tex/context/base/l-lpeg.lua b/tex/context/base/l-lpeg.lua index 4b40b641e..9860937c4 100644 --- a/tex/context/base/l-lpeg.lua +++ b/tex/context/base/l-lpeg.lua @@ -633,3 +633,26 @@ function lpeg.append(list,pp,delayed) end return p end + +-- function lpeg.exact_match(words,case_insensitive) +-- local pattern = concat(words) +-- if case_insensitive then +-- local pattern = S(upper(characters)) + S(lower(characters)) +-- local list = { } +-- for i=1,#words do +-- list[lower(words[i])] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[lower(s)] and i +-- end) +-- else +-- local pattern = S(concat(words)) +-- local list = { } +-- for i=1,#words do +-- list[words[i]] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[s] and i +-- end) +-- end +-- end diff --git a/tex/context/base/mult-def.lua b/tex/context/base/mult-def.lua index 1144dba22..7e0ed0bf1 100644 --- a/tex/context/base/mult-def.lua +++ b/tex/context/base/mult-def.lua @@ -7,6 +7,62 @@ if not modules then modules = { } end modules ['mult-def'] = { } return { + ["helpers"]={ -- for syntax highlighters, only the ones that are for users (boring to collect them) + -- + "doif", "doifnot", "doifelse", + "doifinset", "doifnotinset", "doifinsetelse", + "doifnextcharelse", "doifnextoptionalelse", "doifnextparenthesiselse", "doiffastoptionalcheckelse", + "doifundefinedelse", "doifdefinedelse", "doifundefined", "doifdefined", + "doifelsevalue", "doifvalue", "doifnotvalue", + "doifnothing", "doifsomething", "doifelsenothing", "doifsomethingelse", + "doifvaluenothing", "doifvaluesomething", "doifelsevaluenothing", + "doifdimensionelse", + -- + "tracingall", "tracingnone", "loggingall", + -- + "appendtoks", "prependtoks", "appendtotoks", "prependtotoks", + -- + "endgraf", "empty", "null", "space", "obeyspaces", "obeylines", + -- + "executeifdefined", + -- + "dontleavehmode", + -- + "setmeasure", "setemeasure", "setgmeasure", "setxmeasure", "definemeasure", "measure", + -- + "getvalue", "setvalue", "setevalue", "setgvalue", "setxvalue", "letvalue", "letgvalue", + "resetvalue", "undefinevalue", "ignorevalue", + "setuvalue", "setuevalue", "setugvalue", "setuxvalue", + "globallet", "glet", + "getparameters", "geteparameters", + -- + "processcommalist", "processcommacommand", "quitcommalist", + "processaction", "processallactions", + -- + "startsetups", "stopsetups", + "startxmlsetups", "stopxmlsetups", + "starttexdefinition", "stoptexdefinition", + -- + "unexpanded", "expanded", "startexpanded", "stopexpanded", "protected", "protect", "unprotect", + -- + "firstofoneargument", + "firstoftwoarguments", "secondoftwoarguments", + "firstofthreearguments", "secondofthreearguments", "thirdofthreearguments", + "firstoffourarguments", "secondoffourarguments", "thirdoffourarguments", "fourthoffourarguments", + "firstoffivearguments", "secondoffivearguments", "thirdoffivearguments", "fourthoffivearguments", "fifthoffivearguments", + "firstofsixarguments", "secondofsixarguments", 
"thirdofsixarguments", "fourthofsixarguments", "fifthofsixarguments", "sixthofsixarguments", + -- + "gobbleoneargument", "gobbletwoarguments", "gobblethreearguments", "gobblefourarguments", "gobblefivearguments", "gobblesixarguments", "gobblesevenarguments", "gobbleeightarguments", "gobbleninearguments", "gobbletenarguments", + "gobbleoneoptional", "gobbletwooptionals", "gobblethreeoptionals", "gobblefouroptionals", "gobblefiveoptionals", + -- + "dorecurse", "doloop", "exitloop", "dostepwiserecurse", "recurselevel", "recursedepth", + -- + "newconstant", "setnewconstant", "newconditional", "settrue", "setfalse", + -- + "dosingleempty", "dodoubleempty", "dotripleempty", "doquadrupleempty", "doquintupleempty", "dosixtupleempty", "doseventupleempty", + "dosinglegroupempty", "dodoublegroupempty", "dotriplegroupempty", "doquadruplegroupempty", "doquintuplegroupempty", + -- + }, ["commands"]={ ["CAPPED"]={ ["cs"]="KAP", diff --git a/tex/context/base/mult-mps.lua b/tex/context/base/mult-mps.lua index c56fe2eac..f382433de 100644 --- a/tex/context/base/mult-mps.lua +++ b/tex/context/base/mult-mps.lua @@ -22,7 +22,7 @@ return { "special", "begingroup", "endgroup", "of", "curl", "tension", "and", "controls", "reflectedabout", "rotatedaround", "interpath", "on", "off", "beginfig", - "endfig", "def", "vardef", "enddef", "epxr", "suffix", "text", "primary", "secondary", + "endfig", "def", "vardef", "enddef", "expr", "suffix", "text", "primary", "secondary", "tertiary", "primarydef", "secondarydef", "tertiarydef", "top", "bottom", "ulft", "urt", "llft", "lrt", "randomseed", "also", "contour", "doublepath", "withcolor", "withpen", "dashed", "if", "else", "elseif", "fi", "for", "endfor", "forever", "exitif", diff --git a/tex/context/base/mult-prm.lua b/tex/context/base/mult-prm.lua index 0c84563fc..61ba38199 100644 --- a/tex/context/base/mult-prm.lua +++ b/tex/context/base/mult-prm.lua @@ -6,212 +6,135 @@ return { "Omegaminorversion", "Omegarevision", "Udelcode", - "Udelcode", "Udelcodenum", "Udelimiter", "Udelimiterover", "Udelimiterunder", "Umathaccent", - "Umathaccent", - "Umathaccents", "Umathaccents", "Umathaxis", "Umathbinbinspacing", - "Umathbinbinspacing", "Umathbinclosespacing", "Umathbininnerspacing", "Umathbinopenspacing", "Umathbinopspacing", - "Umathbinopspacing", - "Umathbinordspacing", "Umathbinordspacing", "Umathbinpunctspacing", - "Umathbinpunctspacing", "Umathbinrelspacing", "Umathbotaccent", - "Umathbotaccent", "Umathchar", "Umathchardef", - "Umathchardef", "Umathcharnum", "Umathclosebinspacing", - "Umathclosebinspacing", - "Umathcloseclosespacing", "Umathcloseclosespacing", "Umathcloseinnerspacing", "Umathcloseopenspacing", - "Umathcloseopenspacing", "Umathcloseopspacing", "Umathcloseordspacing", "Umathclosepunctspacing", "Umathcloserelspacing", "Umathcode", - "Umathcode", - "Umathcodenum", "Umathcodenum", "Umathconnectoroverlapmin", - "Umathconnectoroverlapmin", "Umathfractiondelsize", "Umathfractiondenomdown", - "Umathfractiondenomdown", - "Umathfractiondenomvgap", "Umathfractiondenomvgap", "Umathfractionnumup", "Umathfractionnumvgap", - "Umathfractionnumvgap", "Umathfractionrule", "Umathinnerbinspacing", - "Umathinnerbinspacing", "Umathinnerclosespacing", - "Umathinnerclosespacing", - "Umathinnerinnerspacing", "Umathinnerinnerspacing", "Umathinneropenspacing", - "Umathinneropenspacing", - "Umathinneropspacing", "Umathinneropspacing", "Umathinnerordspacing", "Umathinnerpunctspacing", "Umathinnerrelspacing", - "Umathinnerrelspacing", "Umathlimitabovebgap", "Umathlimitabovekern", - 
"Umathlimitabovekern", - "Umathlimitabovevgap", "Umathlimitabovevgap", "Umathlimitbelowbgap", "Umathlimitbelowkern", "Umathlimitbelowvgap", - "Umathlimitbelowvgap", "Umathopbinspacing", - "Umathopbinspacing", - "Umathopclosespacing", "Umathopclosespacing", "Umathopenbinspacing", - "Umathopenbinspacing", - "Umathopenclosespacing", "Umathopenclosespacing", "Umathopeninnerspacing", "Umathopenopenspacing", "Umathopenopspacing", "Umathopenordspacing", - "Umathopenordspacing", "Umathopenrelspacing", "Umathoperatorsize", - "Umathoperatorsize", - "Umathopinnerspacing", "Umathopinnerspacing", "Umathopopenspacing", - "Umathopopenspacing", "Umathopopspacing", "Umathopordspacing", "Umathoppunctspacing", - "Umathoppunctspacing", - "Umathoprelspacing", "Umathoprelspacing", "Umathordbinspacing", "Umathordclosespacing", "Umathordinnerspacing", "Umathordopenspacing", "Umathordopspacing", - "Umathordopspacing", "Umathordordspacing", "Umathordpunctspacing", "Umathordrelspacing", - "Umathordrelspacing", "Umathoverbarkern", "Umathoverbarrule", - "Umathoverbarrule", "Umathoverbarvgap", "Umathoverdelimiterbgap", - "Umathoverdelimiterbgap", "Umathoverdelimitervgap", "Umathpunctbinspacing", - "Umathpunctbinspacing", "Umathpunctclosespacing", "Umathpunctinnerspacing", "Umathpunctopenspacing", - "Umathpunctopenspacing", - "Umathpunctopspacing", "Umathpunctopspacing", "Umathpunctordspacing", - "Umathpunctordspacing", "Umathpunctpunctspacing", "Umathpunctrelspacing", - "Umathpunctrelspacing", - "Umathquad", "Umathquad", "Umathradicaldegreeafter", - "Umathradicaldegreeafter", "Umathradicaldegreebefore", "Umathradicaldegreeraise", - "Umathradicaldegreeraise", "Umathradicalkern", - "Umathradicalkern", - "Umathradicalrule", "Umathradicalrule", "Umathradicalvgap", - "Umathradicalvgap", - "Umathrelbinspacing", "Umathrelbinspacing", "Umathrelclosespacing", - "Umathrelclosespacing", - "Umathrelinnerspacing", "Umathrelinnerspacing", "Umathrelopenspacing", - "Umathrelopenspacing", - "Umathrelopspacing", "Umathrelopspacing", "Umathrelpunctspacing", "Umathrelrelspacing", "Umathspaceafterscript", - "Umathspaceafterscript", "Umathstackdenomdown", "Umathstacknumup", - "Umathstacknumup", - "Umathstackvgap", "Umathstackvgap", "Umathsubshiftdown", - "Umathsubshiftdown", - "Umathsubshiftdrop", "Umathsubshiftdrop", "Umathsubsupshiftdown", "Umathsubsupvgap", - "Umathsubsupvgap", - "Umathsubtopmax", "Umathsubtopmax", "Umathsupbottommin", "Umathsupshiftdrop", "Umathsupshiftup", - "Umathsupshiftup", "Umathsupsubbottommax", "Umathunderbarkern", - "Umathunderbarkern", "Umathunderbarrule", - "Umathunderbarrule", - "Umathunderbarvgap", "Umathunderbarvgap", "Umathunderdelimiterbgap", - "Umathunderdelimiterbgap", - "Umathunderdelimitervgap", "Umathunderdelimitervgap", "Uoverdelimiter", "Uradical", - "Uradical", - "Uroot", "Uroot", "Ustack", - "Ustack", "Ustartdisplaymath", "Ustartmath", - "Ustartmath", "Ustopdisplaymath", "Ustopmath", "Usubscript", - "Usubscript", "Usuperscript", - "Usuperscript", - "Uunderdelimiter", "Uunderdelimiter", "abovedisplayshortskip", "abovedisplayskip", @@ -223,12 +146,9 @@ return { "aftergroup", "alignmark", "aligntab", - "aligntab", "atop", "atopwithdelims", "attribute", - "attribute", - "attributedef", "attributedef", "badness", "baselineskip", @@ -240,15 +160,13 @@ return { "boxdir", "boxmaxdepth", "brokenpenalty", - "catcodetable", + "catcode", "catcodetable", "char", - "chardp", + "chardef", "chardp", "charht", "charit", - "charit", - "charwd", "charwd", "cleaders", "clearmarks", @@ -260,11 +178,9 @@ return { 
"countdef", "cr", "crampeddisplaystyle", - "crampeddisplaystyle", "crampedscriptscriptstyle", "crampedscriptstyle", "crampedtextstyle", - "crampedtextstyle", "crcr", "csname", "currentgrouplevel", @@ -273,6 +189,7 @@ return { "currentiflevel", "day", "deadcycles", + "def", "defaultskewchar", "delcode", "delimiterfactor", @@ -319,9 +236,9 @@ return { "fontcharic", "fontcharwd", "fontid", - "fontid", "fontname", "formatname", + "futurelet", "gdef", "gleaders", "global", @@ -349,8 +266,6 @@ return { "hyphenpenalty", "if", "ifabsdim", - "ifabsdim", - "ifabsnum", "ifabsnum", "ifcase", "ifcat", @@ -366,7 +281,6 @@ return { "ifpdfabsdim", "ifpdfprimitive", "ifprimitive", - "ifprimitive", "iftrue", "ifvbox", "ifvmode", @@ -376,7 +290,6 @@ return { "immediate", "indent", "initcatcodetable", - "initcatcodetable", "input", "insert", "insertpenalties", @@ -389,11 +302,9 @@ return { "lastnodetype", "lastpenalty", "latelua", - "latelua", "lccode", "leaders", "leftghost", - "leftghost", "lefthyphenmin", "leftmarginkern", "leftskip", @@ -404,19 +315,15 @@ return { "lineskip", "lineskiplimit", "localbrokenpenalty", - "localbrokenpenalty", - "localinterlinepenalty", "localinterlinepenalty", "localleftbox", "localrightbox", - "localrightbox", "long", "lowercase", "lpcode", "luaescapestring", "luatexdatestamp", "luatexrevision", - "luatexrevision", "luatexversion", "mark", "marks", @@ -450,8 +357,6 @@ return { "noexpand", "noindent", "nokerns", - "nokerns", - "noligs", "noligs", "nolimits", "nolocaldirs", @@ -460,27 +365,19 @@ return { "nullfont", "number", "odelcode", - "odelcode", "odelimiter", - "odelimiter", - "omathaccent", "omathaccent", "omathchar", - "omathchar", - "omathchardef", "omathchardef", "omathcode", - "omathcode", "omit", "openin", "openout", "or", "oradical", - "oradical", "outer", "output", "outputbox", - "outputbox", "outputpenalty", "over", "overfullrule", @@ -495,19 +392,14 @@ return { "pagefilstretch", "pagegoal", "pageheight", - "pageheight", - "pageleftoffset", "pageleftoffset", "pageshrink", "pagestretch", "pagetopoffset", - "pagetopoffset", "pagetotal", "pagewidth", - "pagewidth", "par", "pardir", - "pardir", "parfillskip", "parshapedimen", "parshapeindent", @@ -593,18 +485,14 @@ return { "postdisplaypenalty", "postexhyphenchar", "posthyphenchar", - "posthyphenchar", "predisplaydirection", "predisplaysize", "preexhyphenchar", - "preexhyphenchar", - "prehyphenchar", "prehyphenchar", "pretolerance", "prevdepth", "prevgraf", "primitive", - "primitive", "protected", "quitvmode", "raise", @@ -613,12 +501,10 @@ return { "relax", "relpenalty", "right", - "rightghost", "rightmarginkern", "rightskip", "rpcode", "savecatcodetable", - "savecatcodetable", "savinghyphcodes", "scantextokens", "scriptfont", @@ -651,16 +537,12 @@ return { "splittopskip", "string", "suppressfontnotfounderror", - "suppressfontnotfounderror", - "suppressifcsnameerror", "suppressifcsnameerror", "suppresslongerror", "suppressoutererror", - "suppressoutererror", "synctex", "tabskip", "tagcode", - "textdir", "textfont", "the", "thickmuskip", diff --git a/tex/context/base/mult-prm.mkiv b/tex/context/base/mult-prm.mkiv index 4fda18463..ab0c91b8e 100644 --- a/tex/context/base/mult-prm.mkiv +++ b/tex/context/base/mult-prm.mkiv @@ -1,17 +1,20 @@ \starttext \startluacode - local primitives = { } - table.merge(primitives,table.fromhash(tex.primitives())) - table.merge(primitives,tex.extraprimitives('etex')) - table.merge(primitives,tex.extraprimitives('pdftex')) - table.merge(primitives,tex.extraprimitives('luatex')) - 
table.merge(primitives,tex.extraprimitives('aleph')) - table.merge(primitives,tex.extraprimitives('omega')) + local primitives = table.unique( + table.merge( + { }, + table.fromhash(tex.primitives()), + tex.extraprimitives('etex'), + tex.extraprimitives('pdftex'), + tex.extraprimitives('luatex'), + tex.extraprimitives('aleph'), + tex.extraprimitives('omega'), + { "def", "catcode", "futurelet", "chardef", } + ) + ) table.sort(primitives) - while string.find(primitives[1],"[^A-Za-z]") do - table.remove(primitives,1) - end + table.remove(primitives,1) -- \- io.savedata("mult-prm.lua",table.serialize({ primitives = primitives },true,{ reduce = true, inline = false })) \stopluacode diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf Binary files differ index 1e72db3fc..2abcef338 100644 --- a/tex/context/base/status-files.pdf +++ b/tex/context/base/status-files.pdf diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf Binary files differ index 48e8df334..e89a371d1 100644 --- a/tex/context/base/status-lua.pdf +++ b/tex/context/base/status-lua.pdf diff --git a/tex/context/base/syst-aux.mkiv b/tex/context/base/syst-aux.mkiv index c7a3d2325..796f3321e 100644 --- a/tex/context/base/syst-aux.mkiv +++ b/tex/context/base/syst-aux.mkiv @@ -156,7 +156,7 @@ %D alternatives to this command. Beware, only the simple one %D has \type {\noexpand} before its argument. - \long\def\@@expanded{} % always long; global (less restores) +\long\def\@@expanded{} % always long; global (less restores) \long\def\expanded#1% {\long\xdef\@@expanded{\noexpand#1}\@@expanded} @@ -3763,28 +3763,28 @@ % faster -\long\unexpanded\def\dostepwiserecurse#1#2#3#4% can be made faster by postponing #4 - {\global\advance\outerrecurse \plusone - \long\global\@EA\def\csname\@@arecurse\recursedepth\endcsname##1##2{#4}% - \global\@EA\let\csname\@@irecurse\recursedepth\endcsname\recurselevel - \ifnum#3>\zerocount - \ifnum#2<#1\relax - \let\nextrecurse\exitstepwiserecurse - \else - \let\nextrecurse\dodostepwiserecurse - \fi - \else - \ifnum#3<\zerocount - \ifnum#1<#2\relax - \let\nextrecurse\exitstepwiserecurse - \else - \let\nextrecurse\dodostepwisereverse - \fi - \else - \let\nextrecurse\exitstepwiserecurse - \fi - \fi - \normalexpanded{\nextrecurse{\number#1}{\number#2}{\number#3}}} +% \long\unexpanded\def\dostepwiserecurse#1#2#3#4% can be made faster by postponing #4 +% {\global\advance\outerrecurse \plusone +% \long\global\@EA\def\csname\@@arecurse\recursedepth\endcsname##1##2{#4}% +% \global\@EA\let\csname\@@irecurse\recursedepth\endcsname\recurselevel +% \ifnum#3>\zerocount +% \ifnum#2<#1\relax +% \let\nextrecurse\exitstepwiserecurse +% \else +% \let\nextrecurse\dodostepwiserecurse +% \fi +% \else +% \ifnum#3<\zerocount +% \ifnum#1<#2\relax +% \let\nextrecurse\exitstepwiserecurse +% \else +% \let\nextrecurse\dodostepwisereverse +% \fi +% \else +% \let\nextrecurse\exitstepwiserecurse +% \fi +% \fi +% \normalexpanded{\nextrecurse{\number#1}{\number#2}{\number#3}}} % slightly faster @@ -5982,7 +5982,7 @@ \newbox\@@dlhbox -\unexpanded \def\dontleavehmode +\unexpanded\def\dontleavehmode {\ifhmode\else \ifmmode\else \setbox\@@dlhbox\hbox{\mathsurround\zeropoint\everymath\emptytoks$ $}\unhbox\@@dlhbox \fi \fi} diff --git a/tex/context/base/syst-ini.mkiv b/tex/context/base/syst-ini.mkiv index 3e8c8a749..7ff0c5575 100644 --- a/tex/context/base/syst-ini.mkiv +++ b/tex/context/base/syst-ini.mkiv @@ -581,7 +581,7 @@ %D %D In \LUATEX\ we have ways around this.
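The mult-prm.mkiv hunk above now merges the primitive lists of all engines into one table, adds a few plain-TeX names by hand, and deduplicates before serializing, which is why the long runs of doubled entries disappear from mult-prm.lua. A rough standalone equivalent, assuming plain Lua (the real code runs inside LuaTeX and uses tex.primitives, tex.extraprimitives and ConTeXt's table helpers):

local function collect(...)
    local seen, result = { }, { }
    for _, list in ipairs { ... } do
        for i=1,#list do
            local name = list[i]
            if not seen[name] then -- keep the first occurrence only
                seen[name] = true
                result[#result+1] = name
            end
        end
    end
    table.sort(result)
    return result
end

-- hypothetical sample input; duplicates collapse to a single entry
local primitives = collect(
    { "def", "catcode", "char" },
    { "def", "futurelet", "char" }
)
print(table.concat(primitives, " ")) --> catcode char def futurelet

The remaining table.remove(primitives,1) drops the single non-alphabetic primitive that sorts first (the trailing comment suggests \-), instead of the old loop that stripped every leading non-letter entry.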
-\def\tracingall +\normalprotected\def\tracingall {\tracingonline \plusone \tracingcommands \plusthree \tracingstats \plustwo @@ -600,11 +600,11 @@ \tracingassigns \plustwo \errorstopmode} -\def\loggingall +\normalprotected\def\loggingall {\tracingall \tracingonline \zerocount} -\def\tracingnone +\normalprotected\def\tracingnone {\tracingassigns \zerocount \tracingnesting \zerocount \tracingscantokens\zerocount @@ -700,7 +700,7 @@ %D does not support nested loops. We use a namespace prefix %D \type {@@pln}. -\long\def\loop#1\repeat{\long\def\@@plnbody{#1}\@@plniterate} +\long\def\loop#1\repeat{\long\def\@@plnbody{#1}\@@plniterate} % might go %D The following makes \type {\loop} \unknown\ \type {\if} %D \unknown\ \type {\repeat} skippable (clever trick): diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua index 48733addb..18d88e815 100644 --- a/tex/generic/context/luatex/luatex-fonts-merged.lua +++ b/tex/generic/context/luatex/luatex-fonts-merged.lua @@ -1,6 +1,6 @@ -- merged file : luatex-fonts-merged.lua -- parent file : luatex-fonts.lua --- merge date : 09/12/11 22:49:51 +-- merge date : 09/14/11 12:21:17 do -- begin closure to overcome local limits and interference @@ -1726,6 +1726,29 @@ function lpeg.append(list,pp,delayed) return p end +-- function lpeg.exact_match(words,case_insensitive) +-- local pattern = concat(words) +-- if case_insensitive then +-- local pattern = S(upper(characters)) + S(lower(characters)) +-- local list = { } +-- for i=1,#words do +-- list[lower(words[i])] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[lower(s)] and i +-- end) +-- else +-- local pattern = S(concat(words)) +-- local list = { } +-- for i=1,#words do +-- list[words[i]] = true +-- end +-- return Cmt(pattern^1, function(_,i,s) +-- return list[s] and i +-- end) +-- end +-- end + end -- closure do -- begin closure to overcome local limits and interference
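The commented-out lpeg.exact_match sketch added to both l-lpeg.lua and the merged file would not run as written: the case-insensitive branch references an undefined characters variable, the outer local pattern = concat(words) is immediately shadowed and unused, and the Cmt callbacks expect the matched word as a third argument, which seems to require an explicit C(...) capture. A repaired version might look like the following; this is an assumption about the intent, not the committed code:

local lpeg = require("lpeg")
local S, C, Cmt = lpeg.S, lpeg.C, lpeg.Cmt
local concat, lower, upper = table.concat, string.lower, string.upper

local function exact_match(words,case_insensitive)
    local characters = concat(words) -- every letter occurring in a keyword
    local list = { }
    for i=1,#words do
        list[case_insensitive and lower(words[i]) or words[i]] = true
    end
    local charset = case_insensitive
        and (S(lower(characters)) + S(upper(characters)))
        or  S(characters)
    -- C(...) makes the match-time function receive the matched word itself
    return Cmt(C(charset^1), function(_,position,word)
        if case_insensitive then word = lower(word) end
        return list[word] and position -- succeed only on a whole keyword
    end)
end

local keyword = exact_match { "do", "end", "if" }
print(lpeg.match(keyword,"end"))  --> 4 (position after the match)
print(lpeg.match(keyword,"done")) --> nil (not an exact keyword)

Because LPeg repetitions do not backtrack, charset^1 grabs the longest run of keyword letters and the whole pattern fails when that run is not itself a keyword; a real lexer would additionally guard word boundaries.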