author     Hans Hagen <pragma@wxs.nl>   2013-05-18 12:41:00 +0200
committer  Hans Hagen <pragma@wxs.nl>   2013-05-18 12:41:00 +0200
commit     0deffde58a47f5c85a46a7d999ff9cf817b81cef (patch)
tree       0e63c0c5221b50d7b15f5a05cb369f3104f9515c
parent     8ee5d015261386246844d543cf68c557aac3ec64 (diff)
download   context-0deffde58a47f5c85a46a7d999ff9cf817b81cef.tar.gz
beta 2013.05.18 12:41
-rw-r--r--  context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua   1100
-rw-r--r--  context/data/scite/lexers/scite-context-lexer.lua                       426
-rw-r--r--  context/data/scite/scite-context.properties                              17
-rw-r--r--  tex/context/base/cont-new.mkiv                                            2
-rw-r--r--  tex/context/base/cont-new.tmp                                            83
-rw-r--r--  tex/context/base/context-version.pdf                                    bin  4127 -> 4134 bytes
-rw-r--r--  tex/context/base/context.mkiv                                             4
-rw-r--r--  tex/context/base/context.tmp                                            513
-rw-r--r--  tex/context/base/status-files.pdf                                       bin  24755 -> 24757 bytes
-rw-r--r--  tex/context/base/status-lua.pdf                                         bin  211803 -> 211838 bytes
-rw-r--r--  tex/generic/context/luatex/luatex-fonts-merged.lua                        2
11 files changed, 1195 insertions, 952 deletions
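For orientation before the patch itself: the changes below repeatedly refer to the scintillua 3.24 lexer API, in which a token is no longer returned as a { tag, position } subtable but as two flat values, the style name and the position just past the match (see context.token in the patch). The following minimal Lua sketch, assuming only a stock lpeg installation, illustrates that convention; the token names and sample input are invented for illustration and are not part of the commit.

    local lpeg = require("lpeg")
    local P, R, Cc, Cp, Ct = lpeg.P, lpeg.R, lpeg.Cc, lpeg.Cp, lpeg.Ct

    -- same shape as context.token in the patched lexer: no subtable per
    -- token, just the style name followed by the position after the match
    local function token(name, patt)
        return patt * Cc(name) * Cp()
    end

    local number = token("number",  R("09")^1)
    local space  = token("default", P(" ")^1)

    -- a grammar is an ordered choice of tokens, collected into one flat table
    local grammar = Ct((number + space)^0)

    local tokens = lpeg.match(grammar, "10 20")
    -- tokens = { "number", 3, "default", 4, "number", 6 }
    for i = 1, #tokens, 2 do
        print(tokens[i], tokens[i + 1])
    end

One flat pair per token also means no small table gets allocated per match, which is why the hashed-subtable optimization mentioned in the comments below is no longer needed.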
diff --git a/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua b/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua new file mode 100644 index 000000000..7883177b4 --- /dev/null +++ b/context/data/scite/lexers/archive/scite-context-lexer-pre-3-3-1.lua @@ -0,0 +1,1100 @@ +local info = { + version = 1.324, + comment = "basics for scintilla lpeg lexer for context/metafun", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files", + comment = "contains copyrighted code from mitchell.att.foicica.com", + +} + +-- todo: move all code here +-- todo: explore adapted dll ... properties + init + +-- The fold and lex functions are copied and patched from original code by Mitchell (see +-- lexer.lua). All errors are mine. +-- +-- Starting with SciTE version 3.20 there is an issue with coloring. As we still lack +-- a connection with scite itself (properties as well as printing to the log pane) we +-- cannot trace this (on windows). As far as I can see, there are no fundamental +-- changes in lexer.lua or LexLPeg.cxx so it must be in scintilla itself. So for the +-- moment I stick to 3.10. Indicators are: no lexing of 'next' and 'goto <label>' in the +-- Lua lexer and no brace highlighting either. Interesting is that it does work ok in +-- the cld lexer (so the Lua code is okay). Also the fact that char-def.lua lexes fast +-- is a signal that the lexer quits somewhere halfway. +-- +-- After checking 3.24 and adapting to the new lexer tables things are okay again. So, +-- this version assumes 3.24 or higher. In 3.24 we have a different token result, i.e. no +-- longer a { tag, pattern } but just two return values. I didn't check other changes but +-- will do that when I run into issues. +-- +-- I've considered making a whole copy and patch the other functions too as we need +-- an extra nesting model. However, I don't want to maintain too much. An unfortunate +-- change in 3.03 is that no longer a script can be specified. This means that instead +-- of loading the extensions via the properties file, we now need to load them in our +-- own lexers, unless of course we replace lexer.lua completely (which adds another +-- installation issue). +-- +-- Another change has been that _LEXERHOME is no longer available. It looks like more and +-- more functionality gets dropped so maybe at some point we need to ship our own dll/so +-- files. For instance, I'd like to have access to the current filename and other scite +-- properties. For instance, we could cache some info with each file, if only we had +-- knowledge of what file we're dealing with. +-- +-- For huge files folding can be pretty slow and I do have some large ones that I keep +-- open all the time. Loading is normally no ussue, unless one has remembered the status +-- and the cursor is at the last line of a 200K line file. Optimizing the fold function +-- brought down loading of char-def.lua from 14 sec => 8 sec. Replacing the word_match +-- function and optimizing the lex function gained another 2+ seconds. A 6 second load +-- is quite ok for me. The changed lexer table structure (no subtables) brings loading +-- down to a few seconds. +-- +-- When the lexer path is copied to the textadept lexer path, and the theme definition to +-- theme path (as lexer.lua), the lexer works there as well. 
When I have time and motive +-- I will make a proper setup file to tune the look and feel a bit and associate suffixes +-- with the context lexer. The textadept editor has a nice style tracing option but lacks +-- the tabs for selecting files that scite has. It also has no integrated run that pipes +-- to the log pane (I wonder if it could borrow code from the console2 project). Interesting +-- is that the jit version of textadept crashes on lexing large files (and does not feel +-- faster either). +-- +-- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which +-- means that we need to have it frozen at the moment we load another lexer. Because spacing +-- is used to revert to a parent lexer we need to make sure that we load children as late +-- as possible in order not to get the wrong whitespace trigger. This took me quite a while +-- to figure out (not being that familiar with the internals). The lex and fold functions +-- have been optimized. It is a pitty that there is no proper print available. Another thing +-- needed is a default style in ourown theme style definition, as otherwise we get wrong +-- nested lexers, especially if they are larger than a view. This is the hardest part of +-- getting things right. +-- +-- Eventually it might be safer to copy the other methods from lexer.lua here as well so +-- that we have no dependencies, apart from the c library (for which at some point the api +-- will be stable I hope). +-- +-- It's a pitty that there is no scintillua library for the OSX version of scite. Even +-- better would be to have the scintillua library as integral part of scite as that way I +-- could use OSX alongside windows and linux (depending on needs). Also nice would be to +-- have a proper interface to scite then because currently the lexer is rather isolated and the +-- lua version does not provide all standard libraries. It would also be good to have lpeg +-- support in the regular scite lua extension (currently you need to pick it up from someplace +-- else). + +local lpeg = require 'lpeg' + +local R, P, S, C, V, Cp, Cs, Ct, Cmt, Cc, Cf, Cg, Carg = lpeg.R, lpeg.P, lpeg.S, lpeg.C, lpeg.V, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc, lpeg.Cf, lpeg.Cg, lpeg.Carg +local lpegmatch = lpeg.match +local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub +local concat = table.concat +local global = _G +local type, next, setmetatable, rawset = type, next, setmetatable, rawset + +if lexer then + -- in recent c++ code the lexername and loading is hard coded +elseif _LEXERHOME then + dofile(_LEXERHOME .. '/lexer.lua') -- pre 3.03 situation +else + dofile('lexer.lua') -- whatever +end + +lexer.context = lexer.context or { } +local context = lexer.context + +context.patterns = context.patterns or { } +local patterns = context.patterns + +lexer._CONTEXTEXTENSIONS = true + +local locations = { + -- lexer.context.path, + "data", -- optional data directory + "..", -- regular scite directory +} + +local function collect(name) +-- local definitions = loadfile(name .. ".luc") or loadfile(name .. ".lua") + local okay, definitions = pcall(function () return require(name) end) + if okay then + if type(definitions) == "function" then + definitions = definitions() + end + if type(definitions) == "table" then + return definitions + end + end +end + +function context.loaddefinitions(name) + for i=1,#locations do + local data = collect(locations[i] .. "/" .. 
name) + if data then + return data + end + end +end + +-- maybe more efficient: + +function context.word_match(words,word_chars,case_insensitive) + local chars = '%w_' -- maybe just "" when word_chars + if word_chars then + chars = '^([' .. chars .. gsub(word_chars,'([%^%]%-])', '%%%1') ..']+)' + else + chars = '^([' .. chars ..']+)' + end + if case_insensitive then + local word_list = { } + for i=1,#words do + word_list[lower(words[i])] = true + end + return P(function(input, index) + local s, e, word = find(input,chars,index) + return word and word_list[lower(word)] and e + 1 or nil + end) + else + local word_list = { } + for i=1,#words do + word_list[words[i]] = true + end + return P(function(input, index) + local s, e, word = find(input,chars,index) + return word and word_list[word] and e + 1 or nil + end) + end +end + +local idtoken = R("az","AZ","\127\255","__") +local digit = R("09") +local sign = S("+-") +local period = P(".") +local space = S(" \n\r\t\f\v") + +patterns.idtoken = idtoken + +patterns.digit = digit +patterns.sign = sign +patterns.period = period + +patterns.cardinal = digit^1 +patterns.integer = sign^-1 * digit^1 + +patterns.real = + sign^-1 * ( -- at most one + digit^1 * period * digit^0 -- 10.0 10. + + digit^0 * period * digit^1 -- 0.10 .10 + + digit^1 -- 10 + ) + +patterns.restofline = (1-S("\n\r"))^1 +patterns.space = space +patterns.spacing = space^1 +patterns.nospacing = (1-space)^1 +patterns.anything = P(1) + +local endof = S("\n\r\f") + +patterns.startofline = P(function(input,index) + return (index == 1 or lpegmatch(endof,input,index-1)) and index +end) + +function context.exact_match(words,word_chars,case_insensitive) + local characters = concat(words) + local pattern -- the concat catches _ etc + if word_chars == true or word_chars == false or word_chars == nil then + word_chars = "" + end + if type(word_chars) == "string" then + pattern = S(characters) + idtoken + if case_insensitive then + pattern = pattern + S(upper(characters)) + S(lower(characters)) + end + if word_chars ~= "" then + pattern = pattern + S(word_chars) + end + elseif word_chars then + pattern = word_chars + end + if case_insensitive then + local list = { } + for i=1,#words do + list[lower(words[i])] = true + end + return Cmt(pattern^1, function(_,i,s) + return list[lower(s)] -- and i or nil + end) + else + local list = { } + for i=1,#words do + list[words[i]] = true + end + return Cmt(pattern^1, function(_,i,s) + return list[s] -- and i or nil + end) + end +end + +-- spell checking (we can only load lua files) +-- +-- return { +-- min = 3, +-- max = 40, +-- n = 12345, +-- words = { +-- ["someword"] = "someword", +-- ["anotherword"] = "Anotherword", +-- }, +-- } + +local lists = { } + +function context.setwordlist(tag,limit) -- returns hash (lowercase keys and original values) + if not tag or tag == "" then + return false, 3 + end + local list = lists[tag] + if not list then + list = context.loaddefinitions("spell-" .. 
tag) + if not list or type(list) ~= "table" then + list = { words = false, min = 3 } + else + list.words = list.words or false + list.min = list.min or 3 + end + lists[tag] = list + end + return list.words, list.min +end + +patterns.wordtoken = R("az","AZ","\127\255") +patterns.wordpattern = patterns.wordtoken^3 -- todo: if limit and #s < limit then + +-- -- pre 3.24: +-- +-- function context.checkedword(validwords,validminimum,s,i) -- ,limit +-- if not validwords then -- or #s < validminimum then +-- return true, { "text", i } -- { "default", i } +-- else +-- -- keys are lower +-- local word = validwords[s] +-- if word == s then +-- return true, { "okay", i } -- exact match +-- elseif word then +-- return true, { "warning", i } -- case issue +-- else +-- local word = validwords[lower(s)] +-- if word == s then +-- return true, { "okay", i } -- exact match +-- elseif word then +-- return true, { "warning", i } -- case issue +-- elseif upper(s) == s then +-- return true, { "warning", i } -- probably a logo or acronym +-- else +-- return true, { "error", i } +-- end +-- end +-- end +-- end + +function context.checkedword(validwords,validminimum,s,i) -- ,limit + if not validwords then -- or #s < validminimum then + return true, "text", i -- { "default", i } + else + -- keys are lower + local word = validwords[s] + if word == s then + return true, "okay", i -- exact match + elseif word then + return true, "warning", i -- case issue + else + local word = validwords[lower(s)] + if word == s then + return true, "okay", i -- exact match + elseif word then + return true, "warning", i -- case issue + elseif upper(s) == s then + return true, "warning", i -- probably a logo or acronym + else + return true, "error", i + end + end + end +end + +function context.styleofword(validwords,validminimum,s) -- ,limit + if not validwords or #s < validminimum then + return "text" + else + -- keys are lower + local word = validwords[s] + if word == s then + return "okay" -- exact match + elseif word then + return "warning" -- case issue + else + local word = validwords[lower(s)] + if word == s then + return "okay" -- exact match + elseif word then + return "warning" -- case issue + elseif upper(s) == s then + return "warning" -- probably a logo or acronym + else + return "error" + end + end + end +end + +-- overloaded functions + +local FOLD_BASE = SC_FOLDLEVELBASE +local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG +local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG + +local get_style_at = GetStyleAt +local get_property = GetProperty +local get_indent_amount = GetIndentAmount + +local h_table, b_table, n_table = { }, { }, { } + +setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end }) +setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end }) +setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end }) + +-- -- todo: move the local functions outside (see below) .. 
old variant < 3.24 +-- +-- local newline = P("\r\n") + S("\r\n") +-- local p_yes = Cp() * Cs((1-newline)^1) * newline^-1 +-- local p_nop = newline +-- +-- local function fold_by_parsing(text,start_pos,start_line,start_level,lexer) +-- local foldsymbols = lexer._foldsymbols +-- if not foldsymbols then +-- return { } +-- end +-- local patterns = foldsymbols._patterns +-- if not patterns then +-- return { } +-- end +-- local nofpatterns = #patterns +-- if nofpatterns == 0 then +-- return { } +-- end +-- local folds = { } +-- local line_num = start_line +-- local prev_level = start_level +-- local current_level = prev_level +-- local validmatches = foldsymbols._validmatches +-- if not validmatches then +-- validmatches = { } +-- for symbol, matches in next, foldsymbols do -- whatever = { start = 1, stop = -1 } +-- if not find(symbol,"^_") then -- brrr +-- for s, _ in next, matches do +-- validmatches[s] = true +-- end +-- end +-- end +-- foldsymbols._validmatches = validmatches +-- end +-- -- of course we could instead build a nice lpeg checker .. something for +-- -- a rainy day with a stack of new cd's at hand +-- local function action_y(pos,line) +-- for i=1,nofpatterns do +-- for s, m in gmatch(line,patterns[i]) do +-- if validmatches[m] then +-- local symbols = foldsymbols[get_style_at(start_pos + pos + s - 1)] +-- if symbols then +-- local action = symbols[m] +-- if action then +-- if type(action) == 'number' then -- we could store this in validmatches if there was only one symbol category +-- current_level = current_level + action +-- else +-- current_level = current_level + action(text,pos,line,s,m) +-- end +-- if current_level < FOLD_BASE then +-- current_level = FOLD_BASE +-- end +-- end +-- end +-- end +-- end +-- end +-- if current_level > prev_level then +-- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER } +-- else +-- folds[line_num] = n_table[prev_level] -- { prev_level } +-- end +-- prev_level = current_level +-- line_num = line_num + 1 +-- end +-- local function action_n() +-- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK } +-- line_num = line_num + 1 +-- end +-- if lexer._reset_parser then +-- lexer._reset_parser() +-- end +-- local lpegpattern = (p_yes/action_y + p_nop/action_n)^0 -- not too efficient but indirect function calls are neither but +-- lpegmatch(lpegpattern,text) -- keys are not pressed that fast ... 
large files are slow anyway +-- return folds +-- end + +-- The 3.24 variant; no longer subtable optimization is needed: + +local newline = P("\r\n") + S("\r\n") +local p_yes = Cp() * Cs((1-newline)^1) * newline^-1 +local p_nop = newline + +local folders = { } + +local function fold_by_parsing(text,start_pos,start_line,start_level,lexer) + local folder = folders[lexer] + if not folder then + -- + local pattern, folds, text, start_pos, line_num, prev_level, current_level + -- + local fold_symbols = lexer._foldsymbols + local fold_pattern = lexer._foldpattern -- use lpeg instead (context extension) + -- + if fold_pattern then + -- if no functions are found then we could have a faster one + + -- fold_pattern = Cp() * C(fold_pattern) * Carg(1) / function(s,match,pos) + -- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)] + -- local l = symbols and symbols[match] + -- if l then + -- local t = type(l) + -- if t == 'number' then + -- current_level = current_level + l + -- elseif t == 'function' then + -- current_level = current_level + l(text, pos, line, s, match) + -- end + -- end + -- end + -- fold_pattern = (fold_pattern + P(1))^0 + -- local action_y = function(pos,line) + -- lpegmatch(fold_pattern,line,1,pos) + -- folds[line_num] = prev_level + -- if current_level > prev_level then + -- folds[line_num] = prev_level + FOLD_HEADER + -- end + -- if current_level < FOLD_BASE then + -- current_level = FOLD_BASE + -- end + -- prev_level = current_level + -- line_num = line_num + 1 + -- end + -- local action_n = function() + -- folds[line_num] = prev_level + FOLD_BLANK + -- line_num = line_num + 1 + -- end + -- pattern = (p_yes/action_y + p_nop/action_n)^0 + + fold_pattern = Cp() * C(fold_pattern) / function(s,match) + local symbols = fold_symbols[get_style_at(start_pos + s)] + if symbols then + local l = symbols[match] + if l then + current_level = current_level + l + end + end + end + local action_y = function() + folds[line_num] = prev_level + if current_level > prev_level then + folds[line_num] = prev_level + FOLD_HEADER + end + if current_level < FOLD_BASE then + current_level = FOLD_BASE + end + prev_level = current_level + line_num = line_num + 1 + end + local action_n = function() + folds[line_num] = prev_level + FOLD_BLANK + line_num = line_num + 1 + end + pattern = ((fold_pattern + (1-newline))^1 * newline / action_y + newline/action_n)^0 + + else + -- the traditional one but a bit optimized + local fold_symbols_patterns = fold_symbols._patterns + local action_y = function(pos,line) + for j = 1, #fold_symbols_patterns do + for s, match in gmatch(line,fold_symbols_patterns[j]) do -- '()('..patterns[i]..')' + local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)] + local l = symbols and symbols[match] + local t = type(l) + if t == 'number' then + current_level = current_level + l + elseif t == 'function' then + current_level = current_level + l(text, pos, line, s, match) + end + end + end + folds[line_num] = prev_level + if current_level > prev_level then + folds[line_num] = prev_level + FOLD_HEADER + end + if current_level < FOLD_BASE then + current_level = FOLD_BASE + end + prev_level = current_level + line_num = line_num + 1 + end + local action_n = function() + folds[line_num] = prev_level + FOLD_BLANK + line_num = line_num + 1 + end + pattern = (p_yes/action_y + p_nop/action_n)^0 + end + -- + local reset_parser = lexer._reset_parser + -- + folder = function(_text_,_start_pos_,_start_line_,_start_level_) + if reset_parser then + reset_parser() + end + folds 
= { } + text = _text_ + start_pos = _start_pos_ + line_num = _start_line_ + prev_level = _start_level_ + current_level = prev_level + lpegmatch(pattern,text) +-- return folds +local t = folds +folds = nil +return t -- so folds can be collected + end + folders[lexer] = folder + end + return folder(text,start_pos,start_line,start_level,lexer) +end + +-- local function fold_by_indentation(text,start_pos,start_line,start_level) +-- local folds = { } +-- local current_line = start_line +-- local prev_level = start_level +-- for line in gmatch(text,'[\t ]*(.-)\r?\n') do +-- if line ~= "" then +-- local current_level = FOLD_BASE + get_indent_amount(current_line) +-- if current_level > prev_level then -- next level +-- local i = current_line - 1 +-- while true do +-- local f = folds[i] +-- if f and f[2] == FOLD_BLANK then +-- i = i - 1 +-- else +-- break +-- end +-- end +-- local f = folds[i] +-- if f then +-- f[2] = FOLD_HEADER +-- end -- low indent +-- folds[current_line] = n_table[current_level] -- { current_level } -- high indent +-- elseif current_level < prev_level then -- prev level +-- local f = folds[current_line - 1] +-- if f then +-- f[1] = prev_level -- high indent +-- end +-- folds[current_line] = n_table[current_level] -- { current_level } -- low indent +-- else -- same level +-- folds[current_line] = n_table[prev_level] -- { prev_level } +-- end +-- prev_level = current_level +-- else +-- folds[current_line] = b_table[prev_level] -- { prev_level, FOLD_BLANK } +-- end +-- current_line = current_line + 1 +-- end +-- return folds +-- end + +-- local function fold_by_indentation(text,start_pos,start_line,start_level) +-- local folds = { } +-- local current_line = start_line +-- local prev_level = start_level +-- for line in gmatch(text,'[\t ]*(.-)\r?\n') do +-- if line ~= '' then +-- local current_level = FOLD_BASE + get_indent_amount(current_line) +-- if current_level > prev_level then -- next level +-- local i = current_line - 1 +-- local f +-- while true do +-- f = folds[i] +-- if not f then +-- break +-- elseif f[2] == FOLD_BLANK then +-- i = i - 1 +-- else +-- f[2] = FOLD_HEADER -- low indent +-- break +-- end +-- end +-- folds[current_line] = { current_level } -- high indent +-- elseif current_level < prev_level then -- prev level +-- local f = folds[current_line - 1] +-- if f then +-- f[1] = prev_level -- high indent +-- end +-- folds[current_line] = { current_level } -- low indent +-- else -- same level +-- folds[current_line] = { prev_level } +-- end +-- prev_level = current_level +-- else +-- folds[current_line] = { prev_level, FOLD_BLANK } +-- end +-- current_line = current_line + 1 +-- end +-- for line, level in next, folds do +-- folds[line] = level[1] + (level[2] or 0) +-- end +-- return folds +-- end + +local folds, current_line, prev_level + +local function action_y() + local current_level = FOLD_BASE + get_indent_amount(current_line) + if current_level > prev_level then -- next level + local i = current_line - 1 + local f + while true do + f = folds[i] + if not f then + break + elseif f[2] == FOLD_BLANK then + i = i - 1 + else + f[2] = FOLD_HEADER -- low indent + break + end + end + folds[current_line] = { current_level } -- high indent + elseif current_level < prev_level then -- prev level + local f = folds[current_line - 1] + if f then + f[1] = prev_level -- high indent + end + folds[current_line] = { current_level } -- low indent + else -- same level + folds[current_line] = { prev_level } + end + prev_level = current_level + current_line = current_line + 1 +end + 
+local function action_n() + folds[current_line] = { prev_level, FOLD_BLANK } + current_line = current_line + 1 +end + +local pattern = ( S("\t ")^0 * ( (1-S("\n\r"))^1 / action_y + P(true) / action_n) * newline )^0 + +local function fold_by_indentation(text,start_pos,start_line,start_level) + -- initialize + folds = { } + current_line = start_line + prev_level = start_level + -- define + -- -- not here .. pattern binds and local functions are not frozen + -- analyze + lpegmatch(pattern,text) + -- flatten + for line, level in next, folds do + folds[line] = level[1] + (level[2] or 0) + end + -- done +-- return folds +local t = folds +folds = nil +return t -- so folds can be collected +end + +local function fold_by_line(text,start_pos,start_line,start_level) + local folds = { } + -- can also be lpeg'd + for _ in gmatch(text,".-\r?\n") do + folds[start_line] = n_table[start_level] -- { start_level } + start_line = start_line + 1 + end + return folds +end + +local threshold_by_lexer = 512 * 1024 -- we don't know the filesize yet +local threshold_by_parsing = 512 * 1024 -- we don't know the filesize yet +local threshold_by_indentation = 512 * 1024 -- we don't know the filesize yet +local threshold_by_line = 512 * 1024 -- we don't know the filesize yet + +function context.fold(text,start_pos,start_line,start_level) -- hm, we had size thresholds .. where did they go + if text == '' then + return { } + end + local lexer = global._LEXER + local fold_by_lexer = lexer._fold + local fold_by_symbols = lexer._foldsymbols + local filesize = 0 -- we don't know that + if fold_by_lexer then + if filesize <= threshold_by_lexer then + return fold_by_lexer(text,start_pos,start_line,start_level,lexer) + end + elseif fold_by_symbols then -- and get_property('fold.by.parsing',1) > 0 then + if filesize <= threshold_by_parsing then + return fold_by_parsing(text,start_pos,start_line,start_level,lexer) + end + elseif get_property('fold.by.indentation',1) > 0 then + if filesize <= threshold_by_indentation then + return fold_by_indentation(text,start_pos,start_line,start_level,lexer) + end + elseif get_property('fold.by.line',1) > 0 then + if filesize <= threshold_by_line then + return fold_by_line(text,start_pos,start_line,start_level,lexer) + end + end + return { } +end + +-- The following code is mostly unchanged: + +local function add_rule(lexer, id, rule) + if not lexer._RULES then + lexer._RULES = {} + lexer._RULEORDER = {} + end + lexer._RULES[id] = rule + lexer._RULEORDER[#lexer._RULEORDER + 1] = id +end + +local function add_style(lexer, token_name, style) + local len = lexer._STYLES.len + if len == 32 then + len = len + 8 + end + if len >= 128 then + print('Too many styles defined (128 MAX)') + end + lexer._TOKENS[token_name] = len + lexer._STYLES[len] = style + lexer._STYLES.len = len + 1 +end + +local function join_tokens(lexer) + local patterns, order = lexer._RULES, lexer._RULEORDER + local token_rule = patterns[order[1]] + for i=2,#order do + token_rule = token_rule + patterns[order[i]] + end + lexer._TOKENRULE = token_rule + return lexer._TOKENRULE +end + +local function add_lexer(grammar, lexer, token_rule) + local token_rule = join_tokens(lexer) + local lexer_name = lexer._NAME + local children = lexer._CHILDREN + for i=1,#children do + local child = children[i] + if child._CHILDREN then + add_lexer(grammar, child) + end + local child_name = child._NAME + local rules = child._EMBEDDEDRULES[lexer_name] + local rules_token_rule = grammar['__'..child_name] or rules.token_rule + grammar[child_name] = 
(-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name) + local embedded_child = '_' .. child_name + grammar[embedded_child] = rules.start_rule * (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 + token_rule = V(embedded_child) + token_rule + end + grammar['__' .. lexer_name] = token_rule + grammar[lexer_name] = token_rule^0 +end + +local function build_grammar(lexer, initial_rule) + local children = lexer._CHILDREN + if children then + local lexer_name = lexer._NAME + if not initial_rule then + initial_rule = lexer_name + end + local grammar = { initial_rule } + add_lexer(grammar, lexer) + lexer._INITIALRULE = initial_rule + lexer._GRAMMAR = Ct(P(grammar)) + else + lexer._GRAMMAR = Ct(join_tokens(lexer)^0) + end +end + +-- so far. We need these local functions in the next one. +-- +-- Before 3.24 we had tokens[..] = { category, position }, now it's a two values. + +local lineparsers = { } + +function context.lex(text,init_style) + local lexer = global._LEXER + local grammar = lexer._GRAMMAR + if not grammar then + return { } + elseif lexer._LEXBYLINE then -- we could keep token + local tokens = { } + local offset = 0 + local noftokens = 0 + -- -- pre 3.24 + -- + -- for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg + -- local line_tokens = lpegmatch(grammar,line) + -- if line_tokens then + -- for i=1,#line_tokens do + -- local token = line_tokens[i] + -- token[2] = token[2] + offset + -- noftokens = noftokens + 1 + -- tokens[noftokens] = token + -- end + -- end + -- offset = offset + #line + -- if noftokens > 0 and tokens[noftokens][2] ~= offset then + -- noftokens = noftokens + 1 + -- tokens[noftokens] = { 'default', offset + 1 } + -- end + -- end + + -- for line in gmatch(text,'[^\r\n]*\r?\n?') do + -- local line_tokens = lpegmatch(grammar,line) + -- if line_tokens then + -- for i=1,#line_tokens,2 do + -- noftokens = noftokens + 1 + -- tokens[noftokens] = line_tokens[i] + -- noftokens = noftokens + 1 + -- tokens[noftokens] = line_tokens[i + 1] + offset + -- end + -- end + -- offset = offset + #line + -- if noftokens > 0 and tokens[noftokens] ~= offset then + -- noftokens = noftokens + 1 + -- tokens[noftokens] = 'default' + -- noftokens = noftokens + 1 + -- tokens[noftokens] = offset + 1 + -- end + -- end + + local lineparser = lineparsers[lexer] + if not lineparser then -- probably a cmt is more efficient + lineparser = C((1-newline)^0 * newline) / function(line) + local length = #line + local line_tokens = length > 0 and lpegmatch(grammar,line) + if line_tokens then + for i=1,#line_tokens,2 do + noftokens = noftokens + 1 + tokens[noftokens] = line_tokens[i] + noftokens = noftokens + 1 + tokens[noftokens] = line_tokens[i + 1] + offset + end + end + offset = offset + length + if noftokens > 0 and tokens[noftokens] ~= offset then + noftokens = noftokens + 1 + tokens[noftokens] = 'default' + noftokens = noftokens + 1 + tokens[noftokens] = offset + 1 + end + end + lineparser = lineparser^0 + lineparsers[lexer] = lineparser + end + lpegmatch(lineparser,text) + return tokens + + elseif lexer._CHILDREN then + -- as we cannot print, tracing is not possible ... 
this might change as we can as well + -- generate them all in one go (sharing as much as possible) + local hash = lexer._HASH -- hm, was _hash + if not hash then + hash = { } + lexer._HASH = hash + end + grammar = hash[init_style] + if grammar then + lexer._GRAMMAR = grammar + else + for style, style_num in next, lexer._TOKENS do + if style_num == init_style then + -- the name of the lexers is filtered from the whitespace + -- specification + local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME + if lexer._INITIALRULE ~= lexer_name then + grammar = hash[lexer_name] + if not grammar then + build_grammar(lexer,lexer_name) + grammar = lexer._GRAMMAR + hash[lexer_name] = grammar + end + end + break + end + end + grammar = grammar or lexer._GRAMMAR + hash[init_style] = grammar + end + return lpegmatch(grammar,text) + else + return lpegmatch(grammar,text) + end +end + +-- todo: keywords: one lookup and multiple matches + +-- function context.token(name, patt) +-- return Ct(patt * Cc(name) * Cp()) +-- end +-- +-- -- hm, changed in 3.24 .. no longer a table + +function context.token(name, patt) + return patt * Cc(name) * Cp() +end + +lexer.fold = context.fold +lexer.lex = context.lex +lexer.token = context.token +lexer.exact_match = context.exact_match + +-- helper .. alas ... the lexer's lua instance is rather crippled .. not even +-- math is part of it + +local floor = math and math.floor +local char = string.char + +if not floor then + + floor = function(n) + return tonumber(string.format("%d",n)) + end + + math = math or { } + + math.floor = floor + +end + +local function utfchar(n) + if n < 0x80 then + return char(n) + elseif n < 0x800 then + return char( + 0xC0 + floor(n/0x40), + 0x80 + (n % 0x40) + ) + elseif n < 0x10000 then + return char( + 0xE0 + floor(n/0x1000), + 0x80 + (floor(n/0x40) % 0x40), + 0x80 + (n % 0x40) + ) + elseif n < 0x40000 then + return char( + 0xF0 + floor(n/0x40000), + 0x80 + floor(n/0x1000), + 0x80 + (floor(n/0x40) % 0x40), + 0x80 + (n % 0x40) + ) + else + -- return char( + -- 0xF1 + floor(n/0x1000000), + -- 0x80 + floor(n/0x40000), + -- 0x80 + floor(n/0x1000), + -- 0x80 + (floor(n/0x40) % 0x40), + -- 0x80 + (n % 0x40) + -- ) + return "?" 
+ end +end + +context.utfchar = utfchar + +-- a helper from l-lpeg: + +local gmatch = string.gmatch + +local function make(t) + local p + for k, v in next, t do + if not p then + if next(v) then + p = P(k) * make(v) + else + p = P(k) + end + else + if next(v) then + p = p + P(k) * make(v) + else + p = p + P(k) + end + end + end + return p +end + +function lpeg.utfchartabletopattern(list) + local tree = { } + for i=1,#list do + local t = tree + for c in gmatch(list[i],".") do + if not t[c] then + t[c] = { } + end + t = t[c] + end + end + return make(tree) +end + +-- patterns.invisibles = +-- P(utfchar(0x00A0)) -- nbsp +-- + P(utfchar(0x2000)) -- enquad +-- + P(utfchar(0x2001)) -- emquad +-- + P(utfchar(0x2002)) -- enspace +-- + P(utfchar(0x2003)) -- emspace +-- + P(utfchar(0x2004)) -- threeperemspace +-- + P(utfchar(0x2005)) -- fourperemspace +-- + P(utfchar(0x2006)) -- sixperemspace +-- + P(utfchar(0x2007)) -- figurespace +-- + P(utfchar(0x2008)) -- punctuationspace +-- + P(utfchar(0x2009)) -- breakablethinspace +-- + P(utfchar(0x200A)) -- hairspace +-- + P(utfchar(0x200B)) -- zerowidthspace +-- + P(utfchar(0x202F)) -- narrownobreakspace +-- + P(utfchar(0x205F)) -- math thinspace + +patterns.invisibles = lpeg.utfchartabletopattern { + utfchar(0x00A0), -- nbsp + utfchar(0x2000), -- enquad + utfchar(0x2001), -- emquad + utfchar(0x2002), -- enspace + utfchar(0x2003), -- emspace + utfchar(0x2004), -- threeperemspace + utfchar(0x2005), -- fourperemspace + utfchar(0x2006), -- sixperemspace + utfchar(0x2007), -- figurespace + utfchar(0x2008), -- punctuationspace + utfchar(0x2009), -- breakablethinspace + utfchar(0x200A), -- hairspace + utfchar(0x200B), -- zerowidthspace + utfchar(0x202F), -- narrownobreakspace + utfchar(0x205F), -- math thinspace +} + +-- now we can make: + +patterns.iwordtoken = patterns.wordtoken - patterns.invisibles +patterns.iwordpattern = patterns.iwordtoken^3 + +-- require("themes/scite-context-theme") + +-- In order to deal with some bug in additional styles (I have no cue what is +-- wrong, but additional styles get ignored and clash somehow) I just copy the +-- original lexer code ... see original for comments. diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/lexers/scite-context-lexer.lua index 8d06b4923..816d9583b 100644 --- a/context/data/scite/lexers/scite-context-lexer.lua +++ b/context/data/scite/lexers/scite-context-lexer.lua @@ -10,9 +10,15 @@ local info = { -- todo: move all code here -- todo: explore adapted dll ... properties + init +-- todo: play with hotspot and other properties -- The fold and lex functions are copied and patched from original code by Mitchell (see --- lexer.lua). All errors are mine. +-- lexer.lua). All errors are mine. The ability to use lpeg is a real nice adition and a +-- brilliant move. The code is a byproduct of the (mainly Lua based) textadept (still a +-- rapidly moving target) that unfortunately misses a realtime output pane. On the other +-- hand, SciTE is somewhat crippled by the fact that we cannot pop in our own (language +-- dependent) lexer into the output pane (somehow the errorlist lexer is hard coded into +-- the editor). Hopefully that will change some day. -- -- Starting with SciTE version 3.20 there is an issue with coloring. As we still lack -- a connection with scite itself (properties as well as printing to the log pane) we @@ -26,7 +32,12 @@ local info = { -- After checking 3.24 and adapting to the new lexer tables things are okay again. 
So, -- this version assumes 3.24 or higher. In 3.24 we have a different token result, i.e. no -- longer a { tag, pattern } but just two return values. I didn't check other changes but --- will do that when I run into issues. +-- will do that when I run into issues. I had optimized these small tables by hashing which +-- was more efficient but this is no longer needed. +-- +-- In 3.3.1 another major change took place: some helper constants (maybe they're no +-- longer constants) and functions were moved into the lexer modules namespace but the +-- functions are assigned to the Lua module afterward so we cannot alias them beforehand. -- -- I've considered making a whole copy and patch the other functions too as we need -- an extra nesting model. However, I don't want to maintain too much. An unfortunate @@ -89,26 +100,38 @@ local concat = table.concat local global = _G local type, next, setmetatable, rawset = type, next, setmetatable, rawset -if lexer then - -- in recent c++ code the lexername and loading is hard coded -elseif _LEXERHOME then - dofile(_LEXERHOME .. '/lexer.lua') -- pre 3.03 situation -else - dofile('lexer.lua') -- whatever +-- less confusing as we also use lexer for the current lexer and local _M = lexer is just ugly + +local lexers = lexer + +-- these helpers are set afterwards so we delay their initialization ... there is no need to alias each time again + +local get_style_at, get_indent_amount, get_property, get_fold_level, FOLD_BASE, FOLD_HEADER, FOLD_BLANK, initialize + +initialize = function() + FOLD_BASE = lexers.FOLD_BASE or SC_FOLDLEVELBASE + FOLD_HEADER = lexers.FOLD_HEADER or SC_FOLDLEVELHEADERFLAG + FOLD_BLANK = lexers.FOLD_BLANK or SC_FOLDLEVELWHITEFLAG + get_style_at = lexers.get_style_at or GetStyleAt + get_indent_amount = lexers.get_indent_amount or GetIndentAmount + get_property = lexers.get_property or GetProperty + get_fold_level = lexers.get_fold_level or GetFoldLevel + -- + initialize = nil end -local LEXER = lexer +-- we create our own extra namespace for extensions and helpers -lexer.context = lexer.context or { } -local context = lexer.context +lexers.context = lexers.context or { } +local context = lexers.context context.patterns = context.patterns or { } local patterns = context.patterns -lexer._CONTEXTEXTENSIONS = true +lexers._CONTEXTEXTENSIONS = true local locations = { - -- lexer.context.path, + -- lexers.context.path, "data", -- optional data directory "..", -- regular scite directory } @@ -135,8 +158,6 @@ function context.loaddefinitions(name) end end --- maybe more efficient: - function context.word_match(words,word_chars,case_insensitive) local chars = '%w_' -- maybe just "" when word_chars if word_chars then @@ -270,36 +291,9 @@ end patterns.wordtoken = R("az","AZ","\127\255") patterns.wordpattern = patterns.wordtoken^3 -- todo: if limit and #s < limit then --- -- pre 3.24: --- --- function context.checkedword(validwords,validminimum,s,i) -- ,limit --- if not validwords then -- or #s < validminimum then --- return true, { "text", i } -- { "default", i } --- else --- -- keys are lower --- local word = validwords[s] --- if word == s then --- return true, { "okay", i } -- exact match --- elseif word then --- return true, { "warning", i } -- case issue --- else --- local word = validwords[lower(s)] --- if word == s then --- return true, { "okay", i } -- exact match --- elseif word then --- return true, { "warning", i } -- case issue --- elseif upper(s) == s then --- return true, { "warning", i } -- probably a logo or acronym --- else --- return 
true, { "error", i } --- end --- end --- end --- end - function context.checkedword(validwords,validminimum,s,i) -- ,limit if not validwords then -- or #s < validminimum then - return true, "text", i -- { "default", i } + return true, "text", i -- true, "default", i else -- keys are lower local word = validwords[s] @@ -349,91 +343,11 @@ end -- overloaded functions -local h_table, b_table, n_table = { }, { }, { } - -setmetatable(h_table, { __index = function(t,level) local v = { level, LEXER.FOLD_HEADER } t[level] = v return v end }) -setmetatable(b_table, { __index = function(t,level) local v = { level, LEXER.FOLD_BLANK } t[level] = v return v end }) -setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end }) - --- -- todo: move the local functions outside (see below) .. old variant < 3.24 --- --- local newline = P("\r\n") + S("\r\n") --- local p_yes = Cp() * Cs((1-newline)^1) * newline^-1 --- local p_nop = newline --- --- local function fold_by_parsing(text,start_pos,start_line,start_level,lexer) --- local foldsymbols = lexer._foldsymbols --- if not foldsymbols then --- return { } --- end --- local patterns = foldsymbols._patterns --- if not patterns then --- return { } --- end --- local nofpatterns = #patterns --- if nofpatterns == 0 then --- return { } --- end --- local folds = { } --- local line_num = start_line --- local prev_level = start_level --- local current_level = prev_level --- local validmatches = foldsymbols._validmatches --- if not validmatches then --- validmatches = { } --- for symbol, matches in next, foldsymbols do -- whatever = { start = 1, stop = -1 } --- if not find(symbol,"^_") then -- brrr --- for s, _ in next, matches do --- validmatches[s] = true --- end --- end --- end --- foldsymbols._validmatches = validmatches --- end --- -- of course we could instead build a nice lpeg checker .. something for --- -- a rainy day with a stack of new cd's at hand --- local function action_y(pos,line) --- for i=1,nofpatterns do --- for s, m in gmatch(line,patterns[i]) do --- if validmatches[m] then --- local symbols = foldsymbols[get_style_at(start_pos + pos + s - 1)] --- if symbols then --- local action = symbols[m] --- if action then --- if type(action) == 'number' then -- we could store this in validmatches if there was only one symbol category --- current_level = current_level + action --- else --- current_level = current_level + action(text,pos,line,s,m) --- end --- if current_level < FOLD_BASE then --- current_level = FOLD_BASE --- end --- end --- end --- end --- end --- end --- if current_level > prev_level then --- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER } --- else --- folds[line_num] = n_table[prev_level] -- { prev_level } --- end --- prev_level = current_level --- line_num = line_num + 1 --- end --- local function action_n() --- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK } --- line_num = line_num + 1 --- end --- if lexer._reset_parser then --- lexer._reset_parser() --- end --- local lpegpattern = (p_yes/action_y + p_nop/action_n)^0 -- not too efficient but indirect function calls are neither but --- lpegmatch(lpegpattern,text) -- keys are not pressed that fast ... 
large files are slow anyway --- return folds --- end +local h_table, b_table, n_table = { }, { }, { } -- from the time small tables were used (optimization) --- The 3.24 variant; no longer subtable optimization is needed: +setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEADER } t[level] = v return v end }) +setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end }) +setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end }) local newline = P("\r\n") + S("\r\n") local p_yes = Cp() * Cs((1-newline)^1) * newline^-1 @@ -450,45 +364,8 @@ local function fold_by_parsing(text,start_pos,start_line,start_level,lexer) local fold_symbols = lexer._foldsymbols local fold_pattern = lexer._foldpattern -- use lpeg instead (context extension) -- -local FOLD_BASE = LEXER.FOLD_BASE or SC_FOLDLEVELBASE -local FOLD_HEADER = LEXER.FOLD_HEADER or SC_FOLDLEVELHEADERFLAG -local FOLD_BLANK = LEXER.FOLD_BLANK or SC_FOLDLEVELWHITEFLAG -local get_style_at = LEXER.get_style_at or GetStyleAt - -- if fold_pattern then -- if no functions are found then we could have a faster one - - -- fold_pattern = Cp() * C(fold_pattern) * Carg(1) / function(s,match,pos) - -- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)] - -- local l = symbols and symbols[match] - -- if l then - -- local t = type(l) - -- if t == 'number' then - -- current_level = current_level + l - -- elseif t == 'function' then - -- current_level = current_level + l(text, pos, line, s, match) - -- end - -- end - -- end - -- fold_pattern = (fold_pattern + P(1))^0 - -- local action_y = function(pos,line) - -- lpegmatch(fold_pattern,line,1,pos) - -- folds[line_num] = prev_level - -- if current_level > prev_level then - -- folds[line_num] = prev_level + FOLD_HEADER - -- end - -- if current_level < FOLD_BASE then - -- current_level = FOLD_BASE - -- end - -- prev_level = current_level - -- line_num = line_num + 1 - -- end - -- local action_n = function() - -- folds[line_num] = prev_level + FOLD_BLANK - -- line_num = line_num + 1 - -- end - -- pattern = (p_yes/action_y + p_nop/action_n)^0 - fold_pattern = Cp() * C(fold_pattern) / function(s,match) local symbols = fold_symbols[get_style_at(start_pos + s)] if symbols then @@ -561,106 +438,19 @@ local get_style_at = LEXER.get_style_at or GetStyleAt prev_level = _start_level_ current_level = prev_level lpegmatch(pattern,text) --- return folds -local t = folds -folds = nil -return t -- so folds can be collected + -- make folds collectable + local t = folds + folds = nil + return t end folders[lexer] = folder end return folder(text,start_pos,start_line,start_level,lexer) end --- local function fold_by_indentation(text,start_pos,start_line,start_level) --- local folds = { } --- local current_line = start_line --- local prev_level = start_level --- for line in gmatch(text,'[\t ]*(.-)\r?\n') do --- if line ~= "" then --- local current_level = FOLD_BASE + get_indent_amount(current_line) --- if current_level > prev_level then -- next level --- local i = current_line - 1 --- while true do --- local f = folds[i] --- if f and f[2] == FOLD_BLANK then --- i = i - 1 --- else --- break --- end --- end --- local f = folds[i] --- if f then --- f[2] = FOLD_HEADER --- end -- low indent --- folds[current_line] = n_table[current_level] -- { current_level } -- high indent --- elseif current_level < prev_level then -- prev level --- local f = folds[current_line - 1] --- if f then --- f[1] = prev_level -- 
high indent --- end --- folds[current_line] = n_table[current_level] -- { current_level } -- low indent --- else -- same level --- folds[current_line] = n_table[prev_level] -- { prev_level } --- end --- prev_level = current_level --- else --- folds[current_line] = b_table[prev_level] -- { prev_level, FOLD_BLANK } --- end --- current_line = current_line + 1 --- end --- return folds --- end - --- local function fold_by_indentation(text,start_pos,start_line,start_level) --- local folds = { } --- local current_line = start_line --- local prev_level = start_level --- for line in gmatch(text,'[\t ]*(.-)\r?\n') do --- if line ~= '' then --- local current_level = FOLD_BASE + get_indent_amount(current_line) --- if current_level > prev_level then -- next level --- local i = current_line - 1 --- local f --- while true do --- f = folds[i] --- if not f then --- break --- elseif f[2] == FOLD_BLANK then --- i = i - 1 --- else --- f[2] = FOLD_HEADER -- low indent --- break --- end --- end --- folds[current_line] = { current_level } -- high indent --- elseif current_level < prev_level then -- prev level --- local f = folds[current_line - 1] --- if f then --- f[1] = prev_level -- high indent --- end --- folds[current_line] = { current_level } -- low indent --- else -- same level --- folds[current_line] = { prev_level } --- end --- prev_level = current_level --- else --- folds[current_line] = { prev_level, FOLD_BLANK } --- end --- current_line = current_line + 1 --- end --- for line, level in next, folds do --- folds[line] = level[1] + (level[2] or 0) --- end --- return folds --- end - local folds, current_line, prev_level local function action_y() -local FOLD_BASE = LEXER.FOLD_BASE or SC_FOLDLEVELBASE -local FOLD_HEADER = LEXER.FOLD_HEADER or SC_FOLDLEVELHEADERFLAG -local FOLD_BLANK = LEXER.FOLD_BLANK or SC_FOLDLEVELWHITEFLAG -local get_indent_amount = LEXER.get_indent_amount or GetIndentAmount local current_level = FOLD_BASE + get_indent_amount(current_line) if current_level > prev_level then -- next level local i = current_line - 1 @@ -691,7 +481,7 @@ local get_indent_amount = LEXER.get_indent_amount or GetIndentAmount end local function action_n() - folds[current_line] = { prev_level, LEXER.FOLD_BLANK } + folds[current_line] = { prev_level, FOLD_BLANK } current_line = current_line + 1 end @@ -710,18 +500,17 @@ local function fold_by_indentation(text,start_pos,start_line,start_level) for line, level in next, folds do folds[line] = level[1] + (level[2] or 0) end - -- done --- return folds -local t = folds -folds = nil -return t -- so folds can be collected + -- done, make folds collectable + local t = folds + folds = nil + return t end local function fold_by_line(text,start_pos,start_line,start_level) local folds = { } -- can also be lpeg'd for _ in gmatch(text,".-\r?\n") do - folds[start_line] = n_table[start_level] -- { start_level } + folds[start_line] = n_table[start_level] -- { start_level } -- stile tables ? 
needs checking start_line = start_line + 1 end return folds @@ -736,11 +525,13 @@ function context.fold(text,start_pos,start_line,start_level) -- hm, we had size if text == '' then return { } end - local lexer = global._LEXER - local fold_by_lexer = lexer._fold + if initialize then + initialize() + end + local lexer = global._LEXER + local fold_by_lexer = lexer._fold local fold_by_symbols = lexer._foldsymbols - local filesize = 0 -- we don't know that -local get_property = LEXER.get_property or GetProperty + local filesize = 0 -- we don't know that if fold_by_lexer then if filesize <= threshold_by_lexer then return fold_by_lexer(text,start_pos,start_line,start_level,lexer) @@ -763,16 +554,16 @@ end -- The following code is mostly unchanged: -local function add_rule(lexer, id, rule) +local function add_rule(lexer,id,rule) if not lexer._RULES then - lexer._RULES = {} - lexer._RULEORDER = {} + lexer._RULES = { } + lexer._RULEORDER = { } end lexer._RULES[id] = rule lexer._RULEORDER[#lexer._RULEORDER + 1] = id end -local function add_style(lexer, token_name, style) +local function add_style(lexer,token_name,style) local len = lexer._STYLES.len if len == 32 then len = len + 8 @@ -781,39 +572,40 @@ local function add_style(lexer, token_name, style) print('Too many styles defined (128 MAX)') end lexer._TOKENS[token_name] = len - lexer._STYLES[len] = style - lexer._STYLES.len = len + 1 + lexer._STYLES[len] = style + lexer._STYLES.len = len + 1 end local function join_tokens(lexer) - local patterns, order = lexer._RULES, lexer._RULEORDER + local patterns = lexer._RULES + local order = lexer._RULEORDER local token_rule = patterns[order[1]] for i=2,#order do token_rule = token_rule + patterns[order[i]] end lexer._TOKENRULE = token_rule - return lexer._TOKENRULE + return token_rule end local function add_lexer(grammar, lexer, token_rule) local token_rule = join_tokens(lexer) local lexer_name = lexer._NAME - local children = lexer._CHILDREN + local children = lexer._CHILDREN for i=1,#children do local child = children[i] if child._CHILDREN then add_lexer(grammar, child) end - local child_name = child._NAME - local rules = child._EMBEDDEDRULES[lexer_name] - local rules_token_rule = grammar['__'..child_name] or rules.token_rule - grammar[child_name] = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name) - local embedded_child = '_' .. child_name + local child_name = child._NAME + local rules = child._EMBEDDEDRULES[lexer_name] + local rules_token_rule = grammar['__'..child_name] or rules.token_rule + grammar[child_name] = (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 * V(lexer_name) + local embedded_child = '_' .. child_name grammar[embedded_child] = rules.start_rule * (-rules.end_rule * rules_token_rule)^0 * rules.end_rule^-1 - token_rule = V(embedded_child) + token_rule + token_rule = V(embedded_child) + token_rule end grammar['__' .. lexer_name] = token_rule - grammar[lexer_name] = token_rule^0 + grammar[lexer_name] = token_rule^0 end local function build_grammar(lexer, initial_rule) @@ -833,58 +625,21 @@ local function build_grammar(lexer, initial_rule) end -- so far. We need these local functions in the next one. --- --- Before 3.24 we had tokens[..] = { category, position }, now it's a two values. 
local lineparsers = { } function context.lex(text,init_style) local lexer = global._LEXER local grammar = lexer._GRAMMAR + if initialize then + initialize() + end if not grammar then return { } elseif lexer._LEXBYLINE then -- we could keep token local tokens = { } local offset = 0 local noftokens = 0 - -- -- pre 3.24 - -- - -- for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg - -- local line_tokens = lpegmatch(grammar,line) - -- if line_tokens then - -- for i=1,#line_tokens do - -- local token = line_tokens[i] - -- token[2] = token[2] + offset - -- noftokens = noftokens + 1 - -- tokens[noftokens] = token - -- end - -- end - -- offset = offset + #line - -- if noftokens > 0 and tokens[noftokens][2] ~= offset then - -- noftokens = noftokens + 1 - -- tokens[noftokens] = { 'default', offset + 1 } - -- end - -- end - - -- for line in gmatch(text,'[^\r\n]*\r?\n?') do - -- local line_tokens = lpegmatch(grammar,line) - -- if line_tokens then - -- for i=1,#line_tokens,2 do - -- noftokens = noftokens + 1 - -- tokens[noftokens] = line_tokens[i] - -- noftokens = noftokens + 1 - -- tokens[noftokens] = line_tokens[i + 1] + offset - -- end - -- end - -- offset = offset + #line - -- if noftokens > 0 and tokens[noftokens] ~= offset then - -- noftokens = noftokens + 1 - -- tokens[noftokens] = 'default' - -- noftokens = noftokens + 1 - -- tokens[noftokens] = offset + 1 - -- end - -- end - local lineparser = lineparsers[lexer] if not lineparser then -- probably a cmt is more efficient lineparser = C((1-newline)^0 * newline) / function(line) @@ -961,10 +716,10 @@ function context.token(name, patt) return patt * Cc(name) * Cp() end -lexer.fold = context.fold -lexer.lex = context.lex -lexer.token = context.token -lexer.exact_match = context.exact_match +lexers.fold = context.fold +lexers.lex = context.lex +lexers.token = context.token +lexers.exact_match = context.exact_match -- helper .. alas ... the lexer's lua instance is rather crippled .. not even -- math is part of it @@ -1057,23 +812,6 @@ function lpeg.utfchartabletopattern(list) return make(tree) end --- patterns.invisibles = --- P(utfchar(0x00A0)) -- nbsp --- + P(utfchar(0x2000)) -- enquad --- + P(utfchar(0x2001)) -- emquad --- + P(utfchar(0x2002)) -- enspace --- + P(utfchar(0x2003)) -- emspace --- + P(utfchar(0x2004)) -- threeperemspace --- + P(utfchar(0x2005)) -- fourperemspace --- + P(utfchar(0x2006)) -- sixperemspace --- + P(utfchar(0x2007)) -- figurespace --- + P(utfchar(0x2008)) -- punctuationspace --- + P(utfchar(0x2009)) -- breakablethinspace --- + P(utfchar(0x200A)) -- hairspace --- + P(utfchar(0x200B)) -- zerowidthspace --- + P(utfchar(0x202F)) -- narrownobreakspace --- + P(utfchar(0x205F)) -- math thinspace - patterns.invisibles = lpeg.utfchartabletopattern { utfchar(0x00A0), -- nbsp utfchar(0x2000), -- enquad @@ -1103,4 +841,4 @@ patterns.iwordpattern = patterns.iwordtoken^3 -- wrong, but additional styles get ignored and clash somehow) I just copy the -- original lexer code ... see original for comments. 
-return LEXER +return lexers diff --git a/context/data/scite/scite-context.properties b/context/data/scite/scite-context.properties index 3ee28e229..ce56084c0 100644 --- a/context/data/scite/scite-context.properties +++ b/context/data/scite/scite-context.properties @@ -27,13 +27,14 @@ # if PLAT_WIN # find.command=fgrep -G -n $(find.what) $(find.files) -# bugged: bad cursor -# -# technology=0 -# +# 1: better anti-aliasing on windows, also slightly diffent lineheights + +technology=1 + # not much difference # # buffered.draw=0 +# two.phase.draw=0 # # no auto save: # @@ -44,13 +45,13 @@ code.page=65001 output.code.page=65001 -# caret gets weird: -# -# technology=1 +position.maximize=1 +virtual.space=1 textwrapper.margin=4 textwrapper.length=68 -#~ textwrapper.length=80 + +# xml.auto.close.tags=1 # ConTeXt: suffixes (really needed) diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv index d60f0f3a6..8dcacd5a9 100644 --- a/tex/context/base/cont-new.mkiv +++ b/tex/context/base/cont-new.mkiv @@ -11,7 +11,7 @@ %C therefore copyrighted by \PRAGMA. See mreadme.pdf for %C details. -\newcontextversion{2013.05.18 02:06} +\newcontextversion{2013.05.18 12:41} %D This file is loaded at runtime, thereby providing an excellent place for %D hacks, patches, extensions and new features. diff --git a/tex/context/base/cont-new.tmp b/tex/context/base/cont-new.tmp deleted file mode 100644 index 1518f9c68..000000000 --- a/tex/context/base/cont-new.tmp +++ /dev/null @@ -1,83 +0,0 @@ -%D \module -%D [ file=cont-new, -%D version=1995.10.10, -%D title=\CONTEXT\ Miscellaneous Macros, -%D subtitle=New Macros, -%D author=Hans Hagen, -%D date=\currentdate, -%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}] -%C -%C This module is part of the \CONTEXT\ macro||package and is -%C therefore copyrighted by \PRAGMA. See mreadme.pdf for -%C details. - -\newcontextversion{2013.05.18 02:03} - -%D This file is loaded at runtime, thereby providing an excellent place for -%D hacks, patches, extensions and new features. - -\unprotect - -% \writestatus\m!system{beware: some patches loaded from cont-new.mkiv} - -% \attribute152\zerocount : marks ... 
-
-%D Maybe:
-
-\unexpanded\def\tightvbox{\dowithnextbox{\dp\nextbox\zeropoint\box\nextbox}\vbox}
-\unexpanded\def\tightvtop{\dowithnextbox{\ht\nextbox\zeropoint\box\nextbox}\vtop}
-
-%D Maybe:
-
-% \startluacode
-%     function context.loadfile(filename)
-%         context(string.strip(io.loaddata(resolvers.findfile(filename))))
-%     end
-% \stopluacode
-%
-% \edef\tufte{\cldcommand{loadfile("tufte.tex")}}
-
-%D Needs some work:
-
-\unexpanded\def\startgridcorrection
-  {\dosingleempty\spac_grid_correction_start}
-
-\def\spac_grid_correction_start[#1]%
-  {\ifgridsnapping
-     \snaptogrid[#1]\vbox\bgroup
-   \else
-     \startbaselinecorrection
-   \fi}
-
-\unexpanded\def\stopgridcorrection
-  {\ifgridsnapping
-     \egroup
-   \else
-     \stopbaselinecorrection
-   \fi}
-
-\unexpanded\def\checkgridsnapping
-  {\lineskip\ifgridsnapping\zeropoint\else\normallineskip\fi}
-
-%D Probably obsolete:
-
-\unexpanded\def\startcolumnmakeup % don't change
-  {\bgroup
-   \getrawnoflines\textheight % raw as we can have topskip
-   \setbox\scratchbox\vbox to \dimexpr\noflines\lineheight-\lineheight+\topskip\relax
-   \bgroup
-   \forgetall}
-
-\unexpanded\def\stopcolumnmakeup
-  {\egroup
-   \dp\scratchbox\zeropoint
-   \wd\scratchbox\textwidth
-   \box\scratchbox
-   \egroup
-   \page_otr_command_synchronize_hsize}
-
-%D Till we fixed all styles:
-
-\let\\=\crlf
-
-\protect \endinput
diff --git a/tex/context/base/context-version.pdf b/tex/context/base/context-version.pdf
Binary files differ
index 67d4fc3d4..2e30d9d38 100644
--- a/tex/context/base/context-version.pdf
+++ b/tex/context/base/context-version.pdf
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index 9ab9b3aad..c10c70e6f 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -25,8 +25,8 @@
 %D up and the dependencies are more consistent.
 
 \edef\contextformat {\jobname}
-\edef\contextversion{2013.05.18 02:06}
-\edef\contextkind   {current}
+\edef\contextversion{2013.05.18 12:41}
+\edef\contextkind   {beta}
 
 %D For those who want to use this:
diff --git a/tex/context/base/context.tmp b/tex/context/base/context.tmp
deleted file mode 100644
index d5913624e..000000000
--- a/tex/context/base/context.tmp
+++ /dev/null
@@ -1,513 +0,0 @@
-%D \module
-%D         [       file=context,
-%D              version=2008.28.10, % 1995.10.10,
-%D                title=\CONTEXT,
-%D             subtitle=\CONTEXT\ Format Generation,
-%D               author=Hans Hagen,
-%D                 date=\currentdate,
-%D            copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
-%C
-%C This module is part of the \CONTEXT\ macro||package and is
-%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
-%C details.
-
-\catcode`\{=1 \catcode`\}=2 \catcode`\#=6
-
-%D From the next string (which is set by the script that assembles the
-%D distribution) later on we will calculate a number that can be used
-%D by use modules to identify the feature level. Starting with version
-%D 2004.8.30 the low level interface is english. Watch out and adapt
-%D your styles and modules.
-
-% \everypar{\writestatus{!!!!}{some spurious input in line \the\inputlineno}\wait}
-
-%D The order of loading will change when all modules have been cleaned
-%D up and the dependencies are more consistent.
-
-\edef\contextformat {\jobname}
-\edef\contextversion{2013.05.18 02:03}
-\edef\contextkind   {beta}
-
-%D For those who want to use this:
-
-\let\fmtname   \contextformat
-\let\fmtversion\contextversion
-
-%D Loading:
-
-\edef\mksuffix   {mkiv}
-\edef\contextmark{MKIV}
-
-\ifx\normalinput\undefined \let\normalinput\input \fi
-
-\def\loadcorefile#1{\normalinput#1\relax}
-\def\loadmarkfile#1{\normalinput#1.\mksuffix\relax}
-\def\loadmkiifile#1{}
-\def\loadmkivfile#1{\normalinput#1.mkiv\relax}
-\def\loadmkvifile#1{\normalinput#1.mkvi\relax}
-
-%D First we load the system modules. These implement a lot of
-%D manipulation macros. We start with setting up some basic \TEX\
-%D machinery.
-
-\loadmarkfile{syst-ini}
-
-%D Some checking (more primitives are now defined):
-
-\ifdefined\defaultinterface   \else \def\defaultinterface  {english} \fi
-%ifdefined\messageinterface   \else \let\messageinterface \defaultinterface \fi
-\ifdefined\defaultlanguagetag \else \def\defaultlanguagetag{en}      \fi
-
-%D We just quit if new functionality is expected.
-
-\ifnum\luatexversion<70 % also change message
-    \writestatus{!!!!}{Your luatex binary is too old, you need at least version 0.70!}
-    \expandafter\end
-\fi
-
-%D There is only this way to pass the version info to \LUA\ (currently). Hm, we could
-%D now put it into the environment.
-
-\newtoks\contextversiontoks \contextversiontoks\expandafter{\contextversion}
-\newtoks\contextkindtoks    \contextkindtoks   \expandafter{\contextkind}
-
-% \normaleverypar{\wait} % uncomment for test of funny injections
-
-%D Now the more fundamental code gets defined.
-
-\loadmarkfile{norm-ctx}
-\loadmarkfile{syst-pln}
-\loadmarkfile{syst-mes}
-
-\loadmarkfile{luat-cod}
-\loadmarkfile{luat-bas}
-\loadmarkfile{luat-lib}
-
-\loadmarkfile{catc-ini}
-\loadmarkfile{catc-act}
-\loadmarkfile{catc-def}
-\loadmarkfile{catc-ctx}
-\loadmarkfile{catc-sym}
-
-\loadmarkfile{cldf-ini}
-
-% From here on we have \unexpanded being \normalprotected, as we already had
-% \unexpanded long before etex came around.
-
-\loadmarkfile{syst-aux}
-\loadmarkfile{syst-lua}
-\loadmarkfile{syst-con}
-
-\loadmarkfile{syst-fnt}
-\loadmarkfile{syst-rtp}
-
-\loadmkvifile{file-ini}
-\loadmkvifile{file-res}
-\loadmkvifile{file-lib}
-
-\loadmarkfile{supp-dir}
-
-\loadmarkfile{char-ini}
-\loadmarkfile{char-utf}
-\loadmarkfile{char-act}
-
-\loadmarkfile{mult-ini}
-\loadmarkfile{mult-sys}
-\loadmarkfile{mult-aux}
-\loadmarkfile{mult-def}
-\loadmarkfile{mult-chk}
-%loadmarkfile{mult-aux} % moved up
-\loadmkvifile{mult-dim}
-
-\loadmarkfile{cldf-int} % interface
-
-\loadmarkfile{luat-ini}
-
-\loadmarkfile{toks-ini}
-
-\loadmarkfile{attr-ini}
-
-\loadmarkfile{core-ini}
-\loadmarkfile{core-env}
-
-\loadmarkfile{layo-ini}
-
-\loadmarkfile{node-ini}
-
-\loadmarkfile{cldf-bas} % basics / depends on nodes
-
-\loadmarkfile{node-fin}
-\loadmarkfile{node-mig}
-\loadmarkfile{typo-bld} % par builders
-%loadmarkfile{node-pag}
-
-\loadmarkfile{back-ini}
-
-\loadmarkfile{attr-col}
-\loadmarkfile{attr-lay}
-\loadmarkfile{attr-neg}
-\loadmarkfile{attr-eff}
-\loadmarkfile{attr-mkr}
-
-\loadmarkfile{trac-tex}
-\loadmarkfile{trac-deb} % will move up
-\loadmarkfile{trac-ctx} % maybe move up
-
-%loadmarkfile{blob-ini} % not to be used, we only use a helper
-
-\loadmarkfile{supp-box}
-
-%loadmarkfile{supp-vis} % replaced by trac-vis
-%loadmarkfile{supp-fun} % mostly replaced
-
-\loadmarkfile{supp-ran}
-\loadmarkfile{supp-mat}
-\loadmarkfile{spac-cha}
-%loadmarkfile{supp-num} % obsolete
-
-\loadmarkfile{typo-ini}
-
-\loadmkvifile{file-syn}
-\loadmkvifile{file-mod}
-
-\loadmarkfile{core-con}
-
-\loadmarkfile{cont-fil}
-
-\loadmarkfile{regi-ini}
-\loadmarkfile{enco-ini}
-\loadmarkfile{hand-ini}
-
-\loadmarkfile{lang-ini}
-\loadmarkfile{lang-lab}
-
-\loadmarkfile{unic-ini}
-
-\loadmarkfile{core-uti}
-\loadmarkfile{core-two}
-\loadmarkfile{core-dat}
-
-\loadmarkfile{colo-ini}
-\loadmarkfile{colo-grp} % optional
-\loadmarkfile{colo-ext}
-
-\loadmarkfile{node-bck} % overloads anch-pgr (experimental and undocumented)
-
-\loadmarkfile{pack-cut} % leftovers from trac-vis
-
-\loadmarkfile{lang-mis}
-\loadmarkfile{lang-url}
-\loadmarkfile{lang-def}
-
-\loadmkvifile{file-job} % why so late?
-
-\loadmarkfile{symb-ini} % brrr depends on fonts
-
-\loadmarkfile{sort-ini}
-
-\loadmkvifile{pack-mis}
-\loadmarkfile{pack-rul}
-\loadmarkfile{pack-mrl}
-\loadmkvifile{pack-bck}
-\loadmarkfile{pack-fen}
-
-\loadmarkfile{lxml-ini}
-\loadmarkfile{lxml-sor}
-
-\loadmkvifile{typo-prc}
-
-\loadmkvifile{strc-ini}
-\loadmarkfile{strc-tag}
-\loadmarkfile{strc-doc}
-\loadmarkfile{strc-num}
-\loadmarkfile{strc-mar}
-\loadmarkfile{strc-sbe}
-\loadmkvifile{strc-lst}
-\loadmarkfile{strc-sec}
-\loadmarkfile{strc-pag} % hm, depends on core-num
-\loadmarkfile{strc-ren}
-\loadmarkfile{strc-xml}
-\loadmarkfile{strc-def} % might happen later
-\loadmkvifile{strc-ref}
-\loadmarkfile{strc-reg}
-\loadmkvifile{strc-lev} % experiment
-
-\loadmarkfile{spac-ali}
-\loadmarkfile{spac-hor}
-\loadmarkfile{spac-flr}
-\loadmarkfile{spac-ver}
-\loadmarkfile{spac-lin}
-\loadmarkfile{spac-pag}
-\loadmarkfile{spac-par}
-%loadmarkfile{spac-adj} % no longer needed
-\loadmarkfile{spac-def}
-\loadmarkfile{spac-grd}
-
-\loadmarkfile{anch-pos}
-
-\loadmkvifile{scrn-ini}
-\loadmkvifile{scrn-ref}
-
-\loadmarkfile{pack-obj}
-
-\loadmkvifile{strc-itm}
-
-\loadmkvifile{strc-con}
-\loadmkvifile{strc-des}
-\loadmkvifile{strc-enu}
-
-\loadmarkfile{strc-ind}
-\loadmarkfile{strc-lab}
-\loadmarkfile{strc-syn}
-
-\loadmarkfile{core-sys}
-
-\loadmarkfile{page-var}
-\loadmkvifile{page-otr}
-\loadmarkfile{page-ini}
-\loadmarkfile{page-ins}
-\loadmarkfile{page-fac}
-\loadmarkfile{page-brk}
-\loadmarkfile{page-col}
-\loadmarkfile{page-inf}
-\loadmarkfile{page-grd}
-\loadmarkfile{page-flt}
-\loadmarkfile{page-bck}
-\loadmarkfile{page-not}
-\loadmarkfile{page-one}
-\loadmarkfile{page-lay}
-\loadmkvifile{page-box}
-\loadmkvifile{page-txt}
-\loadmarkfile{page-sid} % when
-
-\loadmkvifile{strc-flt}
-
-\loadmarkfile{page-pst}
-\loadmkvifile{page-mbk}
-\loadmarkfile{page-mul} % partly overloaded
-\loadmarkfile{page-mix} % new
-\loadmarkfile{page-set}
-\loadmarkfile{pack-lyr}
-\loadmarkfile{pack-pos}
-\loadmkvifile{page-mak}
-
-\loadmarkfile{page-lin}
-\loadmarkfile{page-par}
-\loadmarkfile{typo-pag}
-\loadmarkfile{typo-mar}
-\loadmarkfile{typo-itm}
-
-\loadmarkfile{buff-ini}
-\loadmarkfile{buff-ver}
-\loadmkvifile{buff-par}
-
-\loadmarkfile{buff-imp-tex} % optional as also runtime if not loaded
-\loadmarkfile{buff-imp-mp}  % optional as also runtime if not loaded
-\loadmarkfile{buff-imp-lua} % optional as also runtime if not loaded
-\loadmarkfile{buff-imp-xml} % optional as also runtime if not loaded
-
-\loadmarkfile{buff-imp-parsed-xml} % optional
-%loadmarkfile{buff-imp-parsed-lua} % optional
-
-\loadmarkfile{strc-blk}
-
-\loadmarkfile{page-imp}
-\loadmkvifile{page-sel} % optional
-\loadmkvifile{page-inj} % optional
-
-\loadmkvifile{scrn-pag}
-\loadmkvifile{scrn-wid}
-\loadmkvifile{scrn-but}
-\loadmkvifile{scrn-bar}
-
-\loadmarkfile{page-com} % optional (after scrn-pag)
-
-\loadmarkfile{strc-bkm} % bookmarks
-
-\loadmarkfile{tabl-com}
-\loadmarkfile{tabl-pln}
-
-\loadmarkfile{tabl-tab} % thrd-tab stripped and merged
-
-\loadmarkfile{tabl-tbl}
-\loadmarkfile{tabl-ntb}
-\loadmarkfile{tabl-nte}
-\loadmarkfile{tabl-ltb}
-\loadmarkfile{tabl-tsp}
-\loadmkvifile{tabl-xtb}
-\loadmarkfile{tabl-mis}
-
-\loadmarkfile{java-ini}
-
-\loadmkvifile{scrn-fld}
-\loadmkvifile{scrn-hlp}
-
-\loadmarkfile{char-enc} % will move up
-
-\loadmkvifile{font-lib} % way too late
-\loadmkvifile{font-fil}
-\loadmkvifile{font-var}
-\loadmkvifile{font-fea}
-\loadmkvifile{font-mat}
-\loadmkvifile{font-ini}
-\loadmkvifile{font-sym}
-\loadmkvifile{font-sty}
-\loadmkvifile{font-set}
-\loadmkvifile{font-emp}
-\loadmarkfile{font-pre}
-\loadmarkfile{font-unk}
-\loadmarkfile{font-tra}
-\loadmarkfile{font-chk}
-\loadmarkfile{font-uni}
-\loadmkvifile{font-col}
-\loadmkvifile{font-gds}
-\loadmkvifile{font-aux}
-
-\loadmarkfile{typo-lan}
-
-\loadmarkfile{lxml-css}
-
-\loadmarkfile{spac-chr} % depends on fonts
-
-\loadmarkfile{blob-ini} % not to be used, we only use a helper
-
-\loadmarkfile{trac-vis}
-\loadmarkfile{trac-jus}
-
-\loadmarkfile{typo-cln}
-\loadmarkfile{typo-spa}
-\loadmarkfile{typo-krn}
-\loadmkvifile{typo-itc}
-\loadmarkfile{typo-dir}
-\loadmarkfile{typo-brk}
-\loadmarkfile{typo-cap}
-\loadmarkfile{typo-dig}
-\loadmarkfile{typo-rep}
-\loadmkvifile{typo-txt}
-\loadmarkfile{typo-par}
-
-\loadmkvifile{type-ini}
-\loadmarkfile{type-set}
-
-\loadmarkfile{scrp-ini}
-
-\loadmarkfile{lang-wrd} % can be optional (discussion with mm sideeffect)
-%loadmarkfile{lang-rep} % can be optional (bt 2013 side effect)
-
-\loadmarkfile{prop-ini} % only for downward compatibility
-
-\loadmarkfile{mlib-ctx}
-
-\loadmarkfile{meta-ini}
-\loadmarkfile{meta-tex}
-\loadmarkfile{meta-fun}
-\loadmarkfile{meta-pag}
-\loadmarkfile{meta-grd}
-
-\loadmarkfile{page-mrk} % depends on mp
-
-\loadmarkfile{page-flw}
-\loadmarkfile{page-spr}
-\loadmarkfile{page-plg}
-\loadmarkfile{page-str}
-
-\loadmarkfile{anch-pgr} % can be moved up (nicer for dependencies)
-\loadmkvifile{anch-bck}
-\loadmarkfile{anch-tab} % overloads tabl-tbl
-\loadmarkfile{anch-bar}
-%loadmarkfile{anch-snc} % when needed this one will be redone
-
-\loadmarkfile{math-ini}
-\loadmarkfile{math-pln}
-\loadmarkfile{math-for}
-\loadmarkfile{math-def}
-\loadmarkfile{math-ali}
-%loadmarkfile{math-arr}
-\loadmkvifile{math-stc}
-\loadmarkfile{math-frc}
-\loadmarkfile{math-mis}
-\loadmarkfile{math-scr}
-\loadmarkfile{math-int}
-\loadmarkfile{math-del}
-\loadmarkfile{math-fen}
-\loadmarkfile{math-inl}
-\loadmarkfile{math-dis}
-%loadmarkfile{math-lan}
-
-\loadmarkfile{phys-dim}
-
-\loadmarkfile{strc-mat}
-
-\loadmarkfile{chem-ini}
-\loadmarkfile{chem-str}
-
-\loadmarkfile{typo-scr}
-
-\loadmarkfile{node-rul}
-\loadmkvifile{font-sol} % font solutions
-
-\loadmkvifile{strc-not}
-\loadmkvifile{strc-lnt}
-
-\loadmarkfile{pack-com}
-\loadmarkfile{typo-del}
-
-\loadmarkfile{grph-trf}
-\loadmarkfile{grph-inc}
-\loadmarkfile{grph-fig}
-\loadmarkfile{grph-raw}
-
-\loadmarkfile{pack-box}
-\loadmarkfile{pack-bar}
-\loadmarkfile{page-app}
-\loadmarkfile{meta-fig}
-
-\loadmarkfile{lang-spa} % will become obsolete
-
-\loadmarkfile{bibl-bib}
-\loadmarkfile{bibl-tra}
-
-%loadmarkfile{x-xtag} % no longer preloaded
-
-\loadmarkfile{meta-xml}
-
-\loadmarkfile{cont-log}
-
-\loadmarkfile{task-ini}
-
-\loadmarkfile{cldf-ver} % verbatim, this can come late
-\loadmarkfile{cldf-com} % commands, this can come late
-
-\loadmarkfile{core-ctx} % this order might change but we need to check dependencies / move to another namespace
-
-\loadmarkfile{core-def}
-
-%usemodule[x][res-04] % xml resource libraries
-%usemodule[x][res-08] % rlx runtime conversion
-%usemodule[x][res-12] % rli external identification
-
-% now we hook in backend code (needs checking)
-
-\loadmarkfile{back-pdf} % actually, this one should load the next three using document.arguments.backend
-\loadmarkfile{mlib-pdf}
-\loadmarkfile{mlib-pps}
-\loadmarkfile{meta-pdf}
-\loadmarkfile{grph-epd}
-
-\loadmarkfile{back-exp}
-
-\setupcurrentlanguage[\defaultlanguagetag]
-
-\prependtoks
-    \ctxlua{statistics.starttiming(statistics)}%
-\to \everyjob
-
-\appendtoks
-    \ctxlua{statistics.stoptiming(statistics)}%
-\to \everyjob
-
-\appendtoks
-    \ctxlua{statistics.savefmtstatus("\jobname","\contextversion","context.mkiv","\contextkind")}% can become automatic
-\to \everydump
-
-\errorstopmode \dump \endinput
diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf
Binary files differ
index 73b91cc5f..679c052b5 100644
--- a/tex/context/base/status-files.pdf
+++ b/tex/context/base/status-files.pdf
diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf
Binary files differ
index 3c96f5711..cf1f4f0e3 100644
--- a/tex/context/base/status-lua.pdf
+++ b/tex/context/base/status-lua.pdf
diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua
index 9f0348709..e0fe66cb5 100644
--- a/tex/generic/context/luatex/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
 -- merged file : luatex-fonts-merged.lua
 -- parent file : luatex-fonts.lua
--- merge date  : 05/18/13 02:03:53
+-- merge date  : 05/18/13 12:41:50
 
 do -- begin closure to overcome local limits and interference
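
The two hooks at the end of the removed context.tmp bracket the whole job with the statistics timers. A minimal sketch of that pairing on the Lua side, not a definitive implementation; it assumes a ConTeXt run where the statistics module is loaded, and statistics.elapsedtime is assumed to be the matching query helper.

-- any table can serve as the timing key; the hooks above use the
-- statistics table itself
statistics.starttiming(statistics)
-- ... the actual job runs between the two hooks ...
statistics.stoptiming(statistics)
print(statistics.elapsedtime(statistics)) -- elapsedtime is an assumption here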