author    Marius <mariausol@gmail.com>  2011-09-15 10:20:14 +0300
committer Marius <mariausol@gmail.com>  2011-09-15 10:20:14 +0300
commit    99ff9ece308b251302ce7a18f9be0d68278d9ee7 (patch)
tree      5eb0b389881fd5412bcff70b030b4f9552ce213b /context
parent    a39b448f695e8f4ce44c909a493d83643e8227cc (diff)
download  context-99ff9ece308b251302ce7a18f9be0d68278d9ee7.tar.gz
beta 2011.09.15 09:08
Diffstat (limited to 'context')
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-cld.lua     |  82
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-mps.lua     |  75
-rw-r--r--  context/data/scite/lexers/scite-context-lexer-tex.lua     | 162
-rw-r--r--  context/data/scite/lexers/scite-context-lexer.lua         | 238
-rw-r--r--  context/data/scite/lexers/themes/scite-context-theme.lua  | 103
-rw-r--r--  context/data/scite/scite-context-readme.tex                |   1
-rw-r--r--  context/data/scite/scite-context.properties                |   4
7 files changed, 384 insertions(+), 281 deletions(-)
diff --git a/context/data/scite/lexers/scite-context-lexer-cld.lua b/context/data/scite/lexers/scite-context-lexer-cld.lua
index 1e5d8b59c..1abc55f91 100644
--- a/context/data/scite/lexers/scite-context-lexer-cld.lua
+++ b/context/data/scite/lexers/scite-context-lexer-cld.lua
@@ -9,13 +9,15 @@ local info = {
-- Adapted from lua.lua by Mitchell who based it on a lexer by Peter Odding.
local lexer = lexer
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
local P, R, S, C, Cg, Cb, Cs, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.C, lpeg.Cg, lpeg.Cb, lpeg.Cs, lpeg.Cmt
local match, find = string.match, string.find
local global = _G
module(...)
+local cldlexer = _M
+
local keywords = {
'and', 'break', 'do', 'else', 'elseif', 'end', 'false', 'for', 'function',
'if', 'in', 'local', 'nil', 'not', 'or', 'repeat', 'return', 'then', 'true',
@@ -77,53 +79,58 @@ local longcomment = Cmt(#('[[' + ('[' * C(P('=')^0) * '[')), function(input,ind
return stop and stop + 1 or #input + 1
end)
-local whitespace = token(lexer.WHITESPACE, lexer.space^1)
-local any_char = lexer.any_char
+local whitespace = cldlexer.WHITESPACE -- triggers states
+
+local space = lexer.space -- S(" \n\r\t\f\v")
+local any = lexer.any
local squote = P("'")
local dquote = P('"')
local escaped = P("\\") * P(1)
local dashes = P('--')
+local spacing = token(whitespace, space^1)
+local rest = token("default", any)
+
local shortcomment = dashes * lexer.nonnewline^0
local longcomment = dashes * longcomment
-local comment = token(lexer.COMMENT, longcomment + shortcomment)
-
-local shortstring = token("quote", squote)
- * token(lexer.STRING, (escaped + (1-squote))^0 )
- * token("quote", squote)
- + token("quote", dquote)
- * token(lexer.STRING, (escaped + (1-dquote))^0 )
- * token("quote", dquote)
-
-local longstring = token("quote", longonestart)
- * token(lexer.STRING, longonestring)
- * token("quote", longonestop)
- + token("quote", longtwostart)
- * token(lexer.STRING, longtwostring)
- * token("quote", longtwostop)
+local comment = token("comment", longcomment + shortcomment)
+
+local shortstring = token("quote", squote)
+ * token("string", (escaped + (1-squote))^0 )
+ * token("quote", squote)
+ + token("quote", dquote)
+ * token("string", (escaped + (1-dquote))^0 )
+ * token("quote", dquote)
+
+local longstring = token("quote", longonestart)
+ * token("string", longonestring)
+ * token("quote", longonestop)
+ + token("quote", longtwostart)
+ * token("string", longtwostring)
+ * token("quote", longtwostop)
local string = shortstring
+ longstring
local integer = P('-')^-1 * (lexer.hex_num + lexer.dec_num)
-local number = token(lexer.NUMBER, lexer.float + integer)
+local number = token("number", lexer.float + integer)
local word = R('AZ','az','__','\127\255') * (lexer.alnum + '_')^0
-local identifier = token(lexer.IDENTIFIER, word)
+local identifier = token("default", word)
-local operator = token(lexer.OPERATOR, P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split of {}[]()
+local operator = token("special", P('~=') + S('+-*/%^#=<>;:,.{}[]()')) -- maybe split of {}[]()
-local keyword = token(lexer.KEYWORD, word_match(keywords))
-local builtin = token(lexer.FUNCTION, word_match(functions))
-local constant = token(lexer.CONSTANT, word_match(constants))
-local csname = token("user", word_match(csnames)) * (
- whitespace^0 * #S("{(")
- + ( whitespace^0 * token(lexer.OPERATOR, P(".")) * whitespace^0 * token("csname",word) )^1
+local keyword = token("keyword", exact_match(keywords))
+local builtin = token("plain", exact_match(functions))
+local constant = token("data", exact_match(constants))
+local csname = token("user", exact_match(csnames)) * (
+ spacing^0 * #S("{(")
+ + ( spacing^0 * token("special", P(".")) * spacing^0 * token("csname",word) )^1
)
_rules = {
- { 'whitespace', whitespace },
+ { 'whitespace', spacing },
{ 'keyword', keyword },
{ 'function', builtin },
{ 'csname', csname },
@@ -133,24 +140,17 @@ _rules = {
{ 'comment', comment },
{ 'number', number },
{ 'operator', operator },
- { 'any_char', any_char },
+ { 'rest', rest },
}
-_tokenstyles = {
- { "comment", lexer.style_context_comment },
- { "quote", lexer.style_context_quote },
- { "keyword", lexer.style_context_keyword },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
-}
+_tokenstyles = lexer.context.styleset
_foldsymbols = {
_patterns = {
'%l+',
'[%({%)}%[%]]',
},
- [lexer.KEYWORD] = {
+ ['keyword'] = {
['if'] = 1,
['end'] = -1,
['do'] = 1,
@@ -158,13 +158,13 @@ _foldsymbols = {
['repeat'] = 1,
['until'] = -1,
},
- [lexer.COMMENT] = {
+ ['comment'] = {
['['] = 1, [']'] = -1,
},
- ["quote"] = { -- to be tested
+ ['quote'] = { -- to be tested
['['] = 1, [']'] = -1,
},
- [lexer.OPERATOR] = {
+ ['special'] = {
['('] = 1, [')'] = -1,
['{'] = 1, ['}'] = -1,
},
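
Note: the net effect in this lexer is that token names are now plain strings ("keyword", "comment", ...), whitespace uses the module's own WHITESPACE name (so state switching keys on it when the lexer is embedded), and styling is delegated to the shared styleset. A minimal sketch of the resulting shape, inside a hypothetical lexer module where the SciTE lpeg framework provides lexer and lpeg (the "demo" names and word list are illustrative, not part of the commit):

local lexer = lexer
local token, exact_match = lexer.token, lexer.exact_match

module(...)

local demolexer  = _M
local whitespace = demolexer.WHITESPACE -- per-lexer name, triggers state switches

local keywords = { "foo", "bar" } -- hypothetical word list

local spacing = token(whitespace, lexer.space^1)
local keyword = token("keyword", exact_match(keywords))
local rest    = token("default", lexer.any)

_rules = {
    { "whitespace", spacing },
    { "keyword",    keyword },
    { "rest",       rest    },
}

_tokenstyles = lexer.context.styleset -- one shared style table for all lexers
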
diff --git a/context/data/scite/lexers/scite-context-lexer-mps.lua b/context/data/scite/lexers/scite-context-lexer-mps.lua
index fa7d88c5d..70324f340 100644
--- a/context/data/scite/lexers/scite-context-lexer-mps.lua
+++ b/context/data/scite/lexers/scite-context-lexer-mps.lua
@@ -8,8 +8,7 @@ local info = {
local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
-local exact_match = lexer.context.exact_match
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
local type, next, pcall, loadfile = type, next, pcall, loadfile
@@ -34,13 +33,15 @@ do
end
-local whitespace = lexer.WHITESPACE -- triggers states
-local any_char = lexer.any_char
+local whitespace = metafunlexer.WHITESPACE -- triggers states
local space = lexer.space -- S(" \n\r\t\f\v")
+local any = lexer.any
+
local digit = R("09")
local sign = S("+-")
local period = P(".")
+local dquote = P('"')
local cstoken = R("az","AZ") + P("_")
local number = sign^-1 * ( -- at most one
digit^1 * period * digit^0 -- 10.0 10.
@@ -48,19 +49,20 @@ local number = sign^-1 * ( -- at most one
+ digit^1 -- 10
)
-local spacing = token(whitespace, space^1)
-local comment = token('comment', P('%') * (1-S("\n\r"))^0)
-local metafun = token('command', exact_match(metafuncommands))
-local plain = token('plain', exact_match(plaincommands))
-local quoted = token('specials', P('"'))
- * token('default', P(1-P('"'))^1)
- * token('specials', P('"'))
-local primitive = token('primitive', exact_match(primitivecommands))
-local csname = token('user', cstoken^1)
-local specials = token('specials', S("#()[]<>=:\""))
-local number = token('number', number)
-local extras = token('extras', S("`~%^&_-+/\'|\\"))
-local default = token('default', P(1))
+local spacing = token(whitespace, space^1)
+local rest = token('default', any)
+local comment = token('comment', P('%') * (1-S("\n\r"))^0)
+local metafun = token('command', exact_match(metafuncommands))
+local plain = token('plain', exact_match(plaincommands))
+local quoted = token('quote', dquote)
+ * token('string', P(1-dquote)^1)
+ * token('quote', dquote)
+local primitive = token('primitive', exact_match(primitivecommands))
+----- csname = token('user', cstoken^1)
+local identifier = token('default', cstoken^1)
+local number = token('number', number)
+local special = token('special', S("#()[]<>=:\""))
+local extra = token('extra', S("`~%^&_-+/\'|\\"))
_rules = {
{ 'whitespace', spacing },
@@ -68,22 +70,33 @@ _rules = {
{ 'metafun', metafun },
{ 'plain', plain },
{ 'primitive', primitive },
- { 'csname', csname },
+ { 'identifier', identifier },
{ 'number', number },
{ 'quoted', quoted },
- { 'specials', specials },
- { 'extras', extras },
- { 'any_char', any_char },
+ { 'special', special },
+ { 'extra', extra },
+ { 'rest', rest },
}
-_tokenstyles = {
- { "comment", lexer.style_context_comment },
- { "default", lexer.style_context_default },
- { "number" , lexer.style_context_number },
- { "primitive", lexer.style_context_primitive },
- { "plain", lexer.style_context_plain },
- { "command", lexer.style_context_command },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
+_tokenstyles = lexer.context.styleset
+
+_foldsymbols = {
+ _patterns = {
+ "%l+",
+ },
+ ["primitive"] = {
+ ["beginfig"] = 1,
+ ["endfig"] = -1,
+ ["def"] = 1,
+ ["vardef"] = 1,
+ ["primarydef"] = 1,
+ ["secondarydef" ] = 1,
+ ["tertiarydef"] = 1,
+ ["enddef"] = -1,
+ ["if"] = 1,
+ ["fi"] = -1,
+ ["for"] = 1,
+ ["forever"] = 1,
+ ["endfor"] = -1,
+ }
}
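
Note: the new MetaPost fold table follows the same contract as the other lexers: the fold routine scans each line with the _patterns, and when a match's style has an entry in _foldsymbols, that entry is added to the current fold level. A simplified sketch of the lookup (the real loop lives in lexer.context.fold later in this commit; entries may also be functions, which this sketch ignores):

local function fold_increment(fold_symbols, style, word)
    -- e.g. style "primitive", word "def" -> 1, word "enddef" -> -1
    local symbols = fold_symbols[style]
    local level   = symbols and symbols[word]
    if type(level) == "number" then
        return level -- +1 opens a fold, -1 closes one
    end
    return 0
end
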
diff --git a/context/data/scite/lexers/scite-context-lexer-tex.lua b/context/data/scite/lexers/scite-context-lexer-tex.lua
index 4a1a0a766..caab6fc4b 100644
--- a/context/data/scite/lexers/scite-context-lexer-tex.lua
+++ b/context/data/scite/lexers/scite-context-lexer-tex.lua
@@ -6,7 +6,7 @@ local info = {
license = "see context related readme files",
}
-
+-- maybe: _LINEBYLINE variant for large files (no nesting)
-- maybe: protected_macros
--[[
@@ -24,11 +24,8 @@ local info = {
-- local interface = props["keywordclass.macros.context.en"]
-- local interface = lexer.get_property("keywordclass.macros.context.en","")
- -- the embedded lexers don't backtrack (so they're not that usefull on large
- -- texts) which is probably a scintilla issue (trade off between speed and lexable
- -- area); also there is some weird bleeding back to the parent lexer with respect
- -- to colors (i.e. the \ in \relax can become black) so I might as well use private
- -- color specifications
+ -- it seems that whitespace triggers the lexer when embedding happens, but this
+ -- is quite fragile due to duplicate styles
-- this lexer does not care about other macro packages (one can of course add a fake
-- interface but it's not on the agenda)
@@ -37,21 +34,23 @@ local info = {
local lexer = lexer
local global, string, table, lpeg = _G, string, table, lpeg
-local token, style, colors, word_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.word_match, lexer.style_nothing
-local exact_match = lexer.context.exact_match
-local P, R, S, V, C, Cmt = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt
+local token, style, colors, exact_match, no_style = lexer.token, lexer.style, lexer.colors, lexer.exact_match, lexer.style_nothing
+local P, R, S, V, C, Cmt, Cp, Cc, Ct = lpeg.P, lpeg.R, lpeg.S, lpeg.V, lpeg.C, lpeg.Cmt, lpeg.Cp, lpeg.Cc, lpeg.Ct
local type, next, pcall, loadfile, setmetatable = type, next, pcall, loadfile, setmetatable
+local find, match = string.find, string.match
module(...)
local contextlexer = _M
+
local basepath = lexer.context and lexer.context.path or _LEXERHOME
local commands = { en = { } }
local primitives = { }
local helpers = { }
+local constants = { }
-do
+do -- todo: only once, store in global
local definitions = lexer.context.loaddefinitions("mult-def.lua")
@@ -67,7 +66,13 @@ do
end
end
end
- helpers = definitions.helpers or { }
+ end
+
+ local definitions = lexer.context.loaddefinitions("mult-low.lua")
+
+ if definitions then
+ helpers = definitions.helpers or { }
+ constants = definitions.constants or { }
end
local definitions = lexer.context.loaddefinitions("mult-prm.lua")
@@ -84,43 +89,81 @@ end
local currentcommands = commands.en or { }
-local knowncommand = Cmt(R("az","AZ")^1, function(_,i,s)
+local cstoken = R("az","AZ","\127\255") + S("@!?_")
+
+local knowncommand = Cmt(cstoken^1, function(_,i,s)
return currentcommands[s] and i
end)
-local find, match = string.find, string.match
-
-local knownpreamble = Cmt(P('% '), function(input,i,_)
+local knownpreamble = Cmt(P("% "), function(input,i,_)
if i < 10 then
- local s, e, word = find(input,'^(.+)[\n\r]',i)
+ local s, e, word = find(input,'^(.+)[\n\r]',i) -- combine with match
if word then
local interface = match(word,"interface=(..)")
if interface then
- currentcommands = commands[interface] or commands.en or { }
+ currentcommands = commands[interface] or commands.en or { }
end
end
end
return false
end)
-local whitespace = lexer.WHITESPACE -- triggers states
-local any_char = lexer.any_char
+-- -- the token list contains { "style", endpos } entries
+-- --
+-- -- in principle this is faster but it is also crash sensitive for large files
+
+-- local constants_hash = { } for i=1,#constants do constants_hash [constants [i]] = true end
+-- local helpers_hash = { } for i=1,#helpers do helpers_hash [helpers [i]] = true end
+-- local primitives_hash = { } for i=1,#primitives do primitives_hash[primitives[i]] = true end
+
+-- local specialword = Ct( P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- if currentcommands[s] then
+-- return true, "command", i
+-- elseif constants_hash[s] then
+-- return true, "data", i
+-- elseif helpers_hash[s] then
+-- return true, "plain", i
+-- elseif primitives_hash[s] then
+-- return true, "primitive", i
+-- else -- if starts with if then primitive
+-- return true, "user", i
+-- end
+-- end) )
+
+-- local specialword = P('\\') * Cmt( C(cstoken^1), function(input,i,s)
+-- if currentcommands[s] then
+-- return true, { "command", i }
+-- elseif constants_hash[s] then
+-- return true, { "data", i }
+-- elseif helpers_hash[s] then
+-- return true, { "plain", i }
+-- elseif primitives_hash[s] then
+-- return true, { "primitive", i }
+-- else -- if starts with if then primitive
+-- return true, { "user", i }
+-- end
+-- end)
+
+local whitespace = contextlexer.WHITESPACE -- triggers states
local space = lexer.space -- S(" \n\r\t\f\v")
-local cstoken = R("az","AZ") + S("@!?_") -- todo: utf8
+local any = lexer.any
local spacing = token(whitespace, space^1)
+local rest = token('default', any)
local preamble = token('preamble', knownpreamble)
local comment = token('comment', P('%') * (1-S("\n\r"))^0)
local command = token('command', P('\\') * knowncommand)
+local constant = token('data', P('\\') * exact_match(constants))
local helper = token('plain', P('\\') * exact_match(helpers))
local primitive = token('primitive', P('\\') * exact_match(primitives))
local ifprimitive = token('primitive', P('\\if') * cstoken^1)
local csname = token('user', P('\\') * (cstoken^1 + P(1)))
-local grouping = token('grouping', S("{$}"))
-local specials = token('specials', S("#()[]<>=\""))
-local extras = token('extras', S("`~%^&_-+/\'|"))
-local default = token('default', P(1))
+local grouping = token('grouping', S("{$}")) -- maybe also \bgroup \egroup \begingroup \endgroup
+local special = token('special', S("#()[]<>=\""))
+local extra = token('extra', S("`~%^&_-+/\'|"))
+
+local text = token('default', cstoken^1 )
----- startluacode = token("grouping", P("\\startluacode"))
----- stopluacode = token("grouping", P("\\stopluacode"))
@@ -128,8 +171,7 @@ local default = token('default', P(1))
local luastatus = nil
local luaenvironment = P("luacode")
-local inlinelua = P("\\ctxlua")
- + P("\\ctxcommand")
+local inlinelua = P("\\ctx") * ( P("lua") + P("command") )
+ P("\\cldcontext")
local startlua = P("\\start") * Cmt(luaenvironment,function(_,i,s) luastatus = s return true end)
@@ -142,13 +184,17 @@ local stoplua = P("\\stop") * Cmt(luaenvironment,function(_,i,s)
local startluacode = token("embedded", startlua)
local stopluacode = token("embedded", stoplua)
-local metafunenvironment = P("MPcode")
- + P("useMPgraphic")
- + P("reusableMPgraphic")
- + P("uniqueMPgraphic")
- + P("MPinclusions")
- + P("MPextensions")
- + P("MPgraphic")
+-- local metafunenvironment = P("useMPgraphic")
+-- + P("reusableMPgraphic")
+-- + P("uniqueMPgraphic")
+-- + P("MPcode")
+-- + P("MPpage")
+-- + P("MPinclusions")
+-- + P("MPextensions")
+-- + P("MPgraphic")
+
+local metafunenvironment = ( P("use") + P("reusable") + P("unique") ) * ("MPgraphic")
+ + P("MP") * ( P("code")+ P("page") + P("inclusions") + P("extensions") + P("graphic") )
-- local metafunstatus = nil -- this does not work, as the status gets lost in an embedded lexer
-- local startmetafun = P("\\start") * Cmt(metafunenvironment,function(_,i,s) metafunstatus = s return true end)
@@ -157,59 +203,50 @@ local metafunenvironment = P("MPcode")
local startmetafun = P("\\start") * metafunenvironment
local stopmetafun = P("\\stop") * metafunenvironment
-local openargument = token("specials",P("{"))
-local closeargument = token("specials",P("}"))
-local argumentcontent = token("any_char",(1-P("}"))^0)
+local openargument = token("special", P("{"))
+local closeargument = token("special", P("}"))
+local argumentcontent = token("default",(1-P("}"))^0)
-local metafunarguments = (token("default",spacing^0) * openargument * argumentcontent * closeargument)^-2
+local metafunarguments = (spacing^0 * openargument * argumentcontent * closeargument)^-2
local startmetafuncode = token("embedded", startmetafun) * metafunarguments
local stopmetafuncode = token("embedded", stopmetafun)
--- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which means that we need to
--- have frozen at the moment we load another lexer. Because spacing is used to revert to a parent lexer we need
--- to make sure that we load children as late as possible in order not to get the wrong whitespace trigger. This
--- took me quite a while to figure out (not being that familiar with the internals). BTW, if performance becomes
--- an issue we can rewrite the main lex function (memorize the grammars and speed up the byline variant).
-
local cldlexer = lexer.load('scite-context-lexer-cld')
local mpslexer = lexer.load('scite-context-lexer-mps')
lexer.embed_lexer(contextlexer, cldlexer, startluacode, stopluacode)
lexer.embed_lexer(contextlexer, mpslexer, startmetafuncode, stopmetafuncode)
+-- Watch the text grabber, after all, we're talking mostly of text (beware,
+-- no punctuation here as it can be special. We might go for utf here.
+
_rules = {
{ "whitespace", spacing },
{ "preamble", preamble },
+
+ { "text", text },
+
{ "comment", comment },
+
+ { "constant", constant },
{ "helper", helper },
{ "command", command },
{ "ifprimitive", ifprimitive },
{ "primitive", primitive },
{ "csname", csname },
+
+ -- { "whatever", specialword }, -- not yet, crashes
+
{ "grouping", grouping },
- { "specials", specials },
- { "extras", extras },
- { 'any_char', any_char },
-}
+ { "special", special },
+ { "extra", extra },
-_tokenstyles = {
- { "preamble", lexer.style_context_preamble },
- { "comment", lexer.style_context_comment },
- { "default", lexer.style_context_default },
- { 'number', lexer.style_context_number },
- { "embedded", lexer.style_context_embedded },
- { "grouping", lexer.style_context_grouping },
- { "primitive", lexer.style_context_primitive },
- { "plain", lexer.style_context_plain },
- { "command", lexer.style_context_command },
- { "user", lexer.style_context_user },
- { "specials", lexer.style_context_specials },
- { "extras", lexer.style_context_extras },
- { "quote", lexer.style_context_quote },
- { "keyword", lexer.style_context_keyword },
+ { "rest", rest },
}
+_tokenstyles = lexer.context.styleset
+
local folds = {
["\\start"] = 1, ["\\stop" ] = -1,
["\\begin"] = 1, ["\\end" ] = -1,
@@ -222,5 +259,6 @@ _foldsymbols = {
},
["helper"] = folds,
["command"] = folds,
+ ["user"] = folds,
["grouping"] = folds,
}
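
Note: the factored metafunenvironment pattern is meant to be equivalent to the commented-out list above it. A standalone sanity check, assuming only a plain lpeg install (not part of the commit):

local lpeg = require("lpeg")
local P = lpeg.P

local metafunenvironment = ( P("use") + P("reusable") + P("unique") ) * P("MPgraphic")
                         + P("MP") * ( P("code") + P("page") + P("inclusions")
                                     + P("extensions") + P("graphic") )

local pattern = metafunenvironment * P(-1) -- force a whole-string match

assert(pattern:match("useMPgraphic"))
assert(pattern:match("reusableMPgraphic"))
assert(pattern:match("uniqueMPgraphic"))
assert(pattern:match("MPcode"))
assert(pattern:match("MPpage"))
assert(not pattern:match("MPfoo"))
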
diff --git a/context/data/scite/lexers/scite-context-lexer.lua b/context/data/scite/lexers/scite-context-lexer.lua
index 688eb5776..04937cbc0 100644
--- a/context/data/scite/lexers/scite-context-lexer.lua
+++ b/context/data/scite/lexers/scite-context-lexer.lua
@@ -16,6 +16,14 @@ local info = {
-- function and optimizing the lex function gained another 2+ seconds. A 6 second load
-- is quite ok for me.
+-- Function load(lexer_name) starts with _M.WHITESPACE = lexer_name..'_whitespace' which
+-- means that we need to have it frozen at the moment we load another lexer. Because spacing
+-- is used to revert to a parent lexer we need to make sure that we load children as late
+-- as possible in order not to get the wrong whitespace trigger. This took me quite a while
+-- to figure out (not being that familiar with the internals). BTW, if performance becomes
+-- an issue we can rewrite the main lex function (memorize the grammars and speed up the
+-- byline variant).
+
local R, P, S, Cp, Cs, Ct, Cmt, Cc = lpeg.R, lpeg.P, lpeg.S, lpeg.Cp, lpeg.Cs, lpeg.Ct, lpeg.Cmt, lpeg.Cc
local lpegmatch = lpeg.match
local find, gmatch, match, lower, upper, gsub = string.find, string.gmatch, string.match, string.lower, string.upper, string.gsub
@@ -27,22 +35,6 @@ dofile(_LEXERHOME .. '/lexer.lua')
lexer.context = lexer.context or { }
--- function lexer.context.loaddefinitions(name)
--- local basepath = lexer.context and lexer.context.path or _LEXERHOME
--- local definitions = loadfile(basepath and (basepath .. "/" .. name) or name)
--- if not definitions then
--- definitions = loadfile(_LEXERHOME .. "/context/" .. name)
--- end
--- if type(definitions) == "function" then
--- definitions = definitions()
--- end
--- if type(definitions) == "table" then
--- return definitions
--- else
--- return nil
--- end
--- end
-
function lexer.context.loaddefinitions(name)
local definitions = loadfile(_LEXERHOME .. "/context/" .. name)
if not definitions and lexer.context and lexer.context.path then
@@ -91,19 +83,34 @@ function lexer.context.word_match(words,word_chars,case_insensitive)
end
end
--- nicer anyway:
+-- nicer (todo: utf):
--- todo: utf
+local defaults = R("az","AZ","\127\255","__")
-function lexer.context.exact_match(words,case_insensitive)
- local pattern = S(concat(words)) + R("az","AZ","\127\255") -- the concat catches _ etc
+function lexer.context.exact_match(words,word_chars,case_insensitive)
+ local characters = concat(words)
+ local pattern -- the concat catches _ etc
+ if word_chars == true or word_chars == false or word_chars == nil then
+ word_chars = ""
+ end
+ if type(word_chars) == "string" then
+ pattern = S(characters) + defaults
+ if case_insensitive then
+ pattern = pattern + S(upper(characters)) + S(lower(characters))
+ end
+ if word_chars ~= "" then
+ pattern = pattern + S(word_chars)
+ end
+ elseif word_chars then
+ pattern = word_chars
+ end
if case_insensitive then
local list = { }
for i=1,#words do
list[lower(words[i])] = true
end
return Cmt(pattern^1, function(_,i,s)
- return list[lower(s)] and i
+ return list[lower(s)] -- and i
end)
else
local list = { }
@@ -111,28 +118,20 @@ function lexer.context.exact_match(words,case_insensitive)
list[words[i]] = true
end
return Cmt(pattern^1, function(_,i,s)
- return list[s] and i
+ return list[s] -- and i
end)
end
end
-function lexer.context.word_match(words,word_chars,case_insensitive) -- word_chars not used (can be omitted)
- if word_chars == true then
- return lexer.context.exact_match(words,true)
- else
- return lexer.context.exact_match(words,case_insensitive)
- end
-end
+-- overloaded functions
--- Overloaded functions.
+local FOLD_BASE = SC_FOLDLEVELBASE
+local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
+local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
-local FOLD_BASE = SC_FOLDLEVELBASE
-local FOLD_HEADER = SC_FOLDLEVELHEADERFLAG
-local FOLD_BLANK = SC_FOLDLEVELWHITEFLAG
-
-local newline = P("\r\n") + S("\r\n")
-
-local splitlines = Ct( ( Ct ( (Cp() * Cs((1-newline)^1) * newline^-1) + (Cp() * Cc("") * newline) ) )^0)
+local get_style_at = GetStyleAt
+local get_property = GetProperty
+local get_indent_amount = GetIndentAmount
local h_table, b_table, n_table = { }, { }, { }
@@ -140,53 +139,53 @@ setmetatable(h_table, { __index = function(t,level) local v = { level, FOLD_HEAD
setmetatable(b_table, { __index = function(t,level) local v = { level, FOLD_BLANK } t[level] = v return v end })
setmetatable(n_table, { __index = function(t,level) local v = { level } t[level] = v return v end })
-local get_style_at = GetStyleAt
-local get_property = GetProperty
-local get_indent_amount = GetIndentAmount
-
--- local lines = lpegmatch(splitlines,text) -- iterating over lines is faster
--- for i=1, #lines do
--- local li = lines[i]
--- local line = li[2]
--- if line ~= "" then
--- local pos = li[1]
--- for i=1,nofpatterns do
--- for s, m in gmatch(line,patterns[i]) do
--- if hash[m] then
--- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
--- if symbols then
--- local l = symbols[m]
--- if l then
--- local t = type(l)
--- if t == 'number' then
--- current_level = current_level + l
--- elseif t == 'function' then
--- current_level = current_level + l(text, pos, line, s, match)
--- end
--- if current_level < FOLD_BASE then -- integrate in previous
--- current_level = FOLD_BASE
--- end
--- end
+-- local newline = P("\r\n") + S("\r\n")
+-- local splitlines = Ct( ( Ct ( (Cp() * Cs((1-newline)^1) * newline^-1) + (Cp() * Cc("") * newline) ) )^0)
+--
+-- local lines = lpegmatch(splitlines,text) -- iterating over lines is faster
+-- for i=1, #lines do
+-- local li = lines[i]
+-- local line = li[2]
+-- if line ~= "" then
+-- local pos = li[1]
+-- for i=1,nofpatterns do
+-- for s, m in gmatch(line,patterns[i]) do
+-- if hash[m] then
+-- local symbols = fold_symbols[get_style_at(start_pos + pos + s - 1)]
+-- if symbols then
+-- local l = symbols[m]
+-- if l then
+-- local t = type(l)
+-- if t == 'number' then
+-- current_level = current_level + l
+-- elseif t == 'function' then
+-- current_level = current_level + l(text, pos, line, s, match)
+-- end
+-- if current_level < FOLD_BASE then -- integrate in previous
+-- current_level = FOLD_BASE
-- end
-- end
-- end
-- end
--- if current_level > prev_level then
--- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
--- else
--- folds[line_num] = n_table[prev_level] -- { prev_level }
--- end
--- prev_level = current_level
--- else
--- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
-- end
--- line_num = line_num + 1
-- end
-
--- not that much faster but less memory:
+-- if current_level > prev_level then
+-- folds[line_num] = h_table[prev_level] -- { prev_level, FOLD_HEADER }
+-- else
+-- folds[line_num] = n_table[prev_level] -- { prev_level }
+-- end
+-- prev_level = current_level
+-- else
+-- folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
+-- end
+-- line_num = line_num + 1
+-- end
+--
+-- -- not that much faster but less memory:
local action_y, action_n
+local newline = P("\r\n") + S("\r\n")
local splitlines = ( (
(Cp() * Cs((1-newline)^1) * newline^-1) / function(p,l) action_y(p,l) end
+ ( newline ) / function() action_n() end
@@ -194,7 +193,7 @@ local splitlines = ( (
function lexer.context.fold(text, start_pos, start_line, start_level)
if text == '' then
- return folds
+ return { }
end
local lexer = global._LEXER
if lexer._fold then
@@ -220,7 +219,7 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
end
fold_symbols._hash = hash
end
- action_y = function(pos,line)
+ action_y = function(pos,line) -- we can consider moving this one outside the function
for i=1,nofpatterns do
for s, m in gmatch(line,patterns[i]) do
if hash[m] then
@@ -253,12 +252,12 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
prev_level = current_level
line_num = line_num + 1
end
- action_n = function()
+ action_n = function() -- we can consider moving this one outside the function
folds[line_num] = b_table[prev_level] -- { prev_level, FOLD_BLANK }
line_num = line_num + 1
end
local lines = lpegmatch(splitlines,text)
- elseif get_property('fold.by.indentation', 1) == 1 then
+ elseif get_property('fold.by.indentation',1) == 1 then
local current_line = start_line
local prev_level = start_level
for _, line in gmatch(text,'([\t ]*)(.-)\r?\n') do
@@ -303,58 +302,97 @@ function lexer.context.fold(text, start_pos, start_line, start_level)
return folds
end
-function lexer.context.lex(text, init_style)
+function lexer.context.lex(text,init_style)
local lexer = global._LEXER
local grammar = lexer._GRAMMAR
if not grammar then
return { }
- elseif lexer._LEXBYLINE then
+ elseif lexer._LEXBYLINE then -- we could keep token
local tokens = { }
local offset = 0
local noftokens = 0
- for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
- local line_tokens = lpeg_match(grammar, line)
- if line_tokens then
- for i=1,#line_tokens do
- local token = line_tokens[i]
- token[2] = token[2] + offset
+ if true then
+ for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
+ local line_tokens = lpeg_match(grammar,line)
+ if line_tokens then
+ for i=1,#line_tokens do
+ local token = line_tokens[i]
+ token[2] = token[2] + offset
+ noftokens = noftokens + 1
+ tokens[noftokens] = token
+ end
+ end
+ offset = offset + #line
+ if noftokens > 0 and tokens[noftokens][2] ~= offset then
noftokens = noftokens + 1
- tokens[noftokens] = token
+ tokens[noftokens] = { 'default', offset + 1 }
end
end
- offset = offset + #line
- if noftokens > 0 and tokens[noftokens][2] ~= offset then
- noftokens = noftokens + 1
- tokens[noftokens] = { 'default', offset + 1 }
+ else -- alternative
+ local lasttoken, lastoffset
+ for line in gmatch(text,'[^\r\n]*\r?\n?') do -- could be an lpeg
+ local line_tokens = lpeg_match(grammar,line)
+ if line_tokens then
+ for i=1,#line_tokens do
+ lasttoken = line_tokens[i]
+ lastoffset = lasttoken[2] + offset
+ lasttoken[2] = lastoffset
+ noftokens = noftokens + 1
+ tokens[noftokens] = lasttoken
+ end
+ end
+ offset = offset + #line
+ if lastoffset ~= offset then
+ lastoffset = offset + 1
+ lasttoken = { 'default', lastoffset }
+ noftokens = noftokens + 1
+ tokens[noftokens] = lasttoken
+ end
end
end
return tokens
elseif lexer._CHILDREN then
+ -- as we cannot print, tracing is not possible ... this might change as we can as well
+ -- generate them all in one go (sharing as much as possible)
local _hash = lexer._HASH
if not hash then
hash = { }
lexer._HASH = hash
end
grammar = hash[init_style]
- if not grammar then
+ if grammar then
+ lexer._GRAMMAR = grammar
+ else
for style, style_num in next, lexer._TOKENS do
if style_num == init_style then
local lexer_name = match(style,'^(.+)_whitespace') or lexer._NAME
if lexer._INITIALRULE ~= lexer_name then
- build_grammar(lexer, lexer_name)
+ grammar = hash[lexer_name]
+ if not grammar then
+ build_grammar(lexer,lexer_name)
+ grammar = lexer._GRAMMAR
+ hash[lexer_name] = grammar
+ end
end
break
end
end
- grammar = lexer._GRAMMAR
+ grammar = grammar or lexer._GRAMMAR
hash[init_style] = grammar
end
- return lpegmatch(grammar, text)
+ return lpegmatch(grammar,text)
else
- return lpegmatch(grammar, text)
+ return lpegmatch(grammar,text)
end
end
-lexer.fold = lexer.context.fold
-lexer.lex = lexer.context.lex
-lexer.word_match = lexer.context.word_match
+-- todo: keywords: one lookup and multiple matches
+
+-- function lexer.context.token(name, patt)
+-- return Ct(patt * Cc(name) * Cp())
+-- end
+
+lexer.fold = lexer.context.fold
+lexer.lex = lexer.context.lex
+-- lexer.token = lexer.context.token
+lexer.exact_match = lexer.context.exact_match
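
Note: with exact_match now exported as lexer.exact_match, each lexer's call sites reduce to one pattern per word class; returning true from the Cmt body is equivalent to the old "and i", since lpeg accepts true as "match ends at the current position". A hypothetical call site, inside a lexer module where lexer and lpeg are available (word list and extra characters are illustrative):

local P = lpeg.P
local token, exact_match = lexer.token, lexer.exact_match

local words   = { "begingroup", "endgroup", "bgroup", "egroup" } -- hypothetical
local keyword = token("keyword", P("\\") * exact_match(words))

-- extra word characters and case insensitivity via the new signature:
-- exact_match(words, "@!?", true)
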
diff --git a/context/data/scite/lexers/themes/scite-context-theme.lua b/context/data/scite/lexers/themes/scite-context-theme.lua
index 556779ce6..360a5d435 100644
--- a/context/data/scite/lexers/themes/scite-context-theme.lua
+++ b/context/data/scite/lexers/themes/scite-context-theme.lua
@@ -6,10 +6,8 @@ local info = {
license = "see context related readme files",
}
--- we need a proper pipe:
---
--- -- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
--- -- global.trace("OEPS") -- how do we get access to the regular lua extensions
+-- context_path = string.split(os.resultof("mtxrun --find-file context.mkiv"))[1] or ""
+-- global.trace("OEPS") -- how do we get access to the regular lua extensions
local context_path = "t:/sources" -- c:/data/tex-context/tex/texmf-context/tex/base
local font_name = 'Dejavu Sans Mono'
@@ -17,7 +15,7 @@ local font_size = 14
local global = _G
-dofile(_LEXERHOME .. '/themes/scite.lua') -- starting point so we miss nothing
+-- dofile(_LEXERHOME .. '/themes/scite.lua') -- starting point so we miss nothing
module('lexer', package.seeall)
@@ -63,44 +61,59 @@ style_nothing = style {
-- empty
}
-style_comment = style { fore = colors.yellow }
-style_string = style { fore = colors.magenta }
-
-style_char = style { fore = colors.magenta }
-style_class = style { fore = colors.black, bold = true }
-style_constant = style { fore = colors.cyan, bold = true }
-style_definition = style { fore = colors.black, bold = true }
-style_error = style { fore = colors.red }
-style_function = style { fore = colors.black, bold = true }
-style_keyword = style { fore = colors.blue, bold = true }
-style_number = style { fore = colors.cyan }
-style_operator = style { fore = colors.blue }
-style_preproc = style { fore = colors.yellow, bold = true }
-style_tag = style { fore = colors.cyan }
-style_type = style { fore = colors.blue }
-style_variable = style { fore = colors.black }
-style_identifier = style_nothing
-
-style_line_number = style { back = colors.linepanel, }
-style_bracelight = style { bold = true, fore = colors.orange }
-style_bracebad = style { bold = true, fore = colors.orange }
-style_indentguide = style { fore = colors.linepanel, back = colors.white }
-style_calltip = style { fore = colors.white, back = colors.tippanel }
-style_controlchar = style_nothing
-
-style_context_preamble = style_comment
-style_context_comment = style_comment
-style_context_string = style_string
-style_context_default = style_nothing
-style_context_number = style_number
-style_context_keyword = style_keyword
-style_context_quote = style { fore = colors.blue, bold = true }
-style_context_primitive = style_keyword
-style_context_plain = style { fore = colors.dark, bold = true }
-style_context_command = style { fore = colors.green, bold = true }
-style_context_embedded = style { fore = colors.black, bold = true }
-style_context_user = style { fore = colors.green }
-style_context_grouping = style { fore = colors.red }
-style_context_specials = style { fore = colors.blue }
-style_context_extras = style { fore = colors.yellow }
+style_number = style { fore = colors.cyan }
+style_comment = style { fore = colors.yellow }
+style_string = style { fore = colors.magenta }
+style_keyword = style { fore = colors.blue, bold = true }
+style_char = style { fore = colors.magenta }
+style_class = style { fore = colors.black, bold = true }
+style_constant = style { fore = colors.cyan, bold = true }
+style_definition = style { fore = colors.black, bold = true }
+style_error = style { fore = colors.red }
+style_function = style { fore = colors.black, bold = true }
+style_operator = style { fore = colors.blue }
+style_preproc = style { fore = colors.yellow, bold = true }
+style_tag = style { fore = colors.cyan }
+style_type = style { fore = colors.blue }
+style_variable = style { fore = colors.black }
+style_identifier = style_nothing
+
+style_line_number = style { back = colors.linepanel }
+style_bracelight = style { fore = colors.orange, bold = true }
+style_bracebad = style { fore = colors.orange, bold = true }
+style_indentguide = style { fore = colors.linepanel, back = colors.white }
+style_calltip = style { fore = colors.white, back = colors.tippanel }
+style_controlchar = style_nothing
+
+lexer.context.styles = {
+
+ -- ["whitespace"] = style_whitespace,
+
+ ["default"] = style_nothing,
+ ["number"] = style_number,
+ ["comment"] = style_comment,
+ ["keyword"] = style_keyword,
+ ["string"] = style_string,
+
+ ["command"] = style { fore = colors.green, bold = true },
+ ["preamble"] = style_comment,
+ ["embedded"] = style { fore = colors.black, bold = true },
+ ["grouping"] = style { fore = colors.red },
+ ["primitive"] = style_keyword,
+ ["plain"] = style { fore = colors.dark, bold = true },
+ ["user"] = style { fore = colors.green },
+ ["data"] = style_constant,
+ ["special"] = style { fore = colors.blue },
+ ["extra"] = style { fore = colors.yellow },
+ ["quote"] = style { fore = colors.blue, bold = true },
+
+}
+
+local styleset = { }
+
+for k, v in next, lexer.context.styles do
+ styleset[#styleset+1] = { k, v }
+end
+
+lexer.context.styleset = styleset
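
Note: after this loop, every lexer that sets _tokenstyles = lexer.context.styleset receives the same { name, style } pairs, so a token name only has to be styled once, here in the theme. The result has roughly this shape (illustrative only; the actual order depends on the hash traversal):

-- lexer.context.styleset ==
-- {
--     { "default", style_nothing },
--     { "number",  style_number  },
--     { "comment", style_comment },
--     { "keyword", style_keyword },
--     -- one { name, style } pair per entry in lexer.context.styles
-- }
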
diff --git a/context/data/scite/scite-context-readme.tex b/context/data/scite/scite-context-readme.tex
index 7af38ecd9..32c3cfb10 100644
--- a/context/data/scite/scite-context-readme.tex
+++ b/context/data/scite/scite-context-readme.tex
@@ -15,6 +15,7 @@ lexers/scite-context-lexer-cld.lua
lexers/scite-context-lexer.lua
lexers/context/mult-def.lua
lexers/context/mult-prm.lua
+lexers/context/mult-low.lua
lexers/context/mult-mps.lua
lexers/themes/scite-context-theme.lua
\stoptyping
diff --git a/context/data/scite/scite-context.properties b/context/data/scite/scite-context.properties
index ea48ecc10..14af2182d 100644
--- a/context/data/scite/scite-context.properties
+++ b/context/data/scite/scite-context.properties
@@ -626,6 +626,6 @@ comment.block.at.line.start.lpeg_scite-context-lexer-cld=1
comment.block.lpeg_props=#
comment.block.at.line.start.lpeg_props=1
-style.*.34=bold,fore=#7F0000,back:#CFCFCF
-style.*.35=bold,fore=#7F0000,back:#CFCFCF
+style.*.34=bold
+style.*.35=bold
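
Note: styles 34 and 35 are Scintilla's brace-highlight and brace-bad styles; dropping the fixed fore/back colours here leaves their appearance to the theme's style_bracelight and style_bracebad definitions above.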