-rw-r--r--  scripts/context/lua/mtx-check.lua  |  4
-rw-r--r--  tex/context/base/bibl-bib.mkiv     |  9
-rw-r--r--  tex/context/base/core-uti.lua      |  1
-rw-r--r--  tex/context/base/math-int.mkiv     |  5
-rw-r--r--  tex/context/base/meta-ini.mkiv     | 28
-rw-r--r--  tex/context/base/meta-txt.tex      |  6
-rw-r--r--  tex/context/base/mlib-pps.lua      |  2
-rw-r--r--  tex/context/base/sort-ini.lua      | 80
-rw-r--r--  tex/context/base/sort-lan.lua      | 61
-rw-r--r--  tex/context/base/spac-hor.lua      | 32
-rw-r--r--  tex/context/base/spac-hor.mkiv     | 22
-rw-r--r--  tex/context/base/strc-reg.lua      | 78
-rw-r--r--  tex/context/base/strc-reg.mkiv     | 66
-rw-r--r--  tex/context/base/strc-syn.mkiv     |  5
14 files changed, 283 insertions, 116 deletions
diff --git a/scripts/context/lua/mtx-check.lua b/scripts/context/lua/mtx-check.lua index 436a205e0..49c57ab97 100644 --- a/scripts/context/lua/mtx-check.lua +++ b/scripts/context/lua/mtx-check.lua @@ -40,6 +40,8 @@ do local l_s, r_s = P("["), P("]") local l_g, r_g = P("{"), P("}") + local okay = lpeg.P("{[}") + lpeg.P("{]}") + local esc = P("\\") local cr = P("\r") local lf = P("\n") @@ -72,7 +74,7 @@ do ["tokens"] = (V("ignore") + V("whatever") + V("grouped") + V("setup") + V("display") + V("inline") + V("errors") + 1)^0, ["whatever"] = line + esc * 1 + C(P("%") * (1-line)^0), ["grouped"] = l_g * (V("whatever") + V("grouped") + V("setup") + V("display") + V("inline") + (1 - l_g - r_g))^0 * r_g, - ["setup"] = l_s * (V("whatever") + V("grouped") + V("setup") + V("display") + V("inline") + (1 - l_s - r_s))^0 * r_s, + ["setup"] = l_s * (okay + V("whatever") + V("grouped") + V("setup") + V("display") + V("inline") + (1 - l_s - r_s))^0 * r_s, ["display"] = d_m * (V("whatever") + V("grouped") + (1 - d_m))^0 * d_m, ["inline"] = i_m * (V("whatever") + V("grouped") + (1 - i_m))^0 * i_m, ["errors"] = (V("gerror")+ V("serror") + V("derror") + V("ierror")), diff --git a/tex/context/base/bibl-bib.mkiv b/tex/context/base/bibl-bib.mkiv index aed598ca7..94737b03a 100644 --- a/tex/context/base/bibl-bib.mkiv +++ b/tex/context/base/bibl-bib.mkiv @@ -37,4 +37,13 @@ \xmlmain{#1} \stopxmlsetups +\def\bibxmldoifelse#1#2#3#4#5% entry field before after else + {\xmldoifelse{#1}{/field[@name='#2']}{#3\xmlfilter{#1}{/field[@name='#2']/context()}#4}{#5}} + +\def\bibxmldoif#1#2#3#4% entry field before after + {\xmldoif{#1}{/field[@name='#2']}{#3\xmlfilter{#1}{/field[@name='#2']/context()}#4}} + +\def\bibxmldoifnot#1#2#3#4% entry field before after + {\xmldoifnot{#1}{/field[@name='#2']}{#3\xmlfilter{#1}{/field[@name='#2']/context()}#4}} + \protect \endinput diff --git a/tex/context/base/core-uti.lua b/tex/context/base/core-uti.lua index 6a47cf45c..01fd8522b 100644 --- a/tex/context/base/core-uti.lua +++ b/tex/context/base/core-uti.lua @@ -194,6 +194,7 @@ local packlist = { "pagedata", "directives", "specification", + "processors", -- might become key under directives or metadata -- "references", -- we need to rename of them as only one packs (not structure.lists.references) } diff --git a/tex/context/base/math-int.mkiv b/tex/context/base/math-int.mkiv index 8ac2d4776..2af471b5c 100644 --- a/tex/context/base/math-int.mkiv +++ b/tex/context/base/math-int.mkiv @@ -57,11 +57,14 @@ \definemathcommand [iiint] {\repeatintegral\plustwo } \definemathcommand [iiiint] {\repeatintegral\plusthree} +%def\integralrepeatsymbol{\intop} +\def\integralrepeatsymbol{{\int}} + \def\repeatintegral#1% {\scratchtoks\emptytoks \let\dointlimits\donothing \let\dodointlimits\intlimits - \dorecurse{#1}{\appendtoks \intop \dointkern \to \scratchtoks} + \dorecurse{#1}{\appendtoks \integralrepeatsymbol \dointkern \to \scratchtoks} \appendtoks \intop \dointlimits \dodointlimits \to \scratchtoks \edef\dodorepeatintegral{\the\scratchtoks}% \futurelet\next\dorepeatintegral} diff --git a/tex/context/base/meta-ini.mkiv b/tex/context/base/meta-ini.mkiv index 9b26dbd44..8467b960b 100644 --- a/tex/context/base/meta-ini.mkiv +++ b/tex/context/base/meta-ini.mkiv @@ -267,30 +267,26 @@ \let\stopMPdrawing\relax -\long\def\startMPclip#1#2\stopMPclip % todo: store at the lua end - {\dostartcurrentMPgraphic - \long\setxvalue{MPC:#1}{\noexpand\ctxlua{metapost.theclippath( - "\currentMPgraphicinstance", - "\currentMPgraphicformat", - \!!bs#2\!!es, - 
\!!bs\currentMPinitializations\!!es, - \!!bs\currentMPpreamble\!!es - )}}% - \dostopcurrentMPgraphic} - -% \unexpanded\def\processMPgraphic -% {\doprocessMPgraphic\placeMPgraphic} - \let\stopMPclip\relax +\long\def\startMPclip#1#2\stopMPclip % todo: store at the lua end or just store less + {\long\setgvalue{MPC:#1}{#2}} + \def\grabMPclippath#1#2#3#4#5% #5 is alternative {\begingroup \edef\width {#3\space}\let\overlaywidth \width \edef\height{#4\space}\let\overlayheight\height \ifcsname MPC:#1\endcsname - \xdef\MPclippath{\getvalue{MPC:#1}}% + \dostartcurrentMPgraphic + \xdef\MPclippath{\normalexpanded{\noexpand\ctxlua{metapost.theclippath( + "\currentMPgraphicinstance", + "\currentMPgraphicformat", + \!!bs\getvalue{MPC:#1}\!!es, + \!!bs\currentMPinitializations\!!es, + \!!bs\currentMPpreamble\!!es + )}}}% + \dostopcurrentMPgraphic \ifx\MPclippath\empty\xdef\MPclippath{#5}\fi - \setxvalue{MPC:#1}{\MPclippath}% \else \xdef\MPclippath{#5}% \fi diff --git a/tex/context/base/meta-txt.tex b/tex/context/base/meta-txt.tex index b784d7dda..474253a40 100644 --- a/tex/context/base/meta-txt.tex +++ b/tex/context/base/meta-txt.tex @@ -24,6 +24,8 @@ % textext ipv btex ... etex +% we need a proper prefix here + \unprotect \startMPextensions @@ -35,7 +37,7 @@ % \def\newchar#1{\chardef#1=0 } \ifdefined\MPtoks \else \newtoks\MPtoks \fi -\ifdefined\MPnox \else \newbox \MPbox \fi +\ifdefined\MPbox \else \newbox \MPbox \fi \ifdefined\parwidth \else \newdimen\parwidth \fi \ifdefined\parheight \else \newdimen\parheight \fi @@ -44,7 +46,7 @@ \ifdefined\parlines \else \newcount\parlines \fi \ifdefined\partoks \else \newtoks \partoks \fi \ifdefined\shapetextbox \else \newbox \shapetextbox \fi -\ifdefined\ifparseries \else \newif \ifparseries \fi + \newif \ifparseries \ifdefined\parfirst \else \chardef \parfirst=0 \fi \def\startshapetext[#1]% diff --git a/tex/context/base/mlib-pps.lua b/tex/context/base/mlib-pps.lua index 900e0c7e7..c1614243a 100644 --- a/tex/context/base/mlib-pps.lua +++ b/tex/context/base/mlib-pps.lua @@ -685,7 +685,7 @@ do local function convert(str) found = true - return "textext(\"" .. str .. "\")" + return "rawtextext(\"" .. str .. 
"\")" -- centered end local function ditto(str) return "\" & ditto & \"" diff --git a/tex/context/base/sort-ini.lua b/tex/context/base/sort-ini.lua index a94eed946..0348e0132 100644 --- a/tex/context/base/sort-ini.lua +++ b/tex/context/base/sort-ini.lua @@ -15,9 +15,11 @@ if not modules then modules = { } end modules ['sort-ini'] = { local utf = unicode.utf8 local gsub, rep, sort, concat = string.gsub, string.rep, table.sort, table.concat +local utfbyte, utfchar = utf.byte, utf.char local utfcharacters, utfvalues, strcharacters = string.utfcharacters, string.utfvalues, string.characters +local chardata = characters.data -local trace_sorters = false -- true +local trace_tests = false trackers.register("sorters.tests", function(v) trace_tests = v end) sorters = { } sorters.comparers = { } @@ -27,12 +29,13 @@ sorters.mappings = { } sorters.replacements = { } sorters.language = 'en' -local mappings = sorters.mappings -local entries = sorters.entries +local mappings = sorters.mappings +local entries = sorters.entries +local replacements = sorters.replacements -function sorters.comparers.basic(sort_a,sort_b) +function sorters.comparers.basic(sort_a,sort_b,map) -- sm assignment is slow, will become sorters.initialize - local sm = mappings[sorters.language or sorters.defaultlanguage] or mappings.en + local sm = map or mappings[sorters.language or sorters.defaultlanguage] or mappings.en if #sort_a > #sort_b then if #sort_b == 0 then return 1 @@ -118,9 +121,9 @@ end function sorters.firstofsplit(split) -- numbers are left padded by spaces - local se = entries[sorters.language or sorters.defaultlanguage] or entries.en-- slow, will become sorters.initialize + local se = entries[sorters.language or sorters.defaultlanguage] or entries.en -- slow, will become sorters.initialize local vs = split[1] - local entry = (vs and vs[1]) or "" + local entry = vs and vs[1] or "" return entry, (se and se[entry]) or "\000" end @@ -132,37 +135,74 @@ function sorters.splitters.utf(str) -- brrr, todo: language local r = sorters.replacements[sorters.language] or sorters.replacements[sorters.defaultlanguage] or { } -- local m = mappings [sorters.language] or mappings [sorters.defaultlanguage] or { } local u = characters.uncompose - local b = utf.byte local t = { } for _,v in next, r do str = gsub(str,v[1],v[2]) end for c in utfcharacters(str) do -- maybe an lpeg - if #c == 1 then - t[#t+1] = c - else - for cc in strcharacters(c) do - t[#t+1] = cc - end - end + t[#t+1] = c end return t end +function table.remap(t) + local tt = { } + for k,v in pairs(t) do + tt[v] = k + end + return tt +end + function sorters.sort(entries,cmp) - if trace_sorters then + local language = sorters.language or sorters.defaultlanguage + local map = mappings[language] or mappings.en + if trace_tests then + local function pack(l) + local t = { } + for i=1,#l do + local tt, li = { }, l[i] + for j=1,#li do + local lij = li[j] + if utfbyte(lij) > 0xFF00 then + tt[j] = "[]" + else + tt[j] = li[j] + end + end + t[i] = concat(tt) + end + return concat(t," + ") + end sort(entries, function(a,b) - local r = cmp(a,b) + local r = cmp(a,b,map) local as, bs = a.split, b.split if as and bs then - logs.report("sorter","%s %s %s", - concat(as[1]), (not r and "?") or (r<0 and "<") or (r>0 and ">") or "=", concat(bs[1])) + logs.report("sorter","%s %s %s",pack(as),(not r and "?") or (r<0 and "<") or (r>0 and ">") or "=",pack(bs)) end return r == -1 end) else sort(entries, function(a,b) - return cmp(a,b) == -1 + return cmp(a,b,map) == -1 end) end end + +function 
sorters.add_uppercase_entries(entries) + for k, v in pairs(entries) do + local u = chardata[utfbyte(k)].uccode + if u then + entries[utfchar(u)] = v + end + end +end + +function sorters.add_uppercase_mappings(mappings,offset) + offset = offset or 0 + for k, v in pairs(mappings) do + local u = chardata[utfbyte(k)].uccode + if u then + mappings[utfchar(u)] = v + offset + end + end +end diff --git a/tex/context/base/sort-lan.lua b/tex/context/base/sort-lan.lua index d80254728..8f5d95708 100644 --- a/tex/context/base/sort-lan.lua +++ b/tex/context/base/sort-lan.lua @@ -8,6 +8,9 @@ if not modules then modules = { } end modules ['sort-lan'] = { local utf = unicode.utf8 +local uc = utf.char +local ub = utf.byte + -- this is a rather preliminary and incomplete file -- maybe we should load this kind of stuff runtime @@ -16,6 +19,8 @@ local utf = unicode.utf8 -- The next one can be more efficient when not indexed this way, but -- other languages are sparse so for the moment we keep this one. +-- replacements are indexed as they need to be applied in sequence + sorters = sorters or { entries = { }, replacements = { }, mappings = { } } sorters.entries['en'] = { @@ -65,9 +70,6 @@ sorters.mappings ['nl'] = sorters.mappings['en'] -- czech -local uc = utf.char -local ub = utf.byte - sorters.replacements['cz'] = { [1] = { "ch", uc(0xFF01) } } @@ -161,6 +163,59 @@ sorters.mappings['cz'] = { [uc(0x017E)] = 42, -- zcaron } +sorters.mappings['cz'] = { + ['a'] = 1, -- a + [uc(0x00E1)] = 3, -- aacute + ['b'] = 5, -- b + ['c'] = 7, -- c + [uc(0x010D)] = 9, -- ccaron + ['d'] = 11, -- d + [uc(0x010F)] = 13, -- dcaron + ['e'] = 15, -- e + [uc(0x00E9)] = 17, -- eacute + [uc(0x011B)] = 19, -- ecaron + ['f'] = 21, -- f + ['g'] = 23, -- g + ['h'] = 25, -- h + [uc(0xFF01)] = 27, -- ch + ['i'] = 29, -- i + [uc(0x00ED)] = 31, -- iacute + ['j'] = 33, -- j + ['k'] = 35, -- k + ['l'] = 37, -- l + ['m'] = 39, -- m + ['n'] = 41, -- n + [uc(0x0147)] = 43, -- ncaron + [uc(0x00F3)] = 45, -- oacute + ['p'] = 47, -- p + ['q'] = 49, -- q + ['r'] = 51, -- r + [uc(0x0147)] = 53, -- rcaron + ['s'] = 55, -- s + [uc(0x0161)] = 57, -- scaron + ['t'] = 59, -- t + [uc(0x0165)] = 61, -- tcaron + ['u'] = 63, -- u + [uc(0x00FA)] = 65, -- uacute + [uc(0x016F)] = 67, -- uring + ['v'] = 69, -- v + ['w'] = 71, -- w + ['x'] = 73, -- x + ['y'] = 75, -- y + [uc(0x00FD)] = 77, -- yacute + ['z'] = 79, -- z + [uc(0x017E)] = 81, -- zcaron +} + +sorters.replacements['cs'] = sorters.replacements['cz'] +sorters.entries ['cs'] = sorters.entries ['cz'] +sorters.mappings ['cs'] = sorters.mappings ['cz'] + +sorters.add_uppercase_entries (sorters.entries.cs) +sorters.add_uppercase_mappings(sorters.mappings.cs,1) + +--~ print(table.serialize(sorters.mappings.cs)) + -- French sorters.entries ['fr'] = sorters.entries ['en'] diff --git a/tex/context/base/spac-hor.lua b/tex/context/base/spac-hor.lua new file mode 100644 index 000000000..6cb6e3f49 --- /dev/null +++ b/tex/context/base/spac-hor.lua @@ -0,0 +1,32 @@ +if not modules then modules = { } end modules ['spac-hor'] = { + version = 1.001, + comment = "companion to spac-hor.mkiv", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +local match = string.match +local utfbyte = utf.byte +local chardata = characters.data + +local can_have_space = table.tohash { + "lu", "ll", "lt", "lm", "lo", -- letters + -- "mn", "mc", "me", -- marks + "nd", "nl", "no", -- numbers + "ps", "pi", -- initial + -- "pe", "pf", -- final 
+ -- "pc", "pd", "po", -- punctuation + "sm", "sc", "sk", "so", -- symbols + -- "zs", "zl", "zp", -- separators + -- "cc", "cf", "cs", "co", "cn", -- others +} + +function commands.autonextspace(str) -- todo: use nexttoken + local ch = match(str,"the letter (.)") or match(str,"the character (.)") + ch = ch and chardata[utfbyte(ch)] + if ch and can_have_space[ch.category] then + -- texsprint(ctxcatcodes,"\\space") -- faster + context.space() + end +end diff --git a/tex/context/base/spac-hor.mkiv b/tex/context/base/spac-hor.mkiv index cfd2677d4..a8d71705a 100644 --- a/tex/context/base/spac-hor.mkiv +++ b/tex/context/base/spac-hor.mkiv @@ -15,6 +15,8 @@ \unprotect +\registerctxluafile{spac-hor}{1.001} + \let\currentindentation\empty % amount/keyword \let\currentindenting \empty % method @@ -907,4 +909,24 @@ \normalspaces % to be sure \to \everybeforeoutput +%D A more robust variant ofthe \MKII\ one: +%D +%D \startbuffer +%D bla \TEX\autoinsertnextspace bla +%D bla \TEX\autoinsertnextspace (bla) +%D bla (\TEX\autoinsertnextspace) bla +%D bla \TEX\autoinsertnextspace\ bla +%D \stopbuffer +%D +%D \typebuffer \getbuffer + +% unexpanded is important here as it prevents premature expansion in +% e.g. \normalexpanded{\egroup\sortingparameter\c!next} + +\unexpanded\def\autoinsertnextspace + {\futurelet\nexttoken\doautoinsertnextspace} + +\def\doautoinsertnextspace + {\ctxlua{commands.autonextspace("\meaning\nexttoken")}} % todo, just consult nexttoken at the lua end + \protect \endinput diff --git a/tex/context/base/strc-reg.lua b/tex/context/base/strc-reg.lua index 60d00f8b2..c4adbfca4 100644 --- a/tex/context/base/strc-reg.lua +++ b/tex/context/base/strc-reg.lua @@ -18,10 +18,13 @@ local ctxcatcodes = tex.ctxcatcodes local variables = interfaces.variables -local helpers = structure.helpers -local sections = structure.sections -local documents = structure.documents -local pages = structure.pages +local helpers = structure.helpers +local sections = structure.sections +local documents = structure.documents +local pages = structure.pages +local processors = structure.processors + +local processor_split = processors.split local matching_till_depth, number_at_depth = sections.matching_till_depth, sections.number_at_depth @@ -216,8 +219,19 @@ local function preprocessentries(rawdata) local entries = rawdata.entries if entries then local e, k = entries[1] or "", entries[2] or "" - local et = (type(e) == "table" and e) or lpegmatch(entrysplitter,e) - local kt = (type(k) == "table" and k) or lpegmatch(entrysplitter,k) + local et, kt, entryproc, pageproc + if type(e) == "table" then + et = e + else + entryproc, e = processor_split(e) + et = lpegmatch(entrysplitter,e) + end + if type(k) == "table" then + kt = e + else + pageproc, k = processor_split(k) + kt = lpegmatch(entrysplitter,k) + end entries = { } for k=1,#et do entries[k] = { et[k] or "", kt[k] or "" } @@ -230,6 +244,9 @@ local function preprocessentries(rawdata) end end rawdata.list = entries + if pageproc or entryproc then + rawdata.processors = { entryproc, pageproc } + end rawdata.entries = nil else rawdata.list = { { "", "" } } -- br @@ -434,19 +451,45 @@ function jobregisters.flush(data,options,prefixspec,pagespec) local function pagenumber(entry) local er = entry.references texsprint(ctxcatcodes,format("\\registeronepage{%s}{%s}{",er.internal or 0,er.realpage or 0)) -- internal realpage content +local proc = entry.processors and entry.processors[2] +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + 
helpers.prefixpage(entry,prefixspec,pagespec) + texsprint(ctxcatcodes,"}") +else helpers.prefixpage(entry,prefixspec,pagespec) +end texsprint(ctxcatcodes,"}") end local function pagerange(f_entry,t_entry,is_last) local er = f_entry.references texsprint(ctxcatcodes,format("\\registerpagerange{%s}{%s}{",er.internal or 0,er.realpage or 0)) +local proc = entry.processors and entry.processors[2] +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + helpers.prefixpage(f_entry,prefixspec,pagespec) + texsprint(ctxcatcodes,"}") +else helpers.prefixpage(f_entry,prefixspec,pagespec) +end local er = t_entry.references texsprint(ctxcatcodes,format("}{%s}{%s}{",er.internal or 0,er.lastrealpage or er.realpage or 0)) if is_last then +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + helpers.prefixlastpage(t_entry,prefixspec,pagespec) -- swaps page and realpage keys + texsprint(ctxcatcodes,"}") +else helpers.prefixlastpage(t_entry,prefixspec,pagespec) -- swaps page and realpage keys +end else +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + helpers.prefixpage(t_entry,prefixspec,pagespec) + texsprint(ctxcatcodes,"}") +else helpers.prefixpage(t_entry,prefixspec,pagespec) +end end texsprint(ctxcatcodes,"}") end @@ -483,10 +526,24 @@ function jobregisters.flush(data,options,prefixspec,pagespec) end if metadata then texsprint(ctxcatcodes,"\\registerentry{") +local proc = entry.processors and entry.processors[1] +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + helpers.title(e[i],metadata) + texsprint(ctxcatcodes,"}") +else helpers.title(e[i],metadata) +end texsprint(ctxcatcodes,"}") else +local proc = entry.processors and entry.processors[1] +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + texsprint(ctxcatcodes,format("\\registerentry{%s}",e[i])) + texsprint(ctxcatcodes,"}") +else texsprint(ctxcatcodes,format("\\registerentry{%s}",e[i])) +end end else done[i] = false @@ -496,7 +553,7 @@ function jobregisters.flush(data,options,prefixspec,pagespec) local kind = entry.metadata.kind if kind == 'entry' then texsprint(ctxcatcodes,"\\startregisterpages") ---~ collapse_ranges = true + --~ collapse_ranges = true if collapse_singles or collapse_ranges then -- we collapse ranges and keep existing ranges as they are -- so we get prebuilt as well as built ranges @@ -647,7 +704,14 @@ function jobregisters.flush(data,options,prefixspec,pagespec) elseif kind == 'see' then -- maybe some day more words texsprint(ctxcatcodes,"\\startregisterseewords") +local proc = entry.processors and entry.processors[1] +if proc then + texsprint(ctxcatcodes,"\\applyprocessor{",proc,"}{") + texsprint(ctxcatcodes,format("\\registeroneword{0}{0}{%s}",entry.seeword.text)) -- todo: internal + texsprint(ctxcatcodes,"}") +else texsprint(ctxcatcodes,format("\\registeroneword{0}{0}{%s}",entry.seeword.text)) -- todo: internal +end texsprint(ctxcatcodes,"\\stopregisterseewords") end end diff --git a/tex/context/base/strc-reg.mkiv b/tex/context/base/strc-reg.mkiv index b9498c0d2..09c65b64b 100644 --- a/tex/context/base/strc-reg.mkiv +++ b/tex/context/base/strc-reg.mkiv @@ -145,66 +145,6 @@ % tzt variant met n entries, parameters en userdata (altnum) -% \def\doregisterstructurepageregister#1#2#3% register data userdata -% {\begingroup -% \edef\currentregister{#1}% -% \getparameters[\??id][\c!entries=,\c!label=,\c!keys=,\c!alternative=,#2]% -% \edef\currentregisterlabel {\registerparameter\c!label}% -% 
\edef\currentregisterexpansion{\registerparameter\c!expansion}% -% \edef\currentregisterownnumber{\registerparameter\c!ownnumber}% -% \xdef\currentregisterkeys {\registerparameter\c!keys}% -% \ifx\currentregisterexpansion\s!xml -% \xmlstartraw -% \xdef\currentregisterentries{\registerparameter\c!entries}% -% \xmlstopraw -% \globallet\currentregistercoding\s!xml -% \else -% \ifx\currentregisterexpansion\v!yes -% \xdef\currentregisterentries{\registerparameter\c!entries}% -% \else -% \xdef\currentregisterentries{\detokenizedregisterparameter\c!entries}% -% \fi -% \globallet\currentregistercoding\s!tex -% \fi -% \setnextinternalreference -% % we could consider storing register entries in a list which we -% % could then sort -% \xdef\currentregisternumber{\ctxlua{ -% jobregisters.store { -% metadata = { -% kind = "entry", -% name = "\currentregister", -% level = structure.sections.currentlevel(), -% coding = "\currentregistercoding", -% catcodes = \the\catcodetable, -% \ifx\currentregisterownnumber\v!yes -% own = "\registerparameter\c!alternative", % can be used instead of pagenumber -% \fi -% }, -% references = { -% internal = \nextinternalreference, -% section = structure.sections.currentid(), % hm, why then not also lastsection the same way -% label = "\currentregisterlabel", -% }, -% \ifx\currentregisterentries\empty \else -% entries = { -% % we need a special one for xml, this is just a single one -% \!!bs\currentregisterentries\!!es, \!!bs\currentregisterkeys\!!es -% }, -% \fi -% userdata = structure.helpers.touserdata(\!!bs\detokenize{#3}\!!es) -% } -% } }% -% \ctxlua{jobreferences.setinternalreference(nil,nil,\nextinternalreference)}% -% \ifx\currentregisterownnumber\v!yes -% \glet\currentregistersynchronize\relax -% \else -% \xdef\currentregistersynchronize{\ctxlatelua{jobregisters.enhance("\currentregister",\currentregisternumber)}}% -% \fi -% \currentregistersynchronize % here? -% \attribute\destinationattribute\lastdestinationattribute \strut % todo -% \endgroup} - \getparameters [\??id] [\c!label=, @@ -230,7 +170,7 @@ \edef\currentregisterownnumber{\registerparameter\c!ownnumber}% \xdef\currentregisterkeys {\registerparameter\c!keys}% \xdef\currentregisterentries {\registerparameter\c!entries}% -\xdef\currentregisterxmlsetup{\registerparameter\c!xmlsetup}% + \xdef\currentregisterxmlsetup {\registerparameter\c!xmlsetup}% \ifx\currentregisterentries\empty \ifx\currentregisterexpansion\s!xml \xmlstartraw @@ -300,8 +240,8 @@ }, references = { internal = \nextinternalreference, - section = structure.sections.currentid(), % hm, why then not also lastsection the same way - label = "\currentregisterlabel", + section = structure.sections.currentid(), % hm, why then not also lastsection the same way + label = "\currentregisterlabel", }, % \ifx\currentregisterentries\empty \else entries = { diff --git a/tex/context/base/strc-syn.mkiv b/tex/context/base/strc-syn.mkiv index 9da6aad40..be24497f0 100644 --- a/tex/context/base/strc-syn.mkiv +++ b/tex/context/base/strc-syn.mkiv @@ -172,7 +172,7 @@ \def\currentsynonym{#1}% \dosetsynonymattributes\c!synonymstyle\c!synonymcolor \synonymparameter\c!synonymcommand{\ctxlua{joblists.synonym("#1","#2")}}% - \endgroup} + \normalexpanded{\endgroup\synonymparameter\c!next}} \def\placelistofsynonyms {\dodoubleempty\doplacelistofsynonyms} @@ -320,9 +320,10 @@ \unexpanded\def\doinsertsort#1#2% name tag {\begingroup % no kap currently, of .. 
we need to map cap onto WORD + \edef\currentsorting{#1}% \dosetsynonymattributes\c!style\c!color \ctxlua{joblists.synonym("#1","#2")}% - \endgroup} + \normalexpanded{\endgroup\sortingparameter\c!next}} \def\registersort {\dodoubleargument\doregistersort} |
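
A note on the new sorter helpers added in sort-ini.lua and applied to the 'cs' tables in sort-lan.lua above: they derive uppercase entries and mappings from the lowercase ones via characters.data[...].uccode. The following is a minimal sketch of that pattern for a made-up language tag; the 'xx' tables are illustrative only and not part of this commit:

    -- hypothetical 'xx' sorter data, following the 'cs'/'cz' pattern above
    sorters.entries ['xx'] = { ['a'] = 'a', ['b'] = 'b' }  -- lowercase entry texts
    sorters.mappings['xx'] = { ['a'] = 1,   ['b'] = 3   }  -- odd values leave room for uppercase

    sorters.add_uppercase_entries (sorters.entries.xx)      -- adds ['A'] = 'a', ['B'] = 'b'
    sorters.add_uppercase_mappings(sorters.mappings.xx,1)   -- adds ['A'] = 2,   ['B'] = 4

Spacing the lowercase mapping values by two is what makes the +1 offset for the uppercase forms work, which matches the doubled values (1, 3, 5, ...) used in the new 'cz' mapping in this commit.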