Diffstat (limited to 'tex')
-rw-r--r--  tex/context/base/char-utf.lua                        |  150
-rw-r--r--  tex/context/base/cont-new.mkiv                       |    2
-rw-r--r--  tex/context/base/context-version.pdf                 |  bin 4347 -> 4342 bytes
-rw-r--r--  tex/context/base/context.mkiv                        |    2
-rw-r--r--  tex/context/base/node-fin.lua                        |    2
-rw-r--r--  tex/context/base/node-inj.lua                        |    2
-rw-r--r--  tex/context/base/publ-imp-list.mkvi                  |   16
-rw-r--r--  tex/context/base/publ-imp-test.bib                   |  283
-rw-r--r--  tex/context/base/publ-ini.lua                        |    3
-rw-r--r--  tex/context/base/publ-ini.mkiv                       |    4
-rw-r--r--  tex/context/base/regi-ini.lua                        |  139
-rw-r--r--  tex/context/base/status-files.pdf                    |  bin 24888 -> 24904 bytes
-rw-r--r--  tex/context/base/status-lua.pdf                      |  bin 248187 -> 248181 bytes
-rw-r--r--  tex/context/base/typo-dha.lua                        |    2
-rw-r--r--  tex/context/base/typo-man.lua                        |    2
-rw-r--r--  tex/generic/context/luatex/luatex-fonts-merged.lua   |    2
16 files changed, 476 insertions, 133 deletions
diff --git a/tex/context/base/char-utf.lua b/tex/context/base/char-utf.lua
index 8714d6b44..0d45bbd07 100644
--- a/tex/context/base/char-utf.lua
+++ b/tex/context/base/char-utf.lua
@@ -27,7 +27,11 @@ over a string.</p>
local concat, gmatch, gsub, find = table.concat, string.gmatch, string.gsub, string.find
local utfchar, utfbyte, utfcharacters, utfvalues = utf.char, utf.byte, utf.characters, utf.values
local allocate = utilities.storage.allocate
-local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
+local lpegmatch, lpegpatterns, P = lpeg.match, lpeg.patterns, lpeg.P
+
+if not characters then
+ require("char-def")
+end
local charfromnumber = characters.fromnumber
@@ -294,7 +298,7 @@ not collecting tokens is not only faster but also saves garbage collecting.
</p>
--ldx]]--
-local skippable = table.tohash { "mkiv", "mkvi" }
+local skippable = table.tohash { "mkiv", "mkvi", "mkix", "mkxi" }
local filesuffix = file.suffix
-- function utffilters.collapse(str,filename) -- we can make high a separate pass (never needed with collapse)
@@ -412,7 +416,7 @@ function utffilters.collapse(str,filename)
initialize()
end
local tree = lpeg.utfchartabletopattern(table.keys(collapsed))
- p_collapse = lpeg.Cs((tree/collapsed + lpegpatterns.utf8char)^0)
+ p_collapse = lpeg.Cs((tree/collapsed + lpegpatterns.utf8char)^0 * P(-1)) -- the P(1) is needed in order to accept non utf
end
if not str or #str == "" or #str == 1 then
return str
@@ -491,8 +495,7 @@ function utffilters.decompose(str) -- 3 to 4 times faster than the above
initialize()
end
local tree = lpeg.utfchartabletopattern(table.keys(decomposed))
- p_decompose = lpeg.Cs((tree/decomposed + lpegpatterns.utf8char)^0)
-
+ p_decompose = lpeg.Cs((tree/decomposed + lpegpatterns.utf8char)^0 * P(-1))
end
if str and str ~= "" and #str > 1 then
return lpegmatch(p_decompose,str)
@@ -521,30 +524,6 @@ end
-- --
--- local c1, c2, c3 = "a", "̂", "̃"
--- local r2, r3 = "â", "ẫ"
--- local l1 = "ffl"
---
--- local str = c1..c2..c3 .. " " .. c1..c2 .. " " .. l1
--- local res = r3 .. " " .. r2 .. " " .. "ffl"
---
--- local text = io.loaddata("t:/sources/tufte.tex")
---
--- local function test(n)
--- local data = text .. string.rep(str,100) .. text
--- local okay = text .. string.rep(res,100) .. text
--- local t = os.clock()
--- for i=1,10000 do
--- collapse(data)
--- end
--- print(os.clock()-t,decompose(collapse(data))==okay,decompose(collapse(str)))
--- end
---
--- test(050)
--- test(150)
-
--- --
-
local sequencers = utilities.sequencers
if sequencers then
@@ -571,3 +550,116 @@ if sequencers then
end)
end
+
+-- Faster when we deal with lots of data but somewhat complicated by the fact that we want to be
+-- downward compatible .. so maybe some day I'll simplify it. We seldom have large quantities of
+-- text.
+
+-- local p_processed = nil -- so we can reset if needed
+--
+-- function utffilters.preprocess(str,filename)
+-- if not p_processed then
+-- if initialize then
+-- initialize()
+-- end
+-- local merged = table.merged(collapsed,decomposed)
+-- local tree = lpeg.utfchartabletopattern(table.keys(merged))
+-- p_processed = lpeg.Cs((tree/merged + lpegpatterns.utf8char)^0 * P(-1)) -- the P(1) is needed in order to accept non utf
+-- local tree = lpeg.utfchartabletopattern(table.keys(collapsed))
+-- p_collapse = lpeg.Cs((tree/collapsed + lpegpatterns.utf8char)^0 * P(-1)) -- the P(1) is needed in order to accept non utf
+-- local tree = lpeg.utfchartabletopattern(table.keys(decomposed))
+-- p_decompose = lpeg.Cs((tree/decomposed + lpegpatterns.utf8char)^0 * P(-1)) -- the P(1) is needed in order to accept non utf
+-- end
+-- if not str or #str == "" or #str == 1 then
+-- return str
+-- elseif filename and skippable[filesuffix(filename)] then -- we could hash the collapsables or do a quicker test
+-- return str
+-- else
+-- return lpegmatch(p_processed,str) or str
+-- end
+-- end
+--
+-- local sequencers = utilities.sequencers
+--
+-- if sequencers then
+--
+-- local textfileactions = resolvers.openers.helpers.textfileactions
+--
+-- local collapse, decompose = false, false
+--
+-- sequencers.appendaction (textfileactions,"system","characters.filters.utf.preprocess")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.preprocess")
+--
+-- local function checkable()
+-- if decompose then
+-- if collapse then
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.collapse")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.decompose")
+-- sequencers.enableaction (textfileactions,"characters.filters.utf.preprocess")
+-- else
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.collapse")
+-- sequencers.enableaction (textfileactions,"characters.filters.utf.decompose")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.preprocess")
+-- end
+-- else
+-- if collapse then
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.collapse")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.decompose")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.preprocess")
+-- else
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.collapse")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.decompose")
+-- sequencers.disableaction(textfileactions,"characters.filters.utf.preprocess")
+-- end
+-- end
+-- end
+--
+-- function characters.filters.utf.enable()
+-- collapse = true
+-- decompose = true
+-- checkable()
+-- end
+--
+-- directives.register("filters.utf.collapse", function(v)
+-- collapse = v
+-- checkable()
+-- end)
+--
+-- directives.register("filters.utf.decompose", function(v)
+-- decompose = v
+-- checkable()
+-- end)
+--
+-- end
+
+-- local collapse = utffilters.collapse
+-- local decompose = utffilters.decompose
+-- local preprocess = utffilters.preprocess
+--
+-- local c1, c2, c3 = "a", "̂", "̃"
+-- local r2, r3 = "â", "ẫ"
+-- local l1 = "ffl"
+--
+-- local str = c1..c2..c3 .. " " .. c1..c2 .. " " .. l1
+-- local res = r3 .. " " .. r2 .. " " .. "ffl"
+--
+-- local text = io.loaddata("t:/sources/tufte.tex")
+--
+-- local function test(n)
+-- local data = text .. string.rep(str,100) .. text
+-- local okay = text .. string.rep(res,100) .. text
+-- local t = os.clock()
+-- for i=1,10000 do
+-- collapse(data)
+-- decompose(data)
+-- -- preprocess(data)
+-- end
+-- print(os.clock()-t,decompose(collapse(data))==okay,decompose(collapse(str)))
+-- end
+--
+-- test(050)
+-- test(150)
+--
+-- local old = "foo" .. string.char(0xE1) .. "bar"
+-- local new = collapse(old)
+-- print(old,new)
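
The appended P(-1) in p_collapse and p_decompose only lets the substitution pattern succeed when it has consumed the whole string, i.e. when the input is valid UTF-8; on stray 8-bit input lpegmatch then returns nil and the caller can fall back to the untouched string, as the commented preprocess variant does with "or str". A minimal standalone sketch of that behaviour with plain lpeg follows; utf8char, collapsed and tree are hand-built stand-ins for lpegpatterns.utf8char and lpeg.utfchartabletopattern, not the ConTeXt definitions.

    local lpeg = require("lpeg")
    local P, R, Cs = lpeg.P, lpeg.R, lpeg.Cs

    -- hand-built utf8 character pattern, standing in for lpegpatterns.utf8char
    local utf8char = R("\0\127")
                   + R("\194\223") * R("\128\191")
                   + R("\224\239") * R("\128\191") * R("\128\191")
                   + R("\240\244") * R("\128\191") * R("\128\191") * R("\128\191")

    -- one hypothetical collapse pair: e + combining acute (U+0301) -> é
    local collapsed = { ["e\204\129"] = "\195\169" }
    local tree      = P("e\204\129") -- stands in for the utfchartabletopattern tree

    local p_collapse = Cs((tree/collapsed + utf8char)^0 * P(-1))

    local good = "caf" .. "e\204\129"       -- valid utf8, combining mark collapses
    local bad  = "foo" .. string.char(0xE1) -- stray latin-1 byte, not valid utf8

    print(lpeg.match(p_collapse,good))        -- café
    print(lpeg.match(p_collapse,bad) or bad)  -- match fails, fall back to the input
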
diff --git a/tex/context/base/cont-new.mkiv b/tex/context/base/cont-new.mkiv
index a7dc71745..16bbf0708 100644
--- a/tex/context/base/cont-new.mkiv
+++ b/tex/context/base/cont-new.mkiv
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2014.06.17 16:53}
+\newcontextversion{2014.06.18 17:19}
%D This file is loaded at runtime, thereby providing an excellent place for
%D hacks, patches, extensions and new features.
diff --git a/tex/context/base/context-version.pdf b/tex/context/base/context-version.pdf
index 33c79b44e..7da17ba25 100644
--- a/tex/context/base/context-version.pdf
+++ b/tex/context/base/context-version.pdf
Binary files differ
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index 6a63eca5b..80b004fd6 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -28,7 +28,7 @@
%D up and the dependencies are more consistent.
\edef\contextformat {\jobname}
-\edef\contextversion{2014.06.17 16:53}
+\edef\contextversion{2014.06.18 17:19}
\edef\contextkind {beta}
%D For those who want to use this:
diff --git a/tex/context/base/node-fin.lua b/tex/context/base/node-fin.lua
index 76ad973da..5758fea38 100644
--- a/tex/context/base/node-fin.lua
+++ b/tex/context/base/node-fin.lua
@@ -363,7 +363,7 @@ local function selective(namespace,attribute,head,inheritance,default) -- two at
done = done or ok
end
-- end nested
- leader = false
+ leader = false
end
elseif default and inheritance then
if current ~= default then
diff --git a/tex/context/base/node-inj.lua b/tex/context/base/node-inj.lua
index b91646ffc..f9270b735 100644
--- a/tex/context/base/node-inj.lua
+++ b/tex/context/base/node-inj.lua
@@ -343,7 +343,7 @@ function injections.handler(head,where,keep)
for i=maxt,1,-1 do
ny = ny + d[i]
local ti = t[i]
- setfield(ti,"yoffset",ny)
+ setfield(ti,"yoffset",ny) -- maybe add to current yoffset
end
maxt = 0
end
diff --git a/tex/context/base/publ-imp-list.mkvi b/tex/context/base/publ-imp-list.mkvi
index d585fd48f..a1d015ea0 100644
--- a/tex/context/base/publ-imp-list.mkvi
+++ b/tex/context/base/publ-imp-list.mkvi
@@ -55,22 +55,18 @@
\currentbtxfirst
}
\stopsetups
-
-\startsetups \s!btx:\s!list:short
- \texdefinition {btx:list:inject} {
- \currentbtxfirst
- \currentbtxsecond
- }
-\stopsetups
-
\startsetups \s!btx:\s!list:num
\directsetup{\s!btx:\s!list:yes}
\stopsetups
\startsetups \s!btx:\s!list:bib
\directsetup{\s!btx:\s!list:yes}
\stopsetups
-\startsetups \s!btx:\s!list:unknown
- \directsetup{\s!btx:\s!list:yes}
+
+\startsetups \s!btx:\s!list:short
+ \texdefinition {btx:list:inject} {
+ \currentbtxfirst
+ \currentbtxsecond
+ }
\stopsetups
\protect
diff --git a/tex/context/base/publ-imp-test.bib b/tex/context/base/publ-imp-test.bib
index 50e887a88..0d81b49ff 100644
--- a/tex/context/base/publ-imp-test.bib
+++ b/tex/context/base/publ-imp-test.bib
@@ -1,63 +1,294 @@
-% The fields. This file needs to be filled with dummy data. Either fake names
-% or maybe something DEK.
+% This is a test bibliography for developing publ-imp-XXX files. Although
+% meant for testing apa styles, it can also be used for other styles.
+%
+% All publication categories have an example entry here. Most have an entry
+% with a "-min" key, containing only the minimal set of fields.
+%
+% Maintained by: Alan Braslau, Hans Hagen, Robin Kirkham
+
+@article {test-article-min,
+ author = "An Author",
+ title = "Title-article",
+ journal = "Journal",
+ year = "Year"
+}
@article {test-article,
- author = "...",
- comment = "...",
- crossref = "...",
- editor = "...",
- issue = "...",
- journal = "...",
- note = "...",
- pages = "...",
- title = "...",
- volume = "...",
- year = "...",
+ author = "An Author",
+ title = "Title-article",
+ journal = "Journal",
+ year = "Year",
+ volume = "Volume",
+ number = "Number",
+ pages = "Pages",
+ month = "Month",
+ note = "Note"
+}
+
+% author, editor, or both
+
+@book {test-book-min,
+ author = "An Author",
+ title = "Title-book",
+ publisher = "Publisher",
+ year = "Year"
}
+% author, editor, or both
+% volume, number, or both
+
@book {test-book,
- author = "...",
- comment = "...",
- chapter = "...",
- crossref = "...",
- editor = "...",
- series = "...",
- pages = "...",
- publisher = "...",
- volume = "...",
- year = "...",
+ author = "An Author",
+ editor = "An Editor",
+ title = "Title-book",
+ publisher = "Publisher",
+ year = "Year",
+ volume = "Volume",
+ number = "Number",
+ series = "Series",
+ address = "Address",
+ edition = "Edition",
+ month = "Month",
+ note = "Note"
}
-@inbook {test-inbook,
+% no author
+
+@book {test-book-edited,
+ editor = "An Editor",
+ title = "Title-book-edited",
+ publisher = "Publisher",
+ year = "Year"
+}
+
+@booklet {test-booklet-min,
+ title = "Title-booklet"
}
@booklet {test-booklet,
+ title = "Title-booklet",
+ author = "An Author",
+ howpublished = "How-published",
+ address = "Address",
+ month = "Month",
+ year = "Year",
+ note = "Note"
}
-@manual {test-manual,
+% author, editor, or both
+% chapter, pages, or both
+
+@inbook {test-inbook-min,
+ author = "An Author",
+ editor = "An Editor",
+ title = "Title-inbook",
+ chapter = "Chapter",
+ pages = "Pages",
+ publisher = "Publisher",
+ year = "Year"
+}
+
+% author, editor, or both
+% chapter, pages, or both
+% volume, number, or both
+
+@inbook {test-inbook,
+ author = "An Author",
+ editor = "An Editor",
+ title = "Title-inbook",
+ chapter = "Chapter",
+ pages = "Pages",
+ publisher = "Publisher",
+ year = "Year",
+ volume = "Volume",
+ number = "Number",
+ series = "Series",
+ type = "Type",
+ address = "Address",
+ edition = "Edition",
+ month = "Month",
+ note = "Note"
}
+@incollection {test-incollection-min,
+ author = "An Author",
+ title = "Title-incollection",
+ booktitle = "Booktitle",
+ publisher = "Publisher",
+ year = "Year"
+}
+
+% volume, number, or both
+
@incollection {test-incollection,
+ author = "An Author",
+ title = "Title-incollection",
+ booktitle = "Booktitle",
+ publisher = "Publisher",
+ year = "Year",
+ editor = "An Editor",
+ volume = "Volume",
+ number = "Number",
+ series = "Series",
+ type = "Type",
+ chapter = "Chapter",
+ pages = "Pages",
+ address = "Address",
+ edition = "Edition",
+ month = "Month",
+ note = "Note"
}
+@inproceedings {test-inproceedings-min,
+ author = "An Author",
+ title = "Title-inproceedings",
+ booktitle = "Booktitle",
+ year = "Year"
+}
+
+% volume, number, or both
+
@inproceedings {test-inproceedings,
+ author = "An Author",
+ title = "Title-inproceedings",
+ booktitle = "Booktitle",
+ year = "Year",
+ editor = "An Editor",
+ volume = "Volume",
+ number = "Number",
+ series = "Series",
+ pages = "Pages",
+ address = "Address",
+ month = "Month",
+ organization = "Organization",
+ publisher = "Publisher",
+ note = "Note"
}
-@proceedings {test-proceedings,
+@manual {test-manual-min,
+ title = "Title-manual"
+}
+
+@manual {test-manual,
+ title = "Title-manual",
+ author = "An Author",
+ organization = "Organization",
+ address = "Address",
+ edition = "Edition",
+ month = "Month",
+	year         = "Year",
+ note = "Note"
+}
+
+
+@mastersthesis {test-mastersthesis-min,
+ author = "An Author",
+ title = "Title-mastersthesis",
+ school = "School",
+ year = "Year",
}
@mastersthesis {test-mastersthesis,
+ author = "An Author",
+ title = "Title-mastersthesis",
+ school = "School",
+ year = "Year",
+ type = "Type",
+ address = "Address",
+ month = "Month",
+ note = "Note"
+}
+
+@proceedings {test-proceedings-min,
+ title = "Title-proceedings",
+ year = "Year",
+}
+
+% volume, number, or both
+
+@proceedings {test-proceedings,
+ title = "Title-proceedings",
+ year = "Year",
+ editor = "An Editor",
+ volume = "Volume",
+ number = "Number",
+ series = "Series",
+ address = "Address",
+ month = "Month",
+ organization = "Organization",
+ publisher = "Publisher",
+ note = "Note"
+}
+
+@phdthesis {test-phdthesis-min,
+ author = "An Author",
+ title = "Title-phdthesis",
+ school = "School",
+ year = "Year",
}
@phdthesis {test-phdthesis,
+ author = "An Author",
+ title = "Title-phdthesis",
+ school = "School",
+ year = "Year",
+ type = "Type",
+ address = "Address",
+ month = "Month",
+ note = "Note"
+}
+
+@techreport {test-techreport-min,
+ author = "An Author",
+ title = "Title-techreport",
+ institution = "Institution",
+ year = "Year",
}
@techreport {test-techreport,
+ author = "An Author",
+ title = "Title-techreport",
+ institution = "Institution",
+ year = "Year",
+ type = "Type",
+ number = "Number",
+ address = "Address",
+ month = "Month",
+ note = "Note"
}
@misc {test-misc,
+ author = "An Author",
+ title = "Title-misc",
+ howpublished = "How-published",
+ month = "Month",
+ year = "Year",
+ note = "Note"
+}
+
+@unpublished {test-unpublished-min,
+ author = "An Author",
+ title = "Title-unpublished",
+ note = "Note"
}
@unpublished {test-unpublished,
+ author = "An Author",
+ title = "Title-unpublished",
+ note = "Note",
+ month = "Month",
+ year = "Year"
}
+% some other test entries
+
+@misc {test-manyauthor,
+ author = "A Author and B Author and C Author and D Author and
+ E Author and F Author and G Author and H Author and
+ I Author and J Author and K Author and L Author and
+ M Author and N Author and O Author and P Author and
+ Q Author and R Author and S Author and T Author and
+ U Author and V Author and W Author and X Author and
+ Y Author and Z Author",
+ title = "Title-Many Authors"
+}
diff --git a/tex/context/base/publ-ini.lua b/tex/context/base/publ-ini.lua
index cd65f65bc..38ec1e192 100644
--- a/tex/context/base/publ-ini.lua
+++ b/tex/context/base/publ-ini.lua
@@ -1892,6 +1892,9 @@ function listvariants.num(dataset,block,tag,variant,listindex)
ctx_btxlistsetup(variant)
end
+listvariants[v_yes] = listvariants.num
+listvariants.bib = listvariants.num
+
function listvariants.short(dataset,block,tag,variant,listindex)
local short = getdetail(dataset,tag,"short","short")
local suffix = getdetail(dataset,tag,"suffix","suffix")
diff --git a/tex/context/base/publ-ini.mkiv b/tex/context/base/publ-ini.mkiv
index 1048fd663..fa7a5bca0 100644
--- a/tex/context/base/publ-ini.mkiv
+++ b/tex/context/base/publ-ini.mkiv
@@ -655,7 +655,6 @@
% \setuvalue{\??btxnumbering\v!page }{\btxlistvariant{page}} % these will be setups
% \setuvalue{\??btxnumbering\v!short }{\btxlistvariant{short}} % these will be setups
% \setuvalue{\??btxnumbering\v!bib }{\btxlistvariant{num}} % these will be setups
-% \setuvalue{\??btxnumbering\s!unknown}{\btxlistvariant{num}} % these will be setups
% \setuvalue{\??btxnumbering\v!yes }{\btxlistvariant{num}} % these will be setups
\let\currentbtxnumbering\empty
@@ -1060,7 +1059,8 @@
% \c!setups=btx:\btxrenderingparameter\c!alternative:initialize, % not the same usage as cite !
\c!alternative=apa,
\c!sorttype=,
- \c!criterium=,
+% \c!criterium=,
+ \c!criterium=\v!text,
\c!refcommand=authoryears, % todo
\c!numbering=\v!yes,
% \c!autohang=\v!no, % not used
diff --git a/tex/context/base/regi-ini.lua b/tex/context/base/regi-ini.lua
index d5d278b16..63f45a0b1 100644
--- a/tex/context/base/regi-ini.lua
+++ b/tex/context/base/regi-ini.lua
@@ -243,8 +243,12 @@ end
regimes.push = push
regimes.pop = pop
-sequencers.prependaction(textlineactions,"system","regimes.process")
-sequencers.disableaction(textlineactions,"regimes.process")
+if sequencers then
+
+ sequencers.prependaction(textlineactions,"system","regimes.process")
+ sequencers.disableaction(textlineactions,"regimes.process")
+
+end
-- interface:
@@ -311,48 +315,82 @@ local patterns = { }
--
-- twice as fast and much less lpeg bytecode
+-- function regimes.cleanup(regime,str)
+-- if not str or str == "" then
+-- return str
+-- end
+-- local p = patterns[regime]
+-- if p == nil then
+-- regime = regime and synonyms[regime] or regime or currentregime
+-- local vector = regime ~= "utf" and regime ~= "utf-8" and mapping[regime]
+-- if vector then
+-- local utfchars = { }
+-- local firsts = { }
+-- for k, uchar in next, vector do
+-- local stream = { }
+-- local split = totable(uchar)
+-- local nofsplits = #split
+-- if nofsplits > 1 then
+-- local first
+-- for i=1,nofsplits do
+-- local u = vector[split[i]]
+-- if not first then
+-- first = firsts[u]
+-- if not first then
+-- first = { }
+-- firsts[u] = first
+-- end
+-- end
+-- stream[i] = u
+-- end
+-- local nofstream = #stream
+-- if nofstream > 1 then
+-- first[#first+1] = concat(stream,2,nofstream)
+-- utfchars[concat(stream)] = uchar
+-- end
+-- end
+-- end
+-- p = P(false)
+-- for k, v in next, firsts do
+-- local q = P(false)
+-- for i=1,#v do
+-- q = q + P(v[i])
+-- end
+-- p = p + P(k) * q
+-- end
+-- p = Cs(((p+1)/utfchars)^1)
+-- -- lpeg.print(p) -- size: 1042
+-- else
+-- p = false
+-- end
+-- patterns[regime] = p
+-- end
+-- return p and lpegmatch(p,str) or str
+-- end
+--
+-- 5 times faster:
+
function regimes.cleanup(regime,str)
+ if not str or str == "" then
+ return str
+ end
local p = patterns[regime]
if p == nil then
regime = regime and synonyms[regime] or regime or currentregime
- local vector = regime ~= "utf" and mapping[regime]
+ local vector = regime ~= "utf" and regime ~= "utf-8" and mapping[regime]
if vector then
- local utfchars = { }
- local firsts = { }
- for k, uchar in next, vector do
- local stream = { }
- local split = totable(uchar)
- local nofsplits = #split
- if nofsplits > 1 then
- local first
- for i=1,nofsplits do
- local u = vector[split[i]]
- if not first then
- first = firsts[u]
- if not first then
- first = { }
- firsts[u] = first
- end
- end
- stream[i] = u
- end
- local nofstream = #stream
- if nofstream > 1 then
- first[#first+1] = concat(stream,2,nofstream)
- utfchars[concat(stream)] = uchar
- end
+ local mapping = { }
+ for k, v in next, vector do
+ local split = totable(v)
+ for i=1,#split do
+ split[i] = utfchar(byte(split[i]))
end
- end
- p = P(false)
- for k, v in next, firsts do
- local q = P(false)
- for i=1,#v do
- q = q + P(v[i])
+ split = concat(split)
+ if v ~= split then
+ mapping[split] = v
end
- p = p + P(k) * q
end
- p = Cs(((p+1)/utfchars)^1)
- -- lpeg.print(p) -- size: 1042
+ p = Cs((lpeg.utfchartabletopattern(table.keys(mapping))/mapping+P(1))^0)
else
p = false
end
@@ -361,28 +399,9 @@ function regimes.cleanup(regime,str)
return p and lpegmatch(p,str) or str
end
--- local map = require("regi-cp1252")
-- local old = [[test ë ä ö ü crap]]
--- local new = correctencoding(map,old)
---
--- print(old,new)
-
--- obsolete:
---
--- function regimes.setsynonym(synonym,target)
--- synonyms[synonym] = target
--- end
---
--- function regimes.truename(regime)
--- return regime and synonyms[regime] or regime or currentregime
--- end
---
--- commands.setregimesynonym = regimes.setsynonym
---
--- function commands.trueregimename(regime)
--- context(regimes.truename(regime))
--- end
---
--- function regimes.load(regime)
--- return mapping[synonyms[regime] or regime]
--- end
+-- local new = regimes.cleanup("cp1252",old)
+-- report_translating("%s -> %s",old,new)
+-- local old = "Pozn" .. char(0xE1) .. "mky"
+-- local new = translate(old,"cp1250")
+-- report_translating("%s -> %s",old,new)
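
The rewritten regimes.cleanup above deals with text that, as the deleted usage comments suggest, was already UTF-8 but went through an 8-bit regime conversion once more, so every byte of a character got re-encoded on its own ("á" turning into "Ã¡"); the mapping it builds sends those byte-wise re-encodings back to the intended characters. A rough standalone illustration of the idea, using Lua 5.3's utf8 library instead of the ConTeXt utfchar/totable helpers (reencoded, intended and cleanup are illustrative names, and the gsub pass stands in for the single lpeg pattern the real code builds):

    local function reencoded(s)                -- simulate the damaged form
        local t = { }
        for i=1,#s do
            t[i] = utf8.char(string.byte(s,i)) -- each raw byte treated as a codepoint
        end
        return table.concat(t)
    end

    local intended = { "á", "é", "ë" }         -- hypothetical vector entries
    local mapping  = { }
    for _, v in ipairs(intended) do
        local bad = reencoded(v)
        if bad ~= v then
            mapping[bad] = v                   -- "Ã¡" -> "á" and so on
        end
    end

    local function cleanup(str)                -- simple replacement pass for the sketch
        for bad, good in pairs(mapping) do
            str = str:gsub(bad, good)
        end
        return str
    end

    local damaged = reencoded("Poznámky")
    print(damaged, cleanup(damaged))           -- PoznÃ¡mky   Poznámky
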
diff --git a/tex/context/base/status-files.pdf b/tex/context/base/status-files.pdf
index 10fd98be5..5e5159880 100644
--- a/tex/context/base/status-files.pdf
+++ b/tex/context/base/status-files.pdf
Binary files differ
diff --git a/tex/context/base/status-lua.pdf b/tex/context/base/status-lua.pdf
index 51fd100c0..adc194002 100644
--- a/tex/context/base/status-lua.pdf
+++ b/tex/context/base/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/typo-dha.lua b/tex/context/base/typo-dha.lua
index 4bce53481..8cc6ac5dc 100644
--- a/tex/context/base/typo-dha.lua
+++ b/tex/context/base/typo-dha.lua
@@ -346,7 +346,7 @@ local function process(start)
top = top - 1
end
obsolete[#obsolete+1] = current
- else
+ elseif trace_directions then
setcolor(current)
end
else
diff --git a/tex/context/base/typo-man.lua b/tex/context/base/typo-man.lua
index 9b3c8b92b..6c6d7926f 100644
--- a/tex/context/base/typo-man.lua
+++ b/tex/context/base/typo-man.lua
@@ -19,6 +19,8 @@ local global = global or _G
local methods = {
uppercase = characters.upper,
lowercase = characters.lower,
+ Word = converters.Word,
+ Words = converters.Words,
}
local function nothing(s) return s end -- we already have that one somewhere
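
The Word and Words additions above plug the capitalizing converters into the manipulator methods table. A rough standalone sketch of how such a name-to-function table is typically looked up and applied; manipulate and the string-library stand-ins for characters.upper/lower and converters.Word/Words are illustrative, not the actual typo-man code.

    local methods = {
        uppercase = string.upper,  -- stand-in for characters.upper
        lowercase = string.lower,  -- stand-in for characters.lower
        Word      = function(s) return (s:gsub("^%l",string.upper)) end,      -- capitalize the first word
        Words     = function(s) return (s:gsub("%f[%a]%l",string.upper)) end, -- capitalize every word
    }

    local function manipulate(method,str)
        local action = methods[method]
        return action and action(str) or str  -- unknown method: pass the string through
    end

    print(manipulate("Words","the quick brown fox"))  -- The Quick Brown Fox
    print(manipulate("Word", "the quick brown fox"))  -- The quick brown fox
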
diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua
index c964939d6..270a7512d 100644
--- a/tex/generic/context/luatex/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : luatex-fonts-merged.lua
-- parent file : luatex-fonts.lua
--- merge date : 06/17/14 16:53:07
+-- merge date : 06/18/14 17:19:49
do -- begin closure to overcome local limits and interference