From a81728fa7c7e1b92d436835ac4019b2e46f80853 Mon Sep 17 00:00:00 2001
From: Hans Hagen
Date: Mon, 22 Feb 2010 19:38:00 +0100
Subject: beta 2010.02.22 19:38
---
tex/context/base/cont-new.tex | 2 +-
tex/context/base/context.mkiv | 3 +-
tex/context/base/context.tex | 2 +-
tex/context/base/core-uti.lua | 4 +-
tex/context/base/data-res.lua | 10 +-
tex/context/base/font-afm.lua | 5 +-
tex/context/base/font-dum.lua | 5 +-
tex/context/base/font-ini.mkiv | 8 +-
tex/context/base/font-map.lua | 161 +++++-
tex/context/base/font-mis.lua | 2 +-
tex/context/base/font-otf.lua | 156 +-----
tex/context/base/font-syn.lua | 2 +-
tex/context/base/font-tfm.lua | 5 +-
tex/context/base/font-xtx.lua | 17 +-
tex/context/base/lang-ini.lua | 4 +-
tex/context/base/luat-cbk.lua | 20 +-
tex/context/base/luat-dum.lua | 3 +-
tex/context/base/luat-run.lua | 17 +-
tex/context/base/math-noa.lua | 6 +-
tex/context/base/node-fin.lua | 4 +-
tex/context/base/node-mig.lua | 4 +-
tex/context/base/node-pag.lua | 26 +
tex/context/base/node-pag.mkiv | 20 +
tex/context/base/node-par.lua | 35 +-
tex/context/base/node-par.mkiv | 4 +-
tex/context/base/node-pro.lua | 20 +-
tex/context/base/node-seq.lua | 45 +-
tex/context/base/node-tsk.lua | 109 +++-
tex/context/base/spac-ver.lua | 18 +-
tex/context/base/strc-flt.mkiv | 17 +-
tex/context/base/strc-ref.mkiv | 2 +-
tex/context/base/task-ini.lua | 10 +-
tex/context/base/trac-inf.lua | 6 +
tex/context/base/type-otf.mkiv | 20 +
tex/generic/context/luatex-fonts-merged.lua | 819 ++++++++++++++--------------
tex/generic/context/luatex-fonts.lua | 2 +-
36 files changed, 901 insertions(+), 692 deletions(-)
create mode 100644 tex/context/base/node-pag.lua
create mode 100644 tex/context/base/node-pag.mkiv
(limited to 'tex')
diff --git a/tex/context/base/cont-new.tex b/tex/context/base/cont-new.tex
index 604b418bf..9022a8e99 100644
--- a/tex/context/base/cont-new.tex
+++ b/tex/context/base/cont-new.tex
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2010.02.20 10:23}
+\newcontextversion{2010.02.22 19:38}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/context.mkiv b/tex/context/base/context.mkiv
index d26c146c3..a650f03b9 100644
--- a/tex/context/base/context.mkiv
+++ b/tex/context/base/context.mkiv
@@ -71,8 +71,9 @@
\loadmarkfile{node-ini}
\loadmarkfile{node-fin}
-\loadmarkfile{node-par}
\loadmarkfile{node-mig}
+\loadmarkfile{node-par}
+\loadmarkfile{node-pag}
\loadmarkfile{core-var}
diff --git a/tex/context/base/context.tex b/tex/context/base/context.tex
index 75cc3909d..eb2750649 100644
--- a/tex/context/base/context.tex
+++ b/tex/context/base/context.tex
@@ -20,7 +20,7 @@
%D your styles an modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2010.02.20 10:23}
+\edef\contextversion{2010.02.22 19:38}
%D For those who want to use this:
diff --git a/tex/context/base/core-uti.lua b/tex/context/base/core-uti.lua
index c1cf0627c..7dd3bba19 100644
--- a/tex/context/base/core-uti.lua
+++ b/tex/context/base/core-uti.lua
@@ -261,9 +261,7 @@ end
-- eventually this will end up in strc-ini
statistics.register("startup time", function()
- if statistics.elapsedindeed(ctx) then
- return format("%s seconds (including runtime option file processing)", statistics.elapsedtime(ctx))
- end
+ return statistics.elapsedseconds(ctx,"including runtime option file processing")
end)
statistics.register("jobdata time",function()
diff --git a/tex/context/base/data-res.lua b/tex/context/base/data-res.lua
index 83d78e066..40cd3eb1a 100644
--- a/tex/context/base/data-res.lua
+++ b/tex/context/base/data-res.lua
@@ -108,8 +108,8 @@ suffixes['lua'] = { 'lua', 'luc', 'tma', 'tmc' }
alternatives['map files'] = 'map'
alternatives['enc files'] = 'enc'
-alternatives['cid files'] = 'cid'
-alternatives['fea files'] = 'fea'
+alternatives['cid maps'] = 'cid' -- great, why no cid files
+alternatives['font feature files'] = 'fea' -- and fea files here
alternatives['opentype fonts'] = 'otf'
alternatives['truetype fonts'] = 'ttf'
alternatives['truetype collections'] = 'ttc'
@@ -252,8 +252,10 @@ local function check_configuration() -- not yet ok, no time for debugging now
-- bad luck
end
fix("LUAINPUTS" , ".;$TEXINPUTS;$TEXMFSCRIPTS") -- no progname, hm
- fix("FONTFEATURES", ".;$TEXMF/fonts/fea//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS")
- fix("FONTCIDMAPS" , ".;$TEXMF/fonts/cid//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS")
+ -- this will go away some day
+ fix("FONTFEATURES", ".;$TEXMF/fonts/{data,fea}//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS")
+ fix("FONTCIDMAPS" , ".;$TEXMF/fonts/{data,cid}//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS")
+ --
fix("LUATEXLIBS" , ".;$TEXMF/luatex/lua//")
end
diff --git a/tex/context/base/font-afm.lua b/tex/context/base/font-afm.lua
index 348121a83..c9cfdfd26 100644
--- a/tex/context/base/font-afm.lua
+++ b/tex/context/base/font-afm.lua
@@ -30,7 +30,7 @@ fonts.afm = fonts.afm or { }
local afm = fonts.afm
local tfm = fonts.tfm
-afm.version = 1.401 -- incrementing this number one up will force a re-cache
+afm.version = 1.402 -- incrementing this number one up will force a re-cache
afm.syncspace = true -- when true, nicer stretch values
afm.enhance_data = true -- best leave this set to true
afm.features = { }
@@ -282,6 +282,8 @@ function afm.load(filename)
logs.report("load afm", "add extra kerns")
afm.add_kerns(data) -- faster this way
end
+ logs.report("load afm", "add tounicode data")
+ fonts.map.add_to_unicode(data,filename)
data.size = size
data.verbose = fonts.verbose
logs.report("load afm","saving: %s in cache",name)
@@ -338,7 +340,6 @@ function afm.unify(data, filename)
luatex.marks = { } -- todo
luatex.names = names -- name to index
luatex.private = private
-fonts.otf.enhancers["analyse unicodes"](data,filename)
end
--[[ldx--
diff --git a/tex/context/base/font-dum.lua b/tex/context/base/font-dum.lua
index 9dcabf6a3..8e13b5b1b 100644
--- a/tex/context/base/font-dum.lua
+++ b/tex/context/base/font-dum.lua
@@ -10,8 +10,9 @@ fonts = fonts or { }
-- general
-fonts.otf.pack = false
-fonts.tfm.resolve_vf = false -- no sure about this
+fonts.otf.pack = false
+fonts.tfm.resolve_vf = false -- no sure about this
+fonts.tfm.fontname_mode = "specification" -- somehow latex needs this
-- readers
diff --git a/tex/context/base/font-ini.mkiv b/tex/context/base/font-ini.mkiv
index e3c7585ef..ef2e259c0 100644
--- a/tex/context/base/font-ini.mkiv
+++ b/tex/context/base/font-ini.mkiv
@@ -2896,13 +2896,7 @@
%D The next auxilliary macro is an alternative to \type
%D {\fontname}.
-% \def\purefontname#1{\expandafter\splitoffpurefontname\fontname#1 \\}
-%
-% extra level is needed:
-
-\def\purefontname#1{\@EA\splitoffpurefontname\@EA{\@EA{\@EA\unstringed\fontname#1}} \\}
-
-\def\splitoffpurefontname#1 #2\\{#1}
+\def\purefontname#1{\ctxlua{file.basename("\fontname#1"}} % will be function using id
%D \macros
%D {switchstyleonly}
diff --git a/tex/context/base/font-map.lua b/tex/context/base/font-map.lua
index 9e85516d6..9dd2bd0f3 100644
--- a/tex/context/base/font-map.lua
+++ b/tex/context/base/font-map.lua
@@ -6,10 +6,13 @@ if not modules then modules = { } end modules ['font-map'] = {
license = "see context related readme files"
}
-local match, format, find, concat, gsub = string.match, string.format, string.find, table.concat, string.gsub
+local utf = unicode.utf8
+local match, format, find, concat, gsub, lower = string.match, string.format, string.find, table.concat, string.gsub, string.lower
local lpegmatch = lpeg.match
+local utfbyte = utf.byte
-local trace_loading = false trackers.register("otf.loading", function(v) trace_loading = v end)
+local trace_loading = false trackers.register("otf.loading", function(v) trace_loading = v end)
+local trace_unimapping = false trackers.register("otf.unimapping", function(v) trace_unimapping = v end)
local ctxcatcodes = tex and tex.ctxcatcodes
@@ -128,7 +131,7 @@ function fonts.map.load_file(filename, entries, encodings)
return entries, encodings
end
-function fonts.map.load_lum_table(filename)
+local function load_lum_table(filename)
local lumname = file.replacesuffix(file.basename(filename),"lum")
local lumfile = resolvers.find_file(lumname,"map") or ""
if lumfile ~= "" and lfs.isfile(lumfile) then
@@ -154,7 +157,7 @@ local parser = unicode + ucode + index
local parsers = { }
-function fonts.map.make_name_parser(str)
+local function make_name_parser(str)
if not str or str == "" then
return parser
else
@@ -181,7 +184,7 @@ end
--~ test("index1234")
--~ test("Japan1.123")
-function fonts.map.tounicode16(unicode)
+local function tounicode16(unicode)
if unicode < 0x10000 then
return format("%04X",unicode)
else
@@ -189,7 +192,7 @@ function fonts.map.tounicode16(unicode)
end
end
-function fonts.map.tounicode16sequence(unicodes)
+local function tounicode16sequence(unicodes)
local t = { }
for l=1,#unicodes do
local unicode = unicodes[l]
@@ -222,3 +225,149 @@ end
--~ return s
--~ end
+fonts.map.load_lum_table = load_lum_table
+fonts.map.make_name_parser = make_name_parser
+fonts.map.tounicode16 = tounicode16
+fonts.map.tounicode16sequence = tounicode16sequence
+
+local separator = lpeg.S("_.")
+local other = lpeg.C((1 - separator)^1)
+local ligsplitter = lpeg.Ct(other * (separator * other)^0)
+
+--~ print(table.serialize(lpegmatch(ligsplitter,"this")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"this.that")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"japan1.123")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more.that")))
+
+fonts.map.add_to_unicode = function(data,filename)
+ local unicodes = data.luatex and data.luatex.unicodes
+ if not unicodes then
+ return
+ end
+ -- we need to move this code
+ unicodes['space'] = unicodes['space'] or 32
+ unicodes['hyphen'] = unicodes['hyphen'] or 45
+ unicodes['zwj'] = unicodes['zwj'] or 0x200D
+ unicodes['zwnj'] = unicodes['zwnj'] or 0x200C
+ -- the tounicode mapping is sparse and only needed for alternatives
+ local tounicode, originals, ns, nl, private, unknown = { }, { }, 0, 0, fonts.private, format("%04X",utfbyte("?"))
+ data.luatex.tounicode, data.luatex.originals = tounicode, originals
+ local lumunic, uparser, oparser
+ if false then -- will become an option
+ lumunic = load_lum_table(filename)
+ lumunic = lumunic and lumunic.tounicode
+ end
+ local cidinfo, cidnames, cidcodes = data.cidinfo
+ local usedmap = cidinfo and cidinfo.usedname
+ usedmap = usedmap and lower(usedmap)
+ usedmap = usedmap and fonts.cid.map[usedmap]
+ if usedmap then
+ oparser = usedmap and make_name_parser(cidinfo.ordering)
+ cidnames = usedmap.names
+ cidcodes = usedmap.unicodes
+ end
+ uparser = make_name_parser()
+ local aglmap = fonts.map and fonts.map.agl_to_unicode
+ for index, glyph in next, data.glyphs do
+ local name, unic = glyph.name, glyph.unicode or -1 -- play safe
+ if unic == -1 or unic >= private or (unic >= 0xE000 and unic <= 0xF8FF) or unic == 0xFFFE or unic == 0xFFFF then
+ local unicode = (lumunic and lumunic[name]) or (aglmap and aglmap[name])
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ -- cidmap heuristics, beware, there is no guarantee for a match unless
+ -- the chain resolves
+ if (not unicode) and usedmap then
+ local foundindex = lpegmatch(oparser,name)
+ if foundindex then
+ unicode = cidcodes[foundindex] -- name to number
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ else
+ local reference = cidnames[foundindex] -- number to name
+ if reference then
+ local foundindex = lpegmatch(oparser,reference)
+ if foundindex then
+ unicode = cidcodes[foundindex]
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ end
+ if not unicode then
+ local foundcodes, multiple = lpegmatch(uparser,reference)
+ if foundcodes then
+ if multiple then
+ originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
+ else
+ originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ -- a.whatever or a_b_c.whatever or a_b_c (no numbers)
+ if not unicode then
+ local split = lpegmatch(ligsplitter,name)
+ local nplit = (split and #split) or 0
+ if nplit == 0 then
+ -- skip
+ elseif nplit == 1 then
+ local base = split[1]
+ unicode = unicodes[base] or (aglmap and aglmap[base])
+ if unicode then
+ if type(unicode) == "table" then
+ unicode = unicode[1]
+ end
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ else
+ local t = { }
+ for l=1,nplit do
+ local base = split[l]
+ local u = unicodes[base] or (aglmap and aglmap[base])
+ if not u then
+ break
+ elseif type(u) == "table" then
+ t[#t+1] = u[1]
+ else
+ t[#t+1] = u
+ end
+ end
+ if #t > 0 then -- done then
+ originals[index], tounicode[index], nl, unicode = t, tounicode16sequence(t), nl + 1, true
+ end
+ end
+ end
+ -- last resort
+ if not unicode then
+ local foundcodes, multiple = lpegmatch(uparser,name)
+ if foundcodes then
+ if multiple then
+ originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
+ else
+ originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
+ end
+ end
+ end
+ if not unicode then
+ originals[index], tounicode[index] = 0xFFFD, "FFFD"
+ end
+ end
+ end
+ if trace_unimapping then
+ for index, glyph in table.sortedpairs(data.glyphs) do
+ local toun, name, unic = tounicode[index], glyph.name, glyph.unicode or -1 -- play safe
+ if toun then
+ logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X, tounicode: %s",index,name,unic,toun)
+ else
+ logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X",index,name,unic)
+ end
+ end
+ end
+ if trace_loading and (ns > 0 or nl > 0) then
+ logs.report("load otf","enhance: %s tounicode entries added (%s ligatures)",nl+ns, ns)
+ end
+end
diff --git a/tex/context/base/font-mis.lua b/tex/context/base/font-mis.lua
index 1e2dece13..025bcb42e 100644
--- a/tex/context/base/font-mis.lua
+++ b/tex/context/base/font-mis.lua
@@ -11,7 +11,7 @@ local lower, strip = string.lower, string.strip
fonts.otf = fonts.otf or { }
-fonts.otf.version = fonts.otf.version or 2.644
+fonts.otf.version = fonts.otf.version or 2.645
fonts.otf.pack = true
fonts.otf.cache = containers.define("fonts", "otf", fonts.otf.version, true)
diff --git a/tex/context/base/font-otf.lua b/tex/context/base/font-otf.lua
index 84a305912..3488bdfe8 100644
--- a/tex/context/base/font-otf.lua
+++ b/tex/context/base/font-otf.lua
@@ -19,14 +19,10 @@ local trace_features = false trackers.register("otf.features", function(v
local trace_dynamics = false trackers.register("otf.dynamics", function(v) trace_dynamics = v end)
local trace_sequences = false trackers.register("otf.sequences", function(v) trace_sequences = v end)
local trace_math = false trackers.register("otf.math", function(v) trace_math = v end)
-local trace_unimapping = false trackers.register("otf.unimapping", function(v) trace_unimapping = v end)
local trace_defining = false trackers.register("fonts.defining", function(v) trace_defining = v end)
--~ trackers.enable("otf.loading")
-local zwnj = 0x200C
-local zwj = 0x200D
-
--[[ldx--
The fontforge table has organized lookups in a certain way. A first implementation
of this code was organized featurewise: information related to features was
@@ -84,7 +80,7 @@ otf.features.default = otf.features.default or { }
otf.enhancers = otf.enhancers or { }
otf.glists = { "gsub", "gpos" }
-otf.version = 2.644 -- beware: also sync font-mis.lua
+otf.version = 2.645 -- beware: also sync font-mis.lua
otf.pack = true -- beware: also sync font-mis.lua
otf.syncspace = true
otf.notdef = false
@@ -502,145 +498,7 @@ otf.enhancers["analyse marks"] = function(data,filename)
end
end
-local separator = lpeg.S("_.")
-local other = lpeg.C((1 - separator)^1)
-local ligsplitter = lpeg.Ct(other * (separator * other)^0)
-
---~ print(table.serialize(lpegmatch(ligsplitter,"this")))
---~ print(table.serialize(lpegmatch(ligsplitter,"this.that")))
---~ print(table.serialize(lpegmatch(ligsplitter,"japan1.123")))
---~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more")))
---~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more.that")))
-
-otf.enhancers["analyse unicodes"] = function(data,filename)
- local tounicode16, tounicode16sequence = fonts.map.tounicode16, fonts.map.tounicode16sequence
- local unicodes = data.luatex.unicodes
- -- we need to move this code
- unicodes['space'] = unicodes['space'] or 32 -- handly later on
- unicodes['hyphen'] = unicodes['hyphen'] or 45 -- handly later on
- unicodes['zwj'] = unicodes['zwj'] or zwj -- handly later on
- unicodes['zwnj'] = unicodes['zwnj'] or zwnj -- handly later on
- -- the tounicode mapping is sparse and only needed for alternatives
- local tounicode, originals, ns, nl, private, unknown = { }, { }, 0, 0, fonts.private, format("%04X",utfbyte("?"))
- data.luatex.tounicode, data.luatex.originals = tounicode, originals
- local lumunic, uparser, oparser
- if false then -- will become an option
- lumunic = fonts.map.load_lum_table(filename)
- lumunic = lumunic and lumunic.tounicode
- end
- local cidinfo, cidnames, cidcodes = data.cidinfo
- local usedmap = cidinfo and cidinfo.usedname
- usedmap = usedmap and lower(usedmap)
- usedmap = usedmap and fonts.cid.map[usedmap]
- if usedmap then
- oparser = usedmap and fonts.map.make_name_parser(cidinfo.ordering)
- cidnames = usedmap.names
- cidcodes = usedmap.unicodes
- end
- uparser = fonts.map.make_name_parser()
- local aglmap = fonts.map and fonts.map.agl_to_unicode
- for index, glyph in next, data.glyphs do
- local name, unic = glyph.name, glyph.unicode or -1 -- play safe
- if unic == -1 or unic >= private or (unic >= 0xE000 and unic <= 0xF8FF) or unic == 0xFFFE or unic == 0xFFFF then
- local unicode = (aglmap and aglmap[name]) or (lumunic and lumunic[name])
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- -- cidmap heuristics, beware, there is no guarantee for a match unless
- -- the chain resolves
- if (not unicode) and usedmap then
- local foundindex = lpegmatch(oparser,name)
- if foundindex then
- unicode = cidcodes[foundindex] -- name to number
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- else
- local reference = cidnames[foundindex] -- number to name
- if reference then
- local foundindex = lpegmatch(oparser,reference)
- if foundindex then
- unicode = cidcodes[foundindex]
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- end
- if not unicode then
- local foundcodes, multiple = lpegmatch(uparser,reference)
- if foundcodes then
- if multiple then
- originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
- else
- originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
- end
- end
- end
- end
- end
- end
- end
- -- a.whatever or a_b_c.whatever or a_b_c (no numbers)
- if not unicode then
- local split = lpegmatch(ligsplitter,name)
- local nplit = (split and #split) or 0
- if nplit == 0 then
- -- skip
- elseif nplit == 1 then
- local base = split[1]
- unicode = unicodes[base] or (agl and agl[base])
- if unicode then
- if type(unicode) == "table" then
- unicode = unicode[1]
- end
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- else
- local t = { }
- for l=1,nplit do
- local base = split[l]
- local u = unicodes[base] or (agl and agl[base])
- if not u then
- break
- elseif type(u) == "table" then
- t[#t+1] = u[1]
- else
- t[#t+1] = u
- end
- end
- if #t > 0 then -- done then
- originals[index], tounicode[index], nl, unicode = t, tounicode16sequence(t), nl + 1, true
- end
- end
- end
- -- last resort
- if not unicode then
- local foundcodes, multiple = lpegmatch(uparser,name)
- if foundcodes then
- if multiple then
- originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
- else
- originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
- end
- end
- end
- if not unicode then
- originals[index], tounicode[index] = 0xFFFD, "FFFD"
- end
- end
- end
- if trace_unimapping then
- for index, glyph in table.sortedpairs(data.glyphs) do
- local toun, name, unic = tounicode[index], glyph.name, glyph.unicode or -1 -- play safe
- if toun then
- logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X, tounicode: %s",index,name,unic,toun)
- else
- logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X",index,name,unic)
- end
- end
- end
- if trace_loading and (ns > 0 or nl > 0) then
- logs.report("load otf","enhance: %s tounicode entries added (%s ligatures)",nl+ns, ns)
- end
-end
+otf.enhancers["analyse unicodes"] = fonts.map.add_to_unicode
otf.enhancers["analyse subtables"] = function(data,filename)
data.luatex = data.luatex or { }
@@ -1837,6 +1695,16 @@ function tfm.read_from_open_type(specification)
tfmtable.format = specification.format
end
tfmtable.name = tfmtable.filename or tfmtable.fullname or tfmtable.fontname
+ if tfm.fontname_mode == "specification" then
+ -- not to be used in context !
+ local specname = specification.specification
+ if specname then
+ tfmtable.name = specname
+ if trace_defining then
+ logs.report("define font","overloaded fontname: '%s'",specname)
+ end
+ end
+ end
end
fonts.logger.save(tfmtable,file.extname(specification.filename),specification)
end
diff --git a/tex/context/base/font-syn.lua b/tex/context/base/font-syn.lua
index d6c49b459..4b892ed8c 100644
--- a/tex/context/base/font-syn.lua
+++ b/tex/context/base/font-syn.lua
@@ -838,7 +838,7 @@ local function is_reloaded()
local c_status = table.serialize(resolvers.data_state())
local f_status = table.serialize(data.data_state)
if c_status == f_status then
- logs.report("fonts","font database matches configuration and file hashes")
+ -- logs.report("fonts","font database matches configuration and file hashes")
return
else
logs.report("fonts","font database does not match configuration and file hashes")
diff --git a/tex/context/base/font-tfm.lua b/tex/context/base/font-tfm.lua
index 2f96de44f..fd3d8b4dd 100644
--- a/tex/context/base/font-tfm.lua
+++ b/tex/context/base/font-tfm.lua
@@ -47,6 +47,7 @@ supplied by .
tfm.resolve_vf = true -- false
tfm.share_base_kerns = false -- true (.5 sec slower on mk but brings down mem from 410M to 310M, beware: then script/lang share too)
tfm.mathactions = { }
+tfm.fontname_mode = "fullpath"
function tfm.enhance(tfmdata,specification)
local name, size = specification.name, specification.size
@@ -874,7 +875,5 @@ fonts.initializers.node.tfm.remap = tfm.remap
-- status info
statistics.register("fonts load time", function()
- if statistics.elapsedindeed(fonts) then
- return format("%s seconds",statistics.elapsedtime(fonts))
- end
+ return statistics.elapsedseconds(fonts)
end)
diff --git a/tex/context/base/font-xtx.lua b/tex/context/base/font-xtx.lua
index ec7b2a26e..5a87d0c4f 100644
--- a/tex/context/base/font-xtx.lua
+++ b/tex/context/base/font-xtx.lua
@@ -72,12 +72,15 @@ local function istrue (s) list[s] = 'yes' end
local function isfalse(s) list[s] = 'no' end
local function iskey (k,v) list[k] = v end
+local function istrue (s) list[s] = true end
+local function isfalse(s) list[s] = false end
+
local spaces = lpeg.P(" ")^0
local namespec = (1-lpeg.S("/:("))^0 -- was: (1-lpeg.S("/: ("))^0
local crapspec = spaces * lpeg.P("/") * (((1-lpeg.P(":"))^0)/iscrap) * spaces
local filename = (lpeg.P("file:")/isfile * (namespec/thename)) + (lpeg.P("[") * lpeg.P(true)/isname * (((1-lpeg.P("]"))^0)/thename) * lpeg.P("]"))
local fontname = (lpeg.P("name:")/isname * (namespec/thename)) + lpeg.P(true)/issome * (namespec/thename)
-local sometext = (lpeg.R("az") + lpeg.R("AZ") + lpeg.R("09") + lpeg.P("."))^1
+local sometext = (lpeg.R("az","AZ","09") + lpeg.S("+-."))^1
local truevalue = lpeg.P("+") * spaces * (sometext/istrue)
local falsevalue = lpeg.P("-") * spaces * (sometext/isfalse)
local keyvalue = (lpeg.C(sometext) * spaces * lpeg.P("=") * spaces * lpeg.C(sometext))/iskey
@@ -90,12 +93,12 @@ local pattern = (filename + fontname) * subvalue^0 * crapspec^0 * options^0
function fonts.define.specify.colonized(specification) -- xetex mode
list = { }
lpegmatch(pattern,specification.specification)
- for k, v in next, list do
- list[k] = v:is_boolean()
- if type(list[a]) == "nil" then
- list[k] = v
- end
- end
+--~ for k, v in next, list do
+--~ list[k] = v:is_boolean()
+--~ if type(list[a]) == "nil" then
+--~ list[k] = v
+--~ end
+--~ end
list.crap = nil -- style not supported, maybe some day
if list.name then
specification.name = list.name
diff --git a/tex/context/base/lang-ini.lua b/tex/context/base/lang-ini.lua
index 505c0a00e..512df1b0b 100644
--- a/tex/context/base/lang-ini.lua
+++ b/tex/context/base/lang-ini.lua
@@ -477,7 +477,5 @@ statistics.register("loaded patterns", function()
end)
statistics.register("language load time", function()
- if statistics.elapsedindeed(languages) then
- return format("%s seconds, n=%s", statistics.elapsedtime(languages), languages.hyphenation.n())
- end
+ return statistics.elapsedseconds(languages, format(", n=%s",languages.hyphenation.n()))
end)
diff --git a/tex/context/base/luat-cbk.lua b/tex/context/base/luat-cbk.lua
index cc2f8a354..30930a40c 100644
--- a/tex/context/base/luat-cbk.lua
+++ b/tex/context/base/luat-cbk.lua
@@ -33,15 +33,25 @@ local function frozenmessage(what,name)
logs.report("callbacks","not %s frozen '%s' (%s)",what,name,frozen[name])
end
+local function state(name)
+ local f = find_callback(name)
+ if f == false then
+ return "disabled"
+ elseif f then
+ return "enabled"
+ else
+ return "undefined"
+ end
+end
+
function callbacks.report()
local list = callback.list()
for name, func in table.sortedpairs(list) do
local str = frozen[name]
- func = (func and "set") or "nop"
if str then
- logs.report("callbacks","%s: %s -> %s",func,name,str)
+ logs.report("callbacks","%s: %s -> %s",state(func),name,str)
else
- logs.report("callbacks","%s: %s",func,name)
+ logs.report("callbacks","%s: %s",state(func),name)
end
end
end
@@ -49,11 +59,11 @@ end
function callbacks.table()
context.starttabulate { "|l|l|p|" }
for name, func in table.sortedpairs(callback.list()) do
- context.NC()
- context.type((func and "set") or "nop")
context.NC()
context.type(name)
context.NC()
+ context.type(state(name))
+ context.NC()
context(frozen[name] or "")
context.NC()
context.NR()
diff --git a/tex/context/base/luat-dum.lua b/tex/context/base/luat-dum.lua
index 19e95e2b1..34dd9ed6b 100644
--- a/tex/context/base/luat-dum.lua
+++ b/tex/context/base/luat-dum.lua
@@ -57,7 +57,8 @@ local remapper = {
ttf = "truetype fonts",
ttc = "truetype fonts",
dfont = "truetype dictionary",
- cid = "other text files", -- will become "cid files"
+ cid = "cid maps",
+ fea = "font feature files",
}
function resolvers.find_file(name,kind)
diff --git a/tex/context/base/luat-run.lua b/tex/context/base/luat-run.lua
index f2072f74a..b64a99fc6 100644
--- a/tex/context/base/luat-run.lua
+++ b/tex/context/base/luat-run.lua
@@ -61,9 +61,14 @@ end
-- this can be done later
-callbacks.register('start_run', main.start, "actions performed at the beginning of a run")
-callbacks.register('stop_run', main.stop, "actions performed at the end of a run")
-callbacks.register('report_output_pages', main.report_output_pages, "actions performed when reporting pages")
-callbacks.register('report_output_log' , main.report_output_log, "actions performed when reporting log file")
-callbacks.register('start_page_number' , main.start_shipout_page, "actions performed at the beginning of a shipout")
-callbacks.register('stop_page_number' , main.stop_shipout_page, "actions performed at the end of a shipout")
+callbacks.register('start_run', main.start, "actions performed at the beginning of a run")
+callbacks.register('stop_run', main.stop, "actions performed at the end of a run")
+
+callbacks.register('report_output_pages', main.report_output_pages, "actions performed when reporting pages")
+callbacks.register('report_output_log', main.report_output_log, "actions performed when reporting log file")
+
+callbacks.register('start_page_number', main.start_shipout_page, "actions performed at the beginning of a shipout")
+callbacks.register('stop_page_number', main.stop_shipout_page, "actions performed at the end of a shipout")
+
+callbacks.register('process_input_buffer', false, "actions performed when reading data")
+callbacks.register('process_output_buffer', false, "actions performed when writing data")
diff --git a/tex/context/base/math-noa.lua b/tex/context/base/math-noa.lua
index cca1812d2..87fb8a38a 100644
--- a/tex/context/base/math-noa.lua
+++ b/tex/context/base/math-noa.lua
@@ -342,8 +342,10 @@ end
tasks.new (
"math",
{
+ "before",
"normalizers",
"builders",
+ "after",
}
)
@@ -363,7 +365,5 @@ callbacks.register('mlist_to_hlist',nodes.processors.mlist_to_hlist,"preprocessi
-- tracing
statistics.register("math processing time", function()
- if statistics.elapsedindeed(noads) then
- return format("%s seconds", statistics.elapsedtime(noads))
- end
+ return statistics.elapsedseconds(noads)
end)
diff --git a/tex/context/base/node-fin.lua b/tex/context/base/node-fin.lua
index 9e0538fb5..c6e3be448 100644
--- a/tex/context/base/node-fin.lua
+++ b/tex/context/base/node-fin.lua
@@ -438,7 +438,5 @@ states.stacked = stacked
-- -- --
statistics.register("attribute processing time", function()
- if statistics.elapsedindeed(attributes) then
- return format("%s seconds (front- and backend)",statistics.elapsedtime(attributes))
- end
+ return statistics.elapsedseconds(attributes,"front- and backend")
end)
diff --git a/tex/context/base/node-mig.lua b/tex/context/base/node-mig.lua
index 362247654..f9f0ad231 100644
--- a/tex/context/base/node-mig.lua
+++ b/tex/context/base/node-mig.lua
@@ -101,14 +101,14 @@ end
experiments.register("marks.migrate", function(v)
if v then
- tasks.enableaction("pagebuilders", "nodes.migrate_outwards")
+ tasks.enableaction("mvlbuilders", "nodes.migrate_outwards")
end
migrate_marks = v
end)
experiments.register("inserts.migrate", function(v)
if v then
- tasks.enableaction("pagebuilders", "nodes.migrate_outwards")
+ tasks.enableaction("mvlbuilders", "nodes.migrate_outwards")
end
migrate_inserts = v
end)
diff --git a/tex/context/base/node-pag.lua b/tex/context/base/node-pag.lua
new file mode 100644
index 000000000..fd1504eac
--- /dev/null
+++ b/tex/context/base/node-pag.lua
@@ -0,0 +1,26 @@
+if not modules then modules = { } end modules ['node-pag'] = {
+ version = 1.001,
+ comment = "companion to node-pag.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+pagebuilders = pagebuilders or { }
+
+local starttiming, stoptiming = statistics.starttiming, statistics.stoptiming
+
+local actions = tasks.actions("pagebuilders",5)
+
+local function processor(head,groupcode,size,packtype,maxdepth,direction)
+ starttiming(pagebuilders)
+ local _, done = actions(head,groupcode,size,packtype,maxdepth,direction)
+ stoptiming(pagebuilders)
+ return (done and head) or true
+end
+
+callbacks.register('pre_output_filter', processor, "preparing output box")
+
+statistics.register("output preparation time", function()
+ return statistics.elapsedseconds(pagebuilders)
+end)
diff --git a/tex/context/base/node-pag.mkiv b/tex/context/base/node-pag.mkiv
new file mode 100644
index 000000000..487901ad0
--- /dev/null
+++ b/tex/context/base/node-pag.mkiv
@@ -0,0 +1,20 @@
+%D \module
+%D [ file=node-pag,
+%D version=2008.09.30,
+%D title=\CONTEXT\ Node Macros,
+%D subtitle=Page Building,
+%D author=Hans Hagen,
+%D date=\currentdate,
+%D copyright={PRAGMA / Hans Hagen \& Ton Otten}]
+%C
+%C This module is part of the \CONTEXT\ macro||package and is
+%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
+%C details.
+
+\writestatus{loading}{ConTeXt Node Macros / Page Building}
+
+\unprotect
+
+\registerctxluafile{node-pag}{1.001}
+
+\protect \endinput
diff --git a/tex/context/base/node-par.lua b/tex/context/base/node-par.lua
index 05b83c1ac..2066d8ca4 100644
--- a/tex/context/base/node-par.lua
+++ b/tex/context/base/node-par.lua
@@ -11,20 +11,25 @@ parbuilders.constructors = parbuilders.constructors or { }
parbuilders.names = parbuilders.names or { }
parbuilders.attribute = attributes.numbers['parbuilder'] or 999
+local constructors, names, p_attribute = parbuilders.constructors, parbuilders.names, parbuilders.attribute
+
storage.register("parbuilders.names", parbuilders.names, "parbuilders.names")
+local has_attribute = node.has_attribute
+local starttiming, stoptiming = statistics.starttiming, statistics.stoptiming
+
-- store parbuilders.names
function parbuilders.register(name,attribute)
parbuilders.names[attribute] = name
end
-function parbuilders.main(head,interupted_by_display)
- local attribute = node.has_attribute(head,parbuilders.attribute)
+function parbuilders.constructor(head,is_display)
+ local attribute = has_attribute(head,p_attribute)
if attribute then
- local constructor = parbuilders.names[attribute]
+ local constructor = names[attribute]
if constructor then
- return parbuilders.constructors[constructor](head,interupted_by_display)
+ return constructors[constructor](head,is_display)
end
end
return false
@@ -32,12 +37,28 @@ end
-- just for testing
-function parbuilders.constructors.default(head,ibd)
+function parbuilders.constructors.default(head,is_display)
return false
end
-- also for testing (no surrounding spacing done)
-function parbuilders.constructors.oneline(head,ibd)
- return node.hpack(head)
+function parbuilders.constructors.oneline(head,is_display)
+-- head.list = node.hpack(head)
end
+
+local actions = tasks.actions("parbuilders",1)
+
+local function processor(head,is_display)
+ starttiming(parbuilders)
+ local _, done = actions(head,is_display)
+ stoptiming(parbuilders)
+ return done
+end
+
+--~ callbacks.register('linebreak_filter', actions, "breaking paragraphs into lines")
+callbacks.register('linebreak_filter', processor, "breaking paragraphs into lines")
+
+statistics.register("linebreak processing time", function()
+ return statistics.elapsedseconds(parbuilders)
+end)
diff --git a/tex/context/base/node-par.mkiv b/tex/context/base/node-par.mkiv
index ae56f85d7..574991282 100644
--- a/tex/context/base/node-par.mkiv
+++ b/tex/context/base/node-par.mkiv
@@ -54,7 +54,7 @@
\defineparbuilder[default] % just for testing
\defineparbuilder[oneline] % just for testing
-\def\enableparbuilders {\ctxlua{callback.register('linebreak_filter', parbuilders.main)}}
-\def\disableparbuilders{\ctxlua{callback.register('linebreak_filter', nil)}}
+\def\enableparbuilders {\ctxlua{tasks.enableaction ("parbuilders", "parbuilders.constructor")}}
+\def\disableparbuilders{\ctxlua{tasks.disableaction("parbuilders", "parbuilders.constructor")}}
\protect \endinput
diff --git a/tex/context/base/node-pro.lua b/tex/context/base/node-pro.lua
index b294b850f..24d11cbe9 100644
--- a/tex/context/base/node-pro.lua
+++ b/tex/context/base/node-pro.lua
@@ -25,7 +25,7 @@ lists = lists or { }
chars = chars or { }
words = words or { } -- not used yet
-local actions = tasks.actions("processors",2) -- head, where, boolean
+local actions = tasks.actions("processors",4)
local n = 0
@@ -60,12 +60,12 @@ end
nodes.processors.enabled = true -- thsi will become a proper state (like trackers)
-function nodes.processors.pre_linebreak_filter(head,groupcode)
+function nodes.processors.pre_linebreak_filter(head,groupcode,size,packtype,direction)
local first, found = first_character(head)
if found then
if trace_callbacks then
local before = nodes.count(head,true)
- local head, done = actions(head,groupcode)
+ local head, done = actions(head,groupcode,size,packtype,direction)
local after = nodes.count(head,true)
if done then
tracer("pre_linebreak","changed",head,groupcode,before,after,true)
@@ -74,7 +74,7 @@ function nodes.processors.pre_linebreak_filter(head,groupcode)
end
return (done and head) or true
else
- local head, done = actions(head,groupcode)
+ local head, done = actions(head,groupcode,size,packtype,direction)
return (done and head) or true
end
elseif trace_callbacks then
@@ -84,12 +84,12 @@ function nodes.processors.pre_linebreak_filter(head,groupcode)
return true
end
-function nodes.processors.hpack_filter(head,groupcode)
+function nodes.processors.hpack_filter(head,groupcode,size,packtype,direction)
local first, found = first_character(head)
if found then
if trace_callbacks then
local before = nodes.count(head,true)
- local head, done = actions(head,groupcode)
+ local head, done = actions(head,groupcode,size,packtype,direction)
local after = nodes.count(head,true)
if done then
tracer("hpack","changed",head,groupcode,before,after,true)
@@ -98,7 +98,7 @@ function nodes.processors.hpack_filter(head,groupcode)
end
return (done and head) or true
else
- local head, done = actions(head,groupcode)
+ local head, done = actions(head,groupcode,size,packtype,direction)
return (done and head) or true
end
elseif trace_callbacks then
@@ -111,7 +111,7 @@ end
callbacks.register('pre_linebreak_filter', nodes.processors.pre_linebreak_filter,"all kind of horizontal manipulations (before par break)")
callbacks.register('hpack_filter' , nodes.processors.hpack_filter,"all kind of horizontal manipulations")
-local actions = tasks.actions("finalizers",2) -- head, where, boolean
+local actions = tasks.actions("finalizers",1) -- head, where
-- beware, these are packaged boxes so no first_character test
-- maybe some day a hash with valid groupcodes
@@ -145,7 +145,5 @@ end
callbacks.register('post_linebreak_filter', nodes.processors.post_linebreak_filter,"all kind of horizontal manipulations (after par break)")
statistics.register("h-node processing time", function()
- if statistics.elapsedindeed(nodes) then
- return format("%s seconds including kernel", statistics.elapsedtime(nodes))
- end
+ return statistics.elapsedseconds(nodes,"including kernel")
end)
diff --git a/tex/context/base/node-seq.lua b/tex/context/base/node-seq.lua
index d3a999030..3a2cf5b6e 100644
--- a/tex/context/base/node-seq.lua
+++ b/tex/context/base/node-seq.lua
@@ -16,6 +16,7 @@ use locals to refer to them when compiling the chain.
-- todo: delayed: i.e. we register them in the right order already but delay usage
local format, gsub, concat, gmatch = string.format, string.gsub, table.concat, string.gmatch
+local type, loadstring = type, loadstring
sequencer = sequencer or { }
@@ -94,7 +95,7 @@ function sequencer.compile(t,compiler,n)
elseif compiler then
t = compiler(t,n)
else
- t = sequencer.tostring(t,n)
+ t = sequencer.tostring(t)
end
return loadstring(t)()
end
@@ -109,7 +110,7 @@ return function(...)
%s
end]]
-function sequencer.tostring(t,n) -- n not done
+function sequencer.tostring(t)
local list, order, kind, gskip, askip = t.list, t.order, t.kind, t.gskip, t.askip
local vars, calls, args = { }, { }, nil
for i=1,#order do
@@ -152,6 +153,12 @@ function sequencer.nodeprocessor(t,n)
args = ",one"
elseif n == 2 then
args = ",one,two"
+ elseif n == 3 then
+ args = ",one,two,three"
+ elseif n == 4 then
+ args = ",one,two,three,four"
+ elseif n == 5 then
+ args = ",one,two,three,four,five"
else
args = ",..."
end
@@ -174,38 +181,6 @@ function sequencer.nodeprocessor(t,n)
end
end
local processor = format(template,concat(vars,"\n"),args,concat(calls,"\n"))
---~ print(processor)
+--~ print(processor)
return processor
end
-
---~ hans = {}
---~ taco = {}
-
---~ function hans.a(head,tail) print("a",head,tail) return head,tail,true end
---~ function hans.b(head,tail) print("b",head,tail) return head,tail,true end
---~ function hans.c(head,tail) print("c",head,tail) return head,tail,true end
---~ function hans.x(head,tail) print("x",head,tail) return head,tail,true end
---~ function taco.i(head,tail) print("i",head,tail) return head,tail,true end
---~ function taco.j(head,tail) print("j",head,tail) return head,tail,true end
-
---~ t = sequencer.reset()
-
---~ sequencer.appendgroup(t,"hans")
---~ sequencer.appendgroup(t,"taco")
---~ sequencer.prependaction(t,"hans","hans.a")
---~ sequencer.appendaction (t,"hans","hans.b")
---~ sequencer.appendaction (t,"hans","hans.x")
---~ sequencer.prependaction(t,"hans","hans.c","hans.b")
---~ sequencer.prependaction(t,"taco","taco.i")
---~ sequencer.prependaction(t,"taco","taco.j")
---~ sequencer.removeaction(t,"hans","hans.x")
-
---~ sequencer.setkind(t,"hans.b")
---~ sequencer.setkind(t,"taco.j","nohead")
-
---~ print(sequencer.tostring(t))
-
---~ s = sequencer.compile(t,sequencer.nodeprocessor)
-
---~ print(sequencer.nodeprocessor(t))
---~ print(s("head","tail"))
diff --git a/tex/context/base/node-tsk.lua b/tex/context/base/node-tsk.lua
index 59de5621b..553f0fc3b 100644
--- a/tex/context/base/node-tsk.lua
+++ b/tex/context/base/node-tsk.lua
@@ -8,8 +8,8 @@ if not modules then modules = { } end modules ['node-tsk'] = {
local trace_tasks = false trackers.register("tasks.creation", function(v) trace_tasks = v end)
-tasks = tasks or { }
-tasks.data = tasks.data or { }
+tasks = tasks or { }
+tasks.data = tasks.data or { }
function tasks.new(name,list)
local tasklist = sequencer.reset()
@@ -95,12 +95,14 @@ local created, total = 0, 0
statistics.register("node list callback tasks", function()
if total > 0 then
- return string.format("%s unique tasks, %s created, %s calls",table.count(tasks.data),created,total)
+ return string.format("%s unique task lists, %s instances (re)created, %s calls",table.count(tasks.data),created,total)
else
return nil
end
end)
+local compile, nodeprocessor = sequencer.compile, sequencer.nodeprocessor
+
function tasks.actions(name,n) -- we optimize for the number or arguments (no ...)
local data = tasks.data[name]
if data then
@@ -113,7 +115,7 @@ function tasks.actions(name,n) -- we optimize for the number or arguments (no ..
if trace_tasks then
logs.report("nodes","creating task runner '%s'",name)
end
- runner = sequencer.compile(data.list,sequencer.nodeprocessor,0)
+ runner = compile(data.list,nodeprocessor,0)
data.runner = runner
end
return runner(head)
@@ -125,9 +127,9 @@ function tasks.actions(name,n) -- we optimize for the number or arguments (no ..
if not runner then
created = created + 1
if trace_tasks then
- logs.report("nodes","creating task runner '%s'",name)
+ logs.report("nodes","creating task runner '%s' with 1 extra arguments",name)
end
- runner = sequencer.compile(data.list,sequencer.nodeprocessor,1)
+ runner = compile(data.list,nodeprocessor,1)
data.runner = runner
end
return runner(head,one)
@@ -139,13 +141,55 @@ function tasks.actions(name,n) -- we optimize for the number or arguments (no ..
if not runner then
created = created + 1
if trace_tasks then
- logs.report("nodes","creating task runner '%s'",name)
+ logs.report("nodes","creating task runner '%s' with 2 extra arguments",name)
end
- runner = sequencer.compile(data.list,sequencer.nodeprocessor,2)
+ runner = compile(data.list,nodeprocessor,2)
data.runner = runner
end
return runner(head,one,two)
end
+ elseif n == 3 then
+ return function(head,one,two,three)
+ total = total + 1 -- will go away
+ local runner = data.runner
+ if not runner then
+ created = created + 1
+ if trace_tasks then
+ logs.report("nodes","creating task runner '%s' with 3 extra arguments",name)
+ end
+ runner = compile(data.list,nodeprocessor,3)
+ data.runner = runner
+ end
+ return runner(head,one,two,three)
+ end
+ elseif n == 4 then
+ return function(head,one,two,three,four)
+ total = total + 1 -- will go away
+ local runner = data.runner
+ if not runner then
+ created = created + 1
+ if trace_tasks then
+ logs.report("nodes","creating task runner '%s' with 4 extra arguments",name)
+ end
+ runner = compile(data.list,nodeprocessor,4)
+ data.runner = runner
+ end
+ return runner(head,one,two,three,four)
+ end
+ elseif n == 5 then
+ return function(head,one,two,three,four,five)
+ total = total + 1 -- will go away
+ local runner = data.runner
+ if not runner then
+ created = created + 1
+ if trace_tasks then
+ logs.report("nodes","creating task runner '%s' with 5 extra arguments",name)
+ end
+ runner = compile(data.list,nodeprocessor,5)
+ data.runner = runner
+ end
+ return runner(head,one,two,three,four,five)
+ end
else
return function(head,...)
total = total + 1 -- will go away
@@ -153,9 +197,9 @@ function tasks.actions(name,n) -- we optimize for the number or arguments (no ..
if not runner then
created = created + 1
if trace_tasks then
- logs.report("nodes","creating task runner '%s'",name)
+ logs.report("nodes","creating task runner '%s' with n extra arguments",name)
end
- runner = sequencer.compile(data.list,sequencer.nodeprocessor,3)
+ runner = compile(data.list,nodeprocessor,"n")
data.runner = runner
end
return runner(head,...)
@@ -166,6 +210,31 @@ function tasks.actions(name,n) -- we optimize for the number or arguments (no ..
end
end
+function tasks.table(name) --maybe move this to task-deb.lua
+ local tsk = tasks.data[name]
+ local lst = tsk and tsk.list
+ local HL, NC, NR, bold, type = context.HL, context.NC, context.NR, context.bold, context.type
+ if lst then
+ local list, order = lst.list, lst.order
+ if list and order then
+ context.starttabulate { "|l|l|" }
+ HL() NC() bold("category") NC() bold("function") NC() NR() HL()
+ for i=1,#order do
+ local o = order[i]
+ local l = list[o]
+ if #l == 0 then
+ NC() type(o) NC() NC() NR()
+ else
+ for k, v in table.sortedpairs(l) do
+ NC() type(o) NC() type(v) NC() NR()
+ end
+ end
+ HL()
+ end
+ context.stoptabulate()
+ end
+ end
+end
tasks.new (
"processors",
{
@@ -203,7 +272,7 @@ tasks.new (
)
tasks.new (
- "pagebuilders",
+ "mvlbuilders",
{
"before", -- for users
"normalizers",
@@ -219,3 +288,21 @@ tasks.new (
"after", -- for users
}
)
+
+tasks.new (
+ "parbuilders",
+ {
+ "before", -- for users
+ "lists",
+ "after", -- for users
+ }
+)
+
+tasks.new (
+ "pagebuilders",
+ {
+ "before", -- for users
+ "lists",
+ "after", -- for users
+ }
+)
diff --git a/tex/context/base/spac-ver.lua b/tex/context/base/spac-ver.lua
index e5d81066c..02fe1fe85 100644
--- a/tex/context/base/spac-ver.lua
+++ b/tex/context/base/spac-ver.lua
@@ -6,6 +6,8 @@ if not modules then modules = { } end modules ['spac-ver'] = {
license = "see context related readme files"
}
+-- we also need to call the spacer for inserts!
+
-- this code dates from the beginning and is kind of experimental; it
-- will be optimized and improved soon
--
@@ -1092,15 +1094,15 @@ nodes.builders = nodes.builder or { }
local builders = nodes.builders
-local actions = tasks.actions("vboxbuilders",2)
+local actions = tasks.actions("vboxbuilders",5)
-function nodes.builders.vpack_filter(head,groupcode)
+function nodes.builders.vpack_filter(head,groupcode,size,packtype,maxdepth,direction)
local done = false
if head then
starttiming(builders)
if trace_callbacks then
local before = nodes.count(head)
- head, done = actions(head,groupcode)
+ head, done = actions(head,groupcode,size,packtype,maxdepth,direction)
local after = nodes.count(head)
if done then
tracer("vpack","changed",head,groupcode,before,after,true)
@@ -1120,7 +1122,7 @@ end
-- and we operate on the mlv. Also, we need to do the
-- vspacing last as it removes items from the mvl.
-local actions = tasks.actions("pagebuilders",2)
+local actions = tasks.actions("mvlbuilders",1)
function nodes.builders.buildpage_filter(groupcode)
starttiming(builders)
@@ -1131,11 +1133,9 @@ function nodes.builders.buildpage_filter(groupcode)
return (done and head) or true
end
-callbacks.register('vpack_filter', nodes.builders.vpack_filter,"vertical spacing etc")
-callbacks.register('buildpage_filter', nodes.builders.buildpage_filter,"vertical spacing etc (mvl)")
+callbacks.register('vpack_filter', nodes.builders.vpack_filter, "vertical spacing etc")
+callbacks.register('buildpage_filter', nodes.builders.buildpage_filter, "vertical spacing etc (mvl)")
statistics.register("v-node processing time", function()
- if statistics.elapsedindeed(builders) then
- return format("%s seconds", statistics.elapsedtime(builders))
- end
+ return statistics.elapsedseconds(builders)
end)
diff --git a/tex/context/base/strc-flt.mkiv b/tex/context/base/strc-flt.mkiv
index f0035166c..08b91823b 100644
--- a/tex/context/base/strc-flt.mkiv
+++ b/tex/context/base/strc-flt.mkiv
@@ -1154,14 +1154,17 @@
% beter de laatste skip buiten de \insert uitvoeren,
% bovendien bij volle flush onder baseline.
+% \def\betweenfloatblanko% assumes that spaceafter is present
+% {\bgroup
+% \setbox0\vbox{\strut\blank[\floatsharedparameter\c!spacebefore]\strut}%
+% \setbox2\vbox{\strut\blank[\floatsharedparameter\c!spaceafter]\strut}%
+% \ifdim\ht0>\ht2
+% \blank[-\floatsharedparameter\c!spaceafter,\floatsharedparameter\c!spacebefore]%
+% \fi
+% \egroup}
+
\def\betweenfloatblanko% assumes that spaceafter is present
- {\bgroup
- \setbox0\vbox{\strut\blank[\floatsharedparameter\c!spacebefore]\strut}%
- \setbox2\vbox{\strut\blank[\floatsharedparameter\c!spaceafter]\strut}%
- \ifdim\ht0>\ht2
- \blank[-\floatsharedparameter\c!spaceafter,\floatsharedparameter\c!spacebefore]%
- \fi
- \egroup}
+ {\blank[\floatsharedparameter\c!spacebefore]} % or v!back,....
\def\doplacefloatbox
{%\forgetall % NO
diff --git a/tex/context/base/strc-ref.mkiv b/tex/context/base/strc-ref.mkiv
index d8fe470d1..7e578459b 100644
--- a/tex/context/base/strc-ref.mkiv
+++ b/tex/context/base/strc-ref.mkiv
@@ -1306,7 +1306,7 @@
%D We keep this for compatibility reasons, hence the hackery.
-\def\dospecialfrom
+\unexpanded\def\dospecialfrom
{\dosingleempty\dodospecialfrom}
\def\dodospecialfrom[#1]%
diff --git a/tex/context/base/task-ini.lua b/tex/context/base/task-ini.lua
index c98d76aeb..3a1714e89 100644
--- a/tex/context/base/task-ini.lua
+++ b/tex/context/base/task-ini.lua
@@ -51,14 +51,16 @@ tasks.appendaction("math", "normalizers", "noads.respace_characters", nil
tasks.appendaction("math", "builders", "noads.mlist_to_hlist") -- always on
+tasks.appendaction("parbuilders", "lists", "parbuilders.constructor") -- disabled
+
-- quite experimental
tasks.appendaction("finalizers", "lists", "nodes.repackage_graphicvadjust") -- todo
-- rather new
-tasks.appendaction("pagebuilders", "normalizers", "nodes.migrate_outwards")
-tasks.appendaction("pagebuilders", "normalizers", "nodes.handle_page_spacing") -- last !
+tasks.appendaction("mvlbuilders", "normalizers", "nodes.migrate_outwards")
+tasks.appendaction("mvlbuilders", "normalizers", "nodes.handle_page_spacing") -- last !
tasks.appendaction("vboxbuilders", "normalizers", "nodes.handle_vbox_spacing")
@@ -85,7 +87,9 @@ tasks.disableaction("shipouts", "shipouts.handle_viewerlayer")
tasks.disableaction("shipouts", "nodes.add_references")
tasks.disableaction("shipouts", "nodes.add_destinations")
-tasks.disableaction("pagebuilders", "nodes.migrate_outwards")
+tasks.disableaction("mvlbuilders", "nodes.migrate_outwards")
+
+tasks.disableaction("parbuilders", "parbuilders.constructor")
callbacks.freeze("find_.*_file", "find file using resolver")
callbacks.freeze("read_.*_file", "read file at once")
diff --git a/tex/context/base/trac-inf.lua b/tex/context/base/trac-inf.lua
index 1a1977f3f..84fa5507a 100644
--- a/tex/context/base/trac-inf.lua
+++ b/tex/context/base/trac-inf.lua
@@ -84,6 +84,12 @@ function statistics.elapsedindeed(instance)
return t > statistics.threshold
end
+function statistics.elapsedseconds(instance,rest) -- returns nil if 0 seconds
+ if statistics.elapsedindeed(instance) then
+ return format("%s seconds %s", statistics.elapsedtime(instance),rest or "")
+ end
+end
+
-- general function
function statistics.register(tag,fnc)
diff --git a/tex/context/base/type-otf.mkiv b/tex/context/base/type-otf.mkiv
index 199d50585..01971147d 100644
--- a/tex/context/base/type-otf.mkiv
+++ b/tex/context/base/type-otf.mkiv
@@ -1702,4 +1702,24 @@
\stoptypescriptcollection
+\starttypescriptcollection[pagella-euler]
+
+ \starttypescript [math] [euler]
+ \definefontsynonym [EulerMath] [\s!file:euler.otf]
+ \stoptypescript
+
+ \starttypescript [math] [euler] [name]
+ \definefontsynonym [MathRoman] [EulerMath] [\s!features=math\mathsizesuffix]
+ \stoptypescript
+
+ \starttypescript [pagella-euler]
+ \definetypeface [\typescriptone] [rm] [serif] [pagella] [default]
+ % \definetypeface [\typescriptone] [ss] [sans] [pagella] [default] [rscale=auto]
+ \definetypeface [\typescriptone] [tt] [mono] [modern] [default] [rscale=auto]
+ \definetypeface [\typescriptone] [mm] [math] [euler] [default] [rscale=auto]
+ \quittypescriptscanning
+ \stoptypescript
+
+\stoptypescriptcollection
+
\protect \endinput
diff --git a/tex/generic/context/luatex-fonts-merged.lua b/tex/generic/context/luatex-fonts-merged.lua
index 808fb069d..3704975a2 100644
--- a/tex/generic/context/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : c:/data/develop/context/texmf/tex/generic/context/luatex-fonts-merged.lua
-- parent file : c:/data/develop/context/texmf/tex/generic/context/luatex-fonts.lua
--- merge date : 02/21/10 19:28:43
+-- merge date : 02/22/10 19:49:18
do -- begin closure to overcome local limits and interference
@@ -1990,7 +1990,8 @@ local remapper = {
ttf = "truetype fonts",
ttc = "truetype fonts",
dfont = "truetype dictionary",
- cid = "other text files", -- will become "cid files"
+ cid = "cid maps",
+ fea = "font feature files",
}
function resolvers.find_file(name,kind)
@@ -3420,6 +3421,7 @@ supplied by .
tfm.resolve_vf = true -- false
tfm.share_base_kerns = false -- true (.5 sec slower on mk but brings down mem from 410M to 310M, beware: then script/lang share too)
tfm.mathactions = { }
+tfm.fontname_mode = "fullpath"
function tfm.enhance(tfmdata,specification)
local name, size = specification.name, specification.size
@@ -4247,9 +4249,7 @@ fonts.initializers.node.tfm.remap = tfm.remap
-- status info
statistics.register("fonts load time", function()
- if statistics.elapsedindeed(fonts) then
- return format("%s seconds",statistics.elapsedtime(fonts))
- end
+ return statistics.elapsedseconds(fonts)
end)
end -- closure
@@ -5359,6 +5359,384 @@ end -- closure
do -- begin closure to overcome local limits and interference
+if not modules then modules = { } end modules ['font-map'] = {
+ version = 1.001,
+ comment = "companion to font-ini.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+local utf = unicode.utf8
+local match, format, find, concat, gsub, lower = string.match, string.format, string.find, table.concat, string.gsub, string.lower
+local lpegmatch = lpeg.match
+local utfbyte = utf.byte
+
+local trace_loading = false trackers.register("otf.loading", function(v) trace_loading = v end)
+local trace_unimapping = false trackers.register("otf.unimapping", function(v) trace_unimapping = v end)
+
+local ctxcatcodes = tex and tex.ctxcatcodes
+
+--[[ldx--
+Eventually this code will disappear because map files are kind
+of obsolete. Some code may move to runtime or auxiliary modules.
+The name to unicode related code will stay of course.
+--ldx]]--
+
+fonts = fonts or { }
+fonts.map = fonts.map or { }
+fonts.map.data = fonts.map.data or { }
+fonts.map.encodings = fonts.map.encodings or { }
+fonts.map.done = fonts.map.done or { }
+fonts.map.loaded = fonts.map.loaded or { }
+fonts.map.direct = fonts.map.direct or { }
+fonts.map.line = fonts.map.line or { }
+
+function fonts.map.line.pdfmapline(tag,str)
+ return "\\loadmapline[" .. tag .. "][" .. str .. "]"
+end
+
+function fonts.map.line.pdftex(e) -- so far no combination of slant and extend
+ if e.name and e.fontfile then
+ local fullname = e.fullname or ""
+ if e.slant and e.slant ~= 0 then
+ if e.encoding then
+ return fonts.map.line.pdfmapline("=",format('%s %s "%g SlantFont" <%s <%s',e.name,fullname,e.slant,e.encoding,e.fontfile))
+ else
+ return fonts.map.line.pdfmapline("=",format('%s %s "%g SlantFont" <%s',e.name,fullname,e.slant,e.fontfile))
+ end
+ elseif e.extend and e.extend ~= 1 and e.extend ~= 0 then
+ if e.encoding then
+ return fonts.map.line.pdfmapline("=",format('%s %s "%g ExtendFont" <%s <%s',e.name,fullname,e.extend,e.encoding,e.fontfile))
+ else
+ return fonts.map.line.pdfmapline("=",format('%s %s "%g ExtendFont" <%s',e.name,fullname,e.extend,e.fontfile))
+ end
+ else
+ if e.encoding then
+ return fonts.map.line.pdfmapline("=",format('%s %s <%s <%s',e.name,fullname,e.encoding,e.fontfile))
+ else
+ return fonts.map.line.pdfmapline("=",format('%s %s <%s',e.name,fullname,e.fontfile))
+ end
+ end
+ else
+ return nil
+ end
+end
+
+function fonts.map.flush(backend) -- will also erase the accumulated data
+ local flushline = fonts.map.line[backend or "pdftex"] or fonts.map.line.pdftex
+ for _, e in pairs(fonts.map.data) do
+ tex.sprint(ctxcatcodes,flushline(e))
+ end
+ fonts.map.data = { }
+end
+
+fonts.map.line.dvips = fonts.map.line.pdftex
+fonts.map.line.dvipdfmx = function() end
+
+function fonts.map.convert_entries(filename)
+ if not fonts.map.loaded[filename] then
+ fonts.map.data, fonts.map.encodings = fonts.map.load_file(filename,fonts.map.data, fonts.map.encodings)
+ fonts.map.loaded[filename] = true
+ end
+end
+
+function fonts.map.load_file(filename, entries, encodings)
+ entries = entries or { }
+ encodings = encodings or { }
+ local f = io.open(filename)
+ if f then
+ local data = f:read("*a")
+ if data then
+ for line in gmatch(data,"(.-)[\n\t]") do
+ if find(line,"^[%#%%%s]") then
+ -- print(line)
+ else
+ local extend, slant, name, fullname, fontfile, encoding
+ line = gsub(line,'"(.+)"', function(s)
+ extend = find(s,'"([^"]+) ExtendFont"')
+ slant = find(s,'"([^"]+) SlantFont"')
+ return ""
+ end)
+ if not name then
+ -- name fullname encoding fontfile
+ name, fullname, encoding, fontfile = match(line,"^(%S+)%s+(%S*)[%s<]+(%S*)[%s<]+(%S*)%s*$")
+ end
+ if not name then
+ -- name fullname (flag) fontfile encoding
+ name, fullname, fontfile, encoding = match(line,"^(%S+)%s+(%S*)[%d%s<]+(%S*)[%s<]+(%S*)%s*$")
+ end
+ if not name then
+ -- name fontfile
+ name, fontfile = match(line,"^(%S+)%s+[%d%s<]+(%S*)%s*$")
+ end
+ if name then
+ if encoding == "" then encoding = nil end
+ entries[name] = {
+ name = name, -- handy
+ fullname = fullname,
+ encoding = encoding,
+ fontfile = fontfile,
+ slant = tonumber(slant),
+ extend = tonumber(extend)
+ }
+ encodings[name] = encoding
+ elseif line ~= "" then
+ -- print(line)
+ end
+ end
+ end
+ end
+ f:close()
+ end
+ return entries, encodings
+end
+
+local function load_lum_table(filename)
+ local lumname = file.replacesuffix(file.basename(filename),"lum")
+ local lumfile = resolvers.find_file(lumname,"map") or ""
+ if lumfile ~= "" and lfs.isfile(lumfile) then
+ if trace_loading or trace_unimapping then
+ logs.report("load otf","enhance: loading %s ",lumfile)
+ end
+ lumunic = dofile(lumfile)
+ return lumunic, lumfile
+ end
+end
+
+local hex = lpeg.R("AF","09")
+local hexfour = (hex*hex*hex*hex) / function(s) return tonumber(s,16) end
+local hexsix = (hex^1) / function(s) return tonumber(s,16) end
+local dec = (lpeg.R("09")^1) / tonumber
+local period = lpeg.P(".")
+
+local unicode = lpeg.P("uni") * (hexfour * (period + lpeg.P(-1)) * lpeg.Cc(false) + lpeg.Ct(hexfour^1) * lpeg.Cc(true))
+local ucode = lpeg.P("u") * (hexsix * (period + lpeg.P(-1)) * lpeg.Cc(false) + lpeg.Ct(hexsix ^1) * lpeg.Cc(true))
+local index = lpeg.P("index") * dec * lpeg.Cc(false)
+
+local parser = unicode + ucode + index
+
+local parsers = { }
+
+local function make_name_parser(str)
+ if not str or str == "" then
+ return parser
+ else
+ local p = parsers[str]
+ if not p then
+ p = lpeg.P(str) * period * dec * lpeg.Cc(false)
+ parsers[str] = p
+ end
+ return p
+ end
+end
+
+--~ local parser = fonts.map.make_name_parser("Japan1")
+--~ local parser = fonts.map.make_name_parser()
+--~ local function test(str)
+--~ local b, a = lpegmatch(parser,str)
+--~ print((a and table.serialize(b)) or b)
+--~ end
+--~ test("a.sc")
+--~ test("a")
+--~ test("uni1234")
+--~ test("uni1234.xx")
+--~ test("uni12349876")
+--~ test("index1234")
+--~ test("Japan1.123")
+
+local function tounicode16(unicode)
+ if unicode < 0x10000 then
+ return format("%04X",unicode)
+ else
+ return format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
+ end
+end
+
+local function tounicode16sequence(unicodes)
+ local t = { }
+ for l=1,#unicodes do
+ local unicode = unicodes[l]
+ if unicode < 0x10000 then
+ t[l] = format("%04X",unicode)
+ else
+ t[l] = format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
+ end
+ end
+ return concat(t)
+end
+
+--~ This is quite a bit faster but at the cost of some memory but if we
+--~ do this we will also use it elsewhere so let's not follow this route
+--~ now. I might use this method in the plain variant (no caching there)
+--~ but then I need a flag that distinguishes between code branches.
+--~
+--~ local cache = { }
+--~
+--~ function fonts.map.tounicode16(unicode)
+--~ local s = cache[unicode]
+--~ if not s then
+--~ if unicode < 0x10000 then
+--~ s = format("%04X",unicode)
+--~ else
+--~ s = format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
+--~ end
+--~ cache[unicode] = s
+--~ end
+--~ return s
+--~ end
+
+fonts.map.load_lum_table = load_lum_table
+fonts.map.make_name_parser = make_name_parser
+fonts.map.tounicode16 = tounicode16
+fonts.map.tounicode16sequence = tounicode16sequence
+
+local separator = lpeg.S("_.")
+local other = lpeg.C((1 - separator)^1)
+local ligsplitter = lpeg.Ct(other * (separator * other)^0)
+
+--~ print(table.serialize(lpegmatch(ligsplitter,"this")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"this.that")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"japan1.123")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more")))
+--~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more.that")))
+
+fonts.map.add_to_unicode = function(data,filename)
+ local unicodes = data.luatex and data.luatex.unicodes
+ if not unicodes then
+ return
+ end
+ -- we need to move this code
+ unicodes['space'] = unicodes['space'] or 32
+ unicodes['hyphen'] = unicodes['hyphen'] or 45
+ unicodes['zwj'] = unicodes['zwj'] or 0x200D
+ unicodes['zwnj'] = unicodes['zwnj'] or 0x200C
+ -- the tounicode mapping is sparse and only needed for alternatives
+ local tounicode, originals, ns, nl, private, unknown = { }, { }, 0, 0, fonts.private, format("%04X",utfbyte("?"))
+ data.luatex.tounicode, data.luatex.originals = tounicode, originals
+ local lumunic, uparser, oparser
+ if false then -- will become an option
+ lumunic = load_lum_table(filename)
+ lumunic = lumunic and lumunic.tounicode
+ end
+ local cidinfo, cidnames, cidcodes = data.cidinfo
+ local usedmap = cidinfo and cidinfo.usedname
+ usedmap = usedmap and lower(usedmap)
+ usedmap = usedmap and fonts.cid.map[usedmap]
+ if usedmap then
+ oparser = usedmap and make_name_parser(cidinfo.ordering)
+ cidnames = usedmap.names
+ cidcodes = usedmap.unicodes
+ end
+ uparser = make_name_parser()
+ local aglmap = fonts.map and fonts.map.agl_to_unicode
+ for index, glyph in next, data.glyphs do
+ local name, unic = glyph.name, glyph.unicode or -1 -- play safe
+ if unic == -1 or unic >= private or (unic >= 0xE000 and unic <= 0xF8FF) or unic == 0xFFFE or unic == 0xFFFF then
+ local unicode = (lumunic and lumunic[name]) or (aglmap and aglmap[name])
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ -- cidmap heuristics, beware, there is no guarantee for a match unless
+ -- the chain resolves
+ if (not unicode) and usedmap then
+ local foundindex = lpegmatch(oparser,name)
+ if foundindex then
+ unicode = cidcodes[foundindex] -- name to number
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ else
+ local reference = cidnames[foundindex] -- number to name
+ if reference then
+ local foundindex = lpegmatch(oparser,reference)
+ if foundindex then
+ unicode = cidcodes[foundindex]
+ if unicode then
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ end
+ if not unicode then
+ local foundcodes, multiple = lpegmatch(uparser,reference)
+ if foundcodes then
+ if multiple then
+ originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
+ else
+ originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ -- a.whatever or a_b_c.whatever or a_b_c (no numbers)
+ if not unicode then
+ local split = lpegmatch(ligsplitter,name)
+ local nplit = (split and #split) or 0
+ if nplit == 0 then
+ -- skip
+ elseif nplit == 1 then
+ local base = split[1]
+ unicode = unicodes[base] or (aglmap and aglmap[base])
+ if unicode then
+ if type(unicode) == "table" then
+ unicode = unicode[1]
+ end
+ originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
+ end
+ else
+ local t = { }
+ for l=1,nplit do
+ local base = split[l]
+ local u = unicodes[base] or (aglmap and aglmap[base])
+ if not u then
+ break
+ elseif type(u) == "table" then
+ t[#t+1] = u[1]
+ else
+ t[#t+1] = u
+ end
+ end
+ if #t > 0 then -- done then
+ originals[index], tounicode[index], nl, unicode = t, tounicode16sequence(t), nl + 1, true
+ end
+ end
+ end
+ -- last resort
+ if not unicode then
+ local foundcodes, multiple = lpegmatch(uparser,name)
+ if foundcodes then
+ if multiple then
+ originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
+ else
+ originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
+ end
+ end
+ end
+ if not unicode then
+ originals[index], tounicode[index] = 0xFFFD, "FFFD"
+ end
+ end
+ end
+ if trace_unimapping then
+ for index, glyph in table.sortedpairs(data.glyphs) do
+ local toun, name, unic = tounicode[index], glyph.name, glyph.unicode or -1 -- play safe
+ if toun then
+ logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X, tounicode: %s",index,name,unic,toun)
+ else
+ logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X",index,name,unic)
+ end
+ end
+ end
+ if trace_loading and (ns > 0 or nl > 0) then
+ logs.report("load otf","enhance: %s tounicode entries added (%s ligatures)",nl+ns, ns)
+ end
+end
+
+end -- closure
+
+do -- begin closure to overcome local limits and interference
+
if not modules then modules = { } end modules ['font-otf'] = {
version = 1.001,
comment = "companion to font-ini.mkiv",
@@ -5380,14 +5758,10 @@ local trace_features = false trackers.register("otf.features", function(v
local trace_dynamics = false trackers.register("otf.dynamics", function(v) trace_dynamics = v end)
local trace_sequences = false trackers.register("otf.sequences", function(v) trace_sequences = v end)
local trace_math = false trackers.register("otf.math", function(v) trace_math = v end)
-local trace_unimapping = false trackers.register("otf.unimapping", function(v) trace_unimapping = v end)
local trace_defining = false trackers.register("fonts.defining", function(v) trace_defining = v end)
--~ trackers.enable("otf.loading")
-local zwnj = 0x200C
-local zwj = 0x200D
-
--[[ldx--
The fontforge table has organized lookups in a certain way. A first implementation
of this code was organized featurewise: information related to features was
@@ -5445,7 +5819,7 @@ otf.features.default = otf.features.default or { }
otf.enhancers = otf.enhancers or { }
otf.glists = { "gsub", "gpos" }
-otf.version = 2.644 -- beware: also sync font-mis.lua
+otf.version = 2.645 -- beware: also sync font-mis.lua
otf.pack = true -- beware: also sync font-mis.lua
otf.syncspace = true
otf.notdef = false
@@ -5845,164 +6219,26 @@ otf.enhancers["analyse anchors"] = function(data,filename)
local l = lookup_to_anchor[lookup]
if not l then l = { } lookup_to_anchor[lookup] = l end
l[anchor] = true
- a[lookup] = true
- end
- end
- end
-end
-
-otf.enhancers["analyse marks"] = function(data,filename)
- local glyphs = data.glyphs
- local marks = { }
- data.luatex.marks = marks
- for unicode, index in next, data.luatex.indices do
- local glyph = glyphs[index]
- if glyph.class == "mark" then
- marks[unicode] = true
- end
- end
-end
-
-local separator = lpeg.S("_.")
-local other = lpeg.C((1 - separator)^1)
-local ligsplitter = lpeg.Ct(other * (separator * other)^0)
-
---~ print(table.serialize(lpegmatch(ligsplitter,"this")))
---~ print(table.serialize(lpegmatch(ligsplitter,"this.that")))
---~ print(table.serialize(lpegmatch(ligsplitter,"japan1.123")))
---~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more")))
---~ print(table.serialize(lpegmatch(ligsplitter,"such_so_more.that")))
-
-otf.enhancers["analyse unicodes"] = function(data,filename)
- local tounicode16, tounicode16sequence = fonts.map.tounicode16, fonts.map.tounicode16sequence
- local unicodes = data.luatex.unicodes
- -- we need to move this code
- unicodes['space'] = unicodes['space'] or 32 -- handly later on
- unicodes['hyphen'] = unicodes['hyphen'] or 45 -- handly later on
- unicodes['zwj'] = unicodes['zwj'] or zwj -- handly later on
- unicodes['zwnj'] = unicodes['zwnj'] or zwnj -- handly later on
- -- the tounicode mapping is sparse and only needed for alternatives
- local tounicode, originals, ns, nl, private, unknown = { }, { }, 0, 0, fonts.private, format("%04X",utfbyte("?"))
- data.luatex.tounicode, data.luatex.originals = tounicode, originals
- local lumunic, uparser, oparser
- if false then -- will become an option
- lumunic = fonts.map.load_lum_table(filename)
- lumunic = lumunic and lumunic.tounicode
- end
- local cidinfo, cidnames, cidcodes = data.cidinfo
- local usedmap = cidinfo and cidinfo.usedname
- usedmap = usedmap and lower(usedmap)
- usedmap = usedmap and fonts.cid.map[usedmap]
- if usedmap then
- oparser = usedmap and fonts.map.make_name_parser(cidinfo.ordering)
- cidnames = usedmap.names
- cidcodes = usedmap.unicodes
- end
- uparser = fonts.map.make_name_parser()
- local aglmap = fonts.map and fonts.map.agl_to_unicode
- for index, glyph in next, data.glyphs do
- local name, unic = glyph.name, glyph.unicode or -1 -- play safe
- if unic == -1 or unic >= private or (unic >= 0xE000 and unic <= 0xF8FF) or unic == 0xFFFE or unic == 0xFFFF then
- local unicode = (aglmap and aglmap[name]) or (lumunic and lumunic[name])
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- -- cidmap heuristics, beware, there is no guarantee for a match unless
- -- the chain resolves
- if (not unicode) and usedmap then
- local foundindex = lpegmatch(oparser,name)
- if foundindex then
- unicode = cidcodes[foundindex] -- name to number
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- else
- local reference = cidnames[foundindex] -- number to name
- if reference then
- local foundindex = lpegmatch(oparser,reference)
- if foundindex then
- unicode = cidcodes[foundindex]
- if unicode then
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- end
- if not unicode then
- local foundcodes, multiple = lpegmatch(uparser,reference)
- if foundcodes then
- if multiple then
- originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
- else
- originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
- end
- end
- end
- end
- end
- end
- end
- -- a.whatever or a_b_c.whatever or a_b_c (no numbers)
- if not unicode then
- local split = lpegmatch(ligsplitter,name)
- local nplit = (split and #split) or 0
- if nplit == 0 then
- -- skip
- elseif nplit == 1 then
- local base = split[1]
- unicode = unicodes[base] or (agl and agl[base])
- if unicode then
- if type(unicode) == "table" then
- unicode = unicode[1]
- end
- originals[index], tounicode[index], ns = unicode, tounicode16(unicode), ns + 1
- end
- else
- local t = { }
- for l=1,nplit do
- local base = split[l]
- local u = unicodes[base] or (agl and agl[base])
- if not u then
- break
- elseif type(u) == "table" then
- t[#t+1] = u[1]
- else
- t[#t+1] = u
- end
- end
- if #t > 0 then -- done then
- originals[index], tounicode[index], nl, unicode = t, tounicode16sequence(t), nl + 1, true
- end
- end
- end
- -- last resort
- if not unicode then
- local foundcodes, multiple = lpegmatch(uparser,name)
- if foundcodes then
- if multiple then
- originals[index], tounicode[index], nl, unicode = foundcodes, tounicode16sequence(foundcodes), nl + 1, true
- else
- originals[index], tounicode[index], ns, unicode = foundcodes, tounicode16(foundcodes), ns + 1, foundcodes
- end
- end
- end
- if not unicode then
- originals[index], tounicode[index] = 0xFFFD, "FFFD"
+ a[lookup] = true
end
end
end
- if trace_unimapping then
- for index, glyph in table.sortedpairs(data.glyphs) do
- local toun, name, unic = tounicode[index], glyph.name, glyph.unicode or -1 -- play safe
- if toun then
- logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X, tounicode: %s",index,name,unic,toun)
- else
- logs.report("load otf","internal: 0x%05X, name: %s, unicode: 0x%05X",index,name,unic)
- end
+end
+
+otf.enhancers["analyse marks"] = function(data,filename)
+ local glyphs = data.glyphs
+ local marks = { }
+ data.luatex.marks = marks
+ for unicode, index in next, data.luatex.indices do
+ local glyph = glyphs[index]
+ if glyph.class == "mark" then
+ marks[unicode] = true
end
end
- if trace_loading and (ns > 0 or nl > 0) then
- logs.report("load otf","enhance: %s tounicode entries added (%s ligatures)",nl+ns, ns)
- end
end
+otf.enhancers["analyse unicodes"] = fonts.map.add_to_unicode
+
otf.enhancers["analyse subtables"] = function(data,filename)
data.luatex = data.luatex or { }
local luatex = data.luatex
@@ -7198,6 +7434,16 @@ function tfm.read_from_open_type(specification)
tfmtable.format = specification.format
end
tfmtable.name = tfmtable.filename or tfmtable.fullname or tfmtable.fontname
+ if tfm.fontname_mode == "specification" then
+ -- not to be used in context !
+ local specname = specification.specification
+ if specname then
+ tfmtable.name = specname
+ if trace_defining then
+ logs.report("define font","overloaded fontname: '%s'",specname)
+ end
+ end
+ end
end
fonts.logger.save(tfmtable,file.extname(specification.filename),specification)
end
@@ -11724,12 +11970,15 @@ local function istrue (s) list[s] = 'yes' end
local function isfalse(s) list[s] = 'no' end
local function iskey (k,v) list[k] = v end
+local function istrue (s) list[s] = true end
+local function isfalse(s) list[s] = false end
+
local spaces = lpeg.P(" ")^0
local namespec = (1-lpeg.S("/:("))^0 -- was: (1-lpeg.S("/: ("))^0
local crapspec = spaces * lpeg.P("/") * (((1-lpeg.P(":"))^0)/iscrap) * spaces
local filename = (lpeg.P("file:")/isfile * (namespec/thename)) + (lpeg.P("[") * lpeg.P(true)/isname * (((1-lpeg.P("]"))^0)/thename) * lpeg.P("]"))
local fontname = (lpeg.P("name:")/isname * (namespec/thename)) + lpeg.P(true)/issome * (namespec/thename)
-local sometext = (lpeg.R("az") + lpeg.R("AZ") + lpeg.R("09") + lpeg.P("."))^1
+local sometext = (lpeg.R("az","AZ","09") + lpeg.S("+-."))^1
local truevalue = lpeg.P("+") * spaces * (sometext/istrue)
local falsevalue = lpeg.P("-") * spaces * (sometext/isfalse)
local keyvalue = (lpeg.C(sometext) * spaces * lpeg.P("=") * spaces * lpeg.C(sometext))/iskey
@@ -11742,12 +11991,12 @@ local pattern = (filename + fontname) * subvalue^0 * crapspec^0 * options^0
function fonts.define.specify.colonized(specification) -- xetex mode
list = { }
lpegmatch(pattern,specification.specification)
- for k, v in next, list do
- list[k] = v:is_boolean()
- if type(list[a]) == "nil" then
- list[k] = v
- end
- end
+--~ for k, v in next, list do
+--~ list[k] = v:is_boolean()
+--~ if type(list[a]) == "nil" then
+--~ list[k] = v
+--~ end
+--~ end
list.crap = nil -- style not supported, maybe some day
if list.name then
specification.name = list.name
@@ -11767,235 +12016,6 @@ end
fonts.define.register_split(":", fonts.define.specify.colonized)
-end -- closure
-
-do -- begin closure to overcome local limits and interference
-
-if not modules then modules = { } end modules ['font-map'] = {
- version = 1.001,
- comment = "companion to font-ini.mkiv",
- author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
- copyright = "PRAGMA ADE / ConTeXt Development Team",
- license = "see context related readme files"
-}
-
-local match, format, find, concat, gsub = string.match, string.format, string.find, table.concat, string.gsub
-local lpegmatch = lpeg.match
-
-local trace_loading = false trackers.register("otf.loading", function(v) trace_loading = v end)
-
-local ctxcatcodes = tex and tex.ctxcatcodes
-
---[[ldx--
-
-Eventually this code will disappear because map files are kind
-of obsolete. Some code may move to runtime or auxiliary modules.
-The name to unciode related code will stay of course.
---ldx]]--
-
-fonts = fonts or { }
-fonts.map = fonts.map or { }
-fonts.map.data = fonts.map.data or { }
-fonts.map.encodings = fonts.map.encodings or { }
-fonts.map.done = fonts.map.done or { }
-fonts.map.loaded = fonts.map.loaded or { }
-fonts.map.direct = fonts.map.direct or { }
-fonts.map.line = fonts.map.line or { }
-
-function fonts.map.line.pdfmapline(tag,str)
- return "\\loadmapline[" .. tag .. "][" .. str .. "]"
-end
-
-function fonts.map.line.pdftex(e) -- so far no combination of slant and extend
- if e.name and e.fontfile then
- local fullname = e.fullname or ""
- if e.slant and e.slant ~= 0 then
- if e.encoding then
- return fonts.map.line.pdfmapline("=",format('%s %s "%g SlantFont" <%s <%s',e.name,fullname,e.slant,e.encoding,e.fontfile))
- else
- return fonts.map.line.pdfmapline("=",format('%s %s "%g SlantFont" <%s',e.name,fullname,e.slant,e.fontfile))
- end
- elseif e.extend and e.extend ~= 1 and e.extend ~= 0 then
- if e.encoding then
- return fonts.map.line.pdfmapline("=",format('%s %s "%g ExtendFont" <%s <%s',e.name,fullname,e.extend,e.encoding,e.fontfile))
- else
- return fonts.map.line.pdfmapline("=",format('%s %s "%g ExtendFont" <%s',e.name,fullname,e.extend,e.fontfile))
- end
- else
- if e.encoding then
- return fonts.map.line.pdfmapline("=",format('%s %s <%s <%s',e.name,fullname,e.encoding,e.fontfile))
- else
- return fonts.map.line.pdfmapline("=",format('%s %s <%s',e.name,fullname,e.fontfile))
- end
- end
- else
- return nil
- end
-end
-
-function fonts.map.flush(backend) -- will also erase the accumulated data
- local flushline = fonts.map.line[backend or "pdftex"] or fonts.map.line.pdftex
- for _, e in pairs(fonts.map.data) do
- tex.sprint(ctxcatcodes,flushline(e))
- end
- fonts.map.data = { }
-end
-
-fonts.map.line.dvips = fonts.map.line.pdftex
-fonts.map.line.dvipdfmx = function() end
-
-function fonts.map.convert_entries(filename)
- if not fonts.map.loaded[filename] then
- fonts.map.data, fonts.map.encodings = fonts.map.load_file(filename,fonts.map.data, fonts.map.encodings)
- fonts.map.loaded[filename] = true
- end
-end
-
-function fonts.map.load_file(filename, entries, encodings)
- entries = entries or { }
- encodings = encodings or { }
- local f = io.open(filename)
- if f then
- local data = f:read("*a")
- if data then
- for line in gmatch(data,"(.-)[\n\t]") do
- if find(line,"^[%#%%%s]") then
- -- print(line)
- else
- local extend, slant, name, fullname, fontfile, encoding
- line = gsub(line,'"(.+)"', function(s)
- extend = find(s,'"([^"]+) ExtendFont"')
- slant = find(s,'"([^"]+) SlantFont"')
- return ""
- end)
- if not name then
- -- name fullname encoding fontfile
- name, fullname, encoding, fontfile = match(line,"^(%S+)%s+(%S*)[%s<]+(%S*)[%s<]+(%S*)%s*$")
- end
- if not name then
- -- name fullname (flag) fontfile encoding
- name, fullname, fontfile, encoding = match(line,"^(%S+)%s+(%S*)[%d%s<]+(%S*)[%s<]+(%S*)%s*$")
- end
- if not name then
- -- name fontfile
- name, fontfile = match(line,"^(%S+)%s+[%d%s<]+(%S*)%s*$")
- end
- if name then
- if encoding == "" then encoding = nil end
- entries[name] = {
- name = name, -- handy
- fullname = fullname,
- encoding = encoding,
- fontfile = fontfile,
- slant = tonumber(slant),
- extend = tonumber(extend)
- }
- encodings[name] = encoding
- elseif line ~= "" then
- -- print(line)
- end
- end
- end
- end
- f:close()
- end
- return entries, encodings
-end
-
-function fonts.map.load_lum_table(filename)
- local lumname = file.replacesuffix(file.basename(filename),"lum")
- local lumfile = resolvers.find_file(lumname,"map") or ""
- if lumfile ~= "" and lfs.isfile(lumfile) then
- if trace_loading or trace_unimapping then
- logs.report("load otf","enhance: loading %s ",lumfile)
- end
- lumunic = dofile(lumfile)
- return lumunic, lumfile
- end
-end
-
-local hex = lpeg.R("AF","09")
-local hexfour = (hex*hex*hex*hex) / function(s) return tonumber(s,16) end
-local hexsix = (hex^1) / function(s) return tonumber(s,16) end
-local dec = (lpeg.R("09")^1) / tonumber
-local period = lpeg.P(".")
-
-local unicode = lpeg.P("uni") * (hexfour * (period + lpeg.P(-1)) * lpeg.Cc(false) + lpeg.Ct(hexfour^1) * lpeg.Cc(true))
-local ucode = lpeg.P("u") * (hexsix * (period + lpeg.P(-1)) * lpeg.Cc(false) + lpeg.Ct(hexsix ^1) * lpeg.Cc(true))
-local index = lpeg.P("index") * dec * lpeg.Cc(false)
-
-local parser = unicode + ucode + index
-
-local parsers = { }
-
-function fonts.map.make_name_parser(str)
- if not str or str == "" then
- return parser
- else
- local p = parsers[str]
- if not p then
- p = lpeg.P(str) * period * dec * lpeg.Cc(false)
- parsers[str] = p
- end
- return p
- end
-end
-
---~ local parser = fonts.map.make_name_parser("Japan1")
---~ local parser = fonts.map.make_name_parser()
---~ local function test(str)
---~ local b, a = lpegmatch(parser,str)
---~ print((a and table.serialize(b)) or b)
---~ end
---~ test("a.sc")
---~ test("a")
---~ test("uni1234")
---~ test("uni1234.xx")
---~ test("uni12349876")
---~ test("index1234")
---~ test("Japan1.123")
-
-function fonts.map.tounicode16(unicode)
- if unicode < 0x10000 then
- return format("%04X",unicode)
- else
- return format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
- end
-end
-
-function fonts.map.tounicode16sequence(unicodes)
- local t = { }
- for l=1,#unicodes do
- local unicode = unicodes[l]
- if unicode < 0x10000 then
- t[l] = format("%04X",unicode)
- else
- t[l] = format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
- end
- end
- return concat(t)
-end
-
---~ This is quite a bit faster but at the cost of some memory but if we
---~ do this we will also use it elsewhere so let's not follow this route
---~ now. I might use this method in the plain variant (no caching there)
---~ but then I need a flag that distinguishes between code branches.
---~
---~ local cache = { }
---~
---~ function fonts.map.tounicode16(unicode)
---~ local s = cache[unicode]
---~ if not s then
---~ if unicode < 0x10000 then
---~ s = format("%04X",unicode)
---~ else
---~ s = format("%04X%04X",unicode/1024+0xD800,unicode%1024+0xDC00)
---~ end
---~ cache[unicode] = s
---~ end
---~ return s
---~ end
-
-
end -- closure
do -- begin closure to overcome local limits and interference
@@ -12012,8 +12032,9 @@ fonts = fonts or { }
-- general
-fonts.otf.pack = false
-fonts.tfm.resolve_vf = false -- no sure about this
+fonts.otf.pack = false
+fonts.tfm.resolve_vf = false -- not sure about this
+fonts.tfm.fontname_mode = "specification" -- somehow latex needs this
-- readers
diff --git a/tex/generic/context/luatex-fonts.lua b/tex/generic/context/luatex-fonts.lua
index 56768138b..84acb2b18 100644
--- a/tex/generic/context/luatex-fonts.lua
+++ b/tex/generic/context/luatex-fonts.lua
@@ -111,6 +111,7 @@ else
loadmodule('font-tfm.lua') -- will be split (we may need font-log)
loadmodule('font-cid.lua')
loadmodule('font-ott.lua') -- might be split
+ loadmodule('font-map.lua') -- for loading lum file (will be stripped)
loadmodule('font-otf.lua')
loadmodule('font-otd.lua')
loadmodule('font-oti.lua')
@@ -120,7 +121,6 @@ else
loadmodule('font-otc.lua')
loadmodule('font-def.lua')
loadmodule('font-xtx.lua')
- loadmodule('font-map.lua') -- for loading lum file (will be stripped)
loadmodule('font-dum.lua')
end
--
cgit v1.2.3