From 725d3ea39e6df5a7e274f320b6e52976d093e45e Mon Sep 17 00:00:00 2001 From: Khaled Hosny Date: Fri, 2 Apr 2010 23:35:49 +0200 Subject: Import ConTeXt math modules Modules that provide OpenType and Unicode math related functionality, not very tested yet, but it seems to magically fix some of unicode-math issues. May be it should be trimmed a bit, since a good chunk of the code is of no use to us. --- otfl-char-utf.lua | 273 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 273 insertions(+) create mode 100644 otfl-char-utf.lua (limited to 'otfl-char-utf.lua') diff --git a/otfl-char-utf.lua b/otfl-char-utf.lua new file mode 100644 index 0000000..6dd85fd --- /dev/null +++ b/otfl-char-utf.lua @@ -0,0 +1,273 @@ +if not modules then modules = { } end modules ['char-utf'] = { + version = 1.001, + comment = "companion to char-utf.mkiv", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +--[[ldx-- +

When a sequence of characters enters the application, it may be
necessary to collapse subsequences into their composed variant.

+ +

This module implements methods for collapsing and expanding
sequences. We also provide means to deal with characters that are
special to TeX as well as 8-bit characters that need to end up
in special kinds of output (for instance PDF).

+ +

We implement these manipulations as filters. One can run multiple filters +over a string.

+--ldx]]-- + +local utf = unicode.utf8 +local concat, gmatch = table.concat, string.gmatch +local utfcharacters, utfvalues = string.utfcharacters, string.utfvalues + +local ctxcatcodes = tex.ctxcatcodes + +characters = characters or { } +characters.graphemes = characters.graphemes or { } +characters.filters = characters.filters or { } +characters.filters.utf = characters.filters.utf or { } + +characters.filters.utf.initialized = false +characters.filters.utf.collapsing = true +characters.filters.utf.expanding = true + +local graphemes = characters.graphemes +local utffilters = characters.filters.utf +local utfchar, utfbyte, utfgsub = utf.char, utf.byte, utf.gsub + +--[[ldx-- +

It only makes sense to collapse at runtime, since we don't expect +source code to depend on collapsing.

+--ldx]]-- + +function utffilters.initialize() + if utffilters.collapsing and not utffilters.initialized then + for k,v in next, characters.data do + -- using vs and first testing for length is faster (.02->.01 s) + local vs = v.specials + if vs and #vs == 3 and vs[1] == 'char' then + local first, second = utfchar(vs[2]), utfchar(vs[3]) + local cgf = graphemes[first] + if not cgf then + cgf = { } + graphemes[first] = cgf + end + cgf[second] = utfchar(k) + end + end + utffilters.initialized = true + end +end + +-- utffilters.add_grapheme(utfchar(318),'l','\string~') +-- utffilters.add_grapheme('c','a','b') + +function utffilters.add_grapheme(result,first,second) + local r, f, s = tonumber(result), tonumber(first), tonumber(second) + if r then result = utfchar(r) end + if f then first = utfchar(f) end + if s then second = utfchar(s) end + if not graphemes[first] then + graphemes[first] = { [second] = result } + else + graphemes[first][second] = result + end +end + +function utffilters.collapse(str) -- old one + if utffilters.collapsing and str and #str > 1 then + if not utffilters.initialized then -- saves a call + utffilters.initialize() + end + local tokens, first, done = { }, false, false + for second in utfcharacters(str) do + local cgf = graphemes[first] + if cgf and cgf[second] then + first, done = cgf[second], true + elseif first then + tokens[#tokens+1] = first + first = second + else + first = second + end + end + if done then + tokens[#tokens+1] = first + return concat(tokens) + end + end + return str +end + +--[[ldx-- +

In order to deal with 8-bit output, we need to find a way to
go from UTF to 8-bit. This is handled in the LuaTeX
engine itself.

+ +

This leaves us with problems with characters that are specific to
TeX, like {}, $ and the like.

+ +

We can remap some characters that tex input files are sensitive to, to
a private area (while writing to a utility file) and revert them
to their original slot when we read in such a file. Instead of
reverting, we can (when we resolve characters to glyphs) map them
to their right glyph there.

+ +

For this purpose we can use the private planes 0x0F0000 and +0x100000.

+--ldx]]-- + +utffilters.private = { + high = { }, + low = { }, + escapes = { }, +} + +local low = utffilters.private.low +local high = utffilters.private.high +local escapes = utffilters.private.escapes +local special = "~#$%^&_{}\\|" + +function utffilters.private.set(ch) + local cb + if type(ch) == "number" then + cb, ch = ch, utfchar(ch) + else + cb = utfbyte(ch) + end + if cb < 256 then + low[ch] = utfchar(0x0F0000 + cb) + high[utfchar(0x0F0000 + cb)] = ch + escapes[ch] = "\\" .. ch + end +end + +function utffilters.private.replace(str) return utfgsub(str,"(.)", low ) end +function utffilters.private.revert(str) return utfgsub(str,"(.)", high ) end +function utffilters.private.escape(str) return utfgsub(str,"(.)", escapes) end + +local set = utffilters.private.set + +for ch in gmatch(special,".") do set(ch) end + +--[[ldx-- +

We get a more efficient variant of this when we integrate +replacements in collapser. This more or less renders the previous +private code redundant. The following code is equivalent but the +first snippet uses the relocated dollars.

+ + +[󰀤x󰀤] [$x$] + +--ldx]]-- + +local cr = utffilters.private.high -- kan via een lpeg +local cf = utffilters + +--[[ldx-- +

The next variant has lazy token collecting; on a 140 page mk.tex this saves
about .25 seconds, which is understandable because we have no graphemes and
not collecting tokens is not only faster but also saves garbage collecting.

+--ldx]]-- + +-- lpeg variant is not faster + +function utffilters.collapse(str) -- not really tested (we could preallocate a table) + if cf.collapsing and str then + if #str > 1 then + if not cf.initialized then -- saves a call + cf.initialize() + end + local tokens, first, done, n = { }, false, false, 0 + for second in utfcharacters(str) do + if done then + local crs = cr[second] + if crs then + if first then + tokens[#tokens+1] = first + end + first = crs + else + local cgf = graphemes[first] + if cgf and cgf[second] then + first = cgf[second] + elseif first then + tokens[#tokens+1] = first + first = second + else + first = second + end + end + else + local crs = cr[second] + if crs then + for s in utfcharacters(str) do + if n == 1 then + break + else + tokens[#tokens+1], n = s, n - 1 + end + end + if first then + tokens[#tokens+1] = first + end + first, done = crs, true + else + local cgf = graphemes[first] + if cgf and cgf[second] then + for s in utfcharacters(str) do + if n == 1 then + break + else + tokens[#tokens+1], n = s, n -1 + end + end + first, done = cgf[second], true + else + first, n = second, n + 1 + end + end + end + end + if done then + tokens[#tokens+1] = first + return concat(tokens) -- seldom called + end + elseif #str > 0 then + return cr[str] or str + end + end + return str +end + +--[[ldx-- +

Next we implement some commands that are used in the user interface.

+--ldx]]-- + +commands = commands or { } + +function commands.uchar(first,second) + tex.sprint(ctxcatcodes,utfchar(first*256+second)) +end + +--[[ldx-- +

A few helpers (used to be luat-uni).

+--ldx]]-- + +function utf.split(str) + local t = { } + for snippet in utfcharacters(str) do + t[#t+1] = snippet + end + return t +end + +function utf.each(str,fnc) + for snippet in utfcharacters(str) do + fnc(snippet) + end +end -- cgit v1.2.3